diff --git a/Documentation/devicetree/bindings/gpio/pl061-gpio.txt b/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
index a2c416b..6c41b97 100644
--- a/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/pl061-gpio.txt
@@ -7,4 +7,28 @@ Required properties:
- bit 0 specifies polarity (0 for normal, 1 for inverted)
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt mapping for GPIO IRQ.
+- baseidx : base index for the GPIO numbering.
+
+Example of gpio-controller nodes:
+ gpio0 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd887000 0x0 0x1000>;
+ interrupts = <0 2 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <0>;
+ };
+
+ gpio1 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd888000 0x0 0x1000>;
+ interrupts = <0 3 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <8>;
+ };
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3994f0b..3458d63 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -478,15 +478,6 @@ tcp_syn_retries - INTEGER
tcp_timestamps - BOOLEAN
Enable timestamps as defined in RFC1323.
-tcp_min_tso_segs - INTEGER
- Minimal number of segments per TSO frame.
- Since linux-3.12, TCP does an automatic sizing of TSO frames,
- depending on flow rate, instead of filling 64Kbytes packets.
- For specific usages, it's possible to force TCP to build big
- TSO frames. Note that TCP stack might split too big TSO packets
- if available window is too small.
- Default: 2
-
tcp_tso_win_divisor - INTEGER
This allows control over what percentage of the congestion window
can be consumed by a single TSO frame.
diff --git a/Makefile b/Makefile
index ba784b7..7ba3e2e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 20
-EXTRAVERSION =
+EXTRAVERSION = -al-5.0-pr2
NAME = TOSSUG Baby Fish
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 18a9f5e..c4ad8b0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -100,6 +100,14 @@ config ARM_DMA_IOMMU_ALIGNMENT
endif
+config ARM_HWCC_FLAG
+ bool
+ default n
+ help
+	  Select if you want the ARM specific device structure to include a flag
+ for determining whether HW cache coherency is enabled.
+ If unsure, say 'N'.
+
config HAVE_PWM
bool
@@ -167,6 +175,10 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
bool
+config ARCH_LONG_LONG_ATOMIC
+ bool
+ default n
+
config ARCH_HAS_CPUFREQ
bool
help
@@ -474,6 +486,7 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_XSCALE
@@ -864,6 +877,35 @@ config ARCH_OMAP1
help
Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
+config ARCH_ALPINE
+ bool "AnnapurnaLabs Alpine SOCs with Device Tree support"
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_AMBA
+ select SPARSE_IRQ
+ select ARM_TIMER_SP804
+ select CLKDEV_LOOKUP
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_CHIP
+ select HAVE_CLK
+ select HAVE_SMP
+ select HAVE_ARM_ARCH_TIMER
+ select HAVE_SCHED_CLOCK
+ select ARCH_SCHED_CLOCK
+ select ICST
+ select CPU_V7
+ select ARM_GIC
+ select HAVE_ARCH_TIMERS
+ select COMMON_CLK
+ select PCI
+ select PCI_DOMAINS
+ select ARCH_SUPPORTS_MSI
+ select ARM_HWCC_FLAG
+ select ARCH_LONG_LONG_ATOMIC
+ select ARM_HAS_SG_CHAIN
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ help
+ This enables support for the AnnapurnaLabs SoC Gen1 boards.
+
endchoice
menu "Multiple platform selection"
@@ -1024,6 +1066,8 @@ source "arch/arm/mach-virt/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
+source "arch/arm/mach-alpine/Kconfig"
+
source "arch/arm/mach-w90x900/Kconfig"
source "arch/arm/mach-zynq/Kconfig"
@@ -1538,6 +1582,16 @@ choice
bool "1G/3G user/kernel split"
endchoice
+config FUNC_REORDER
+ bool "Function reordering support"
+ default n
+ help
+ Support for function reordering based on functionlist found in
+ arch/arm/kernel/functionlist.
+ Function reordering can be used to increase locality on the I-cache.
+	  This option adds the flag -ffunction-sections to the compilation
+ flags.
+
config PAGE_OFFSET
hex
default 0x40000000 if VMSPLIT_1G
@@ -1731,6 +1785,14 @@ config HW_PERF_EVENTS
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+ depends on ARM_LPAE
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ def_bool y
+ depends on ARM_LPAE
+
source "mm/Kconfig"
config FORCE_MAX_ZONEORDER
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 1d41908..ec939e6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -483,6 +483,13 @@ choice
This option selects UART0 on VIA/Wondermedia System-on-a-chip
devices, including VT8500, WM8505, WM8650 and WM8850.
+ config DEBUG_ALPINE_UART
+ bool "Kernel low-level debugging messages via ALPINE UART"
+ depends on ARCH_ALPINE
+ help
+ Say Y here if you want kernel low-level debugging support
+ on ALPINE based platforms.
+
config DEBUG_LL_UART_NONE
bool "No low-level debugging UART"
depends on !ARCH_MULTIPLATFORM
@@ -648,6 +655,7 @@ config DEBUG_LL_INCLUDE
DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1
default "debug/vt8500.S" if DEBUG_VT8500_UART0
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+ default "debug/alpine.S" if DEBUG_ALPINE_UART
default "mach/debug-macro.S"
config DEBUG_UNCOMPRESS
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358b..4eda02f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@ LDFLAGS :=
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
+LDFLAGS_MODULE += --be8
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
@@ -29,6 +30,10 @@ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# Select a platform tht is kept up-to-date
KBUILD_DEFCONFIG := versatile_defconfig
+ifeq ($(CONFIG_FUNC_REORDER),y)
+KBUILD_CFLAGS += -ffunction-sections
+endif
+
# defines filename extension depending memory management type.
ifeq ($(CONFIG_MMU),)
MMUEXT := -nommu
@@ -189,12 +194,14 @@ machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
machine-$(CONFIG_ARCH_VT8500) += vt8500
machine-$(CONFIG_ARCH_W90X900) += w90x900
machine-$(CONFIG_FOOTBRIDGE) += footbridge
+machine-$(CONFIG_ARCH_ALPINE) += alpine
machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
machine-$(CONFIG_PLAT_SPEAR) += spear
machine-$(CONFIG_ARCH_VIRT) += virt
machine-$(CONFIG_ARCH_ZYNQ) += zynq
machine-$(CONFIG_ARCH_SUNXI) += sunxi
+
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
plat-$(CONFIG_ARCH_OMAP) += omap
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 032a8d9..f6e34be 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -135,6 +135,7 @@ start:
.word _edata @ zImage end address
THUMB( .thumb )
1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
@@ -679,9 +680,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
@@ -708,9 +707,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
@ (needed for ARM1176)
#ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r0, r0, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f0895c5..0278de3 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -210,6 +210,19 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
wm8850-w70v2.dtb
dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
+dtb-$(CONFIG_ARCH_ALPINE) += \
+ alpine_security_box.dtb \
+ alpine_k2s.dtb \
+ alpine_sdnic_nand.dtb \
+ alpine_sdnic.dtb \
+ alpine_db_pcie_no_arch_timer.dtb \
+ alpine_db_pcie.dtb \
+ alpine_db_al314.dtb \
+ alpine_db_al212.dtb \
+ alpine_db_no_arch_timer.dtb \
+ alpine_db.dtb \
+ alpine_db_m0.dtb \
+
targets += dtbs
targets += $(dtb-y)
endif
diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi
new file mode 100644
index 0000000..be044f1
--- /dev/null
+++ b/arch/arm/boot/dts/alpine.dtsi
@@ -0,0 +1,657 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Addressing RMN: 1024
+ *
+ * RMN description:
+ * The PCIe core reserve the lower 1MB for cfg_mem, and return UR (unsupported
+ * request) for inbound access to these addresses.
+ *
+ * Software flow:
+ * Reserve the first 1MB
+ */
+/memreserve/ 0x0 0x100000;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "annapurna-labs,alpine";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ clock-ranges;
+
+ memory {
+ device_type = "memory";
+ reg = <0 0 0 0>;
+ };
+
+ /* CPU Configuration */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0>;
+ clocks = <&cpuclk>;
+ clock-names = "cpu";
+ clock-frequency = <0>; /* Filled by U-Boot */
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <1>;
+ clocks = <&cpuclk>;
+ clock-names = "cpu";
+ clock-frequency = <0>; /* Filled by U-Boot */
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <2>;
+ clocks = <&cpuclk>;
+ clock-names = "cpu";
+ clock-frequency = <0>; /* Filled by U-Boot */
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <3>;
+ clocks = <&cpuclk>;
+ clock-names = "cpu";
+ clock-frequency = <0>; /* Filled by U-Boot */
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic_main>;
+ ranges;
+
+ /* Architected Timer */
+ arch-timer {
+ compatible = "arm,cortex-a15-timer",
+ "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ clock-frequency = <50000000>;
+ };
+
+ /* Interrupt Controller */
+ gic_main: gic_main {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ #size-cells = <0>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xfb001000 0x0 0x1000>,
+ <0x0 0xfb002000 0x0 0x2000>,
+ <0x0 0xfb004000 0x0 0x1000>,
+ <0x0 0xfb006000 0x0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ /**
+ * Secondary Interrupt Controller
+ * Connected to main GIC PPI 15, all CPUs, high level
+ */
+ /*gic_secondary {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ #size-cells = <0>;
+ #address-cells = <1>;
+ interrupt-controller;
+ reg = <0x0 0xfb009000 0x0 0x1000>,
+ <0x0 0xfb00a000 0x0 0x100>;
+ interrupts = <1 15 0xf04>;
+ };*/
+
+ /* CPU Resume */
+ cpu_resume {
+ compatible = "annapurna-labs,al-cpu-resume";
+ reg = <0x0 0xfbff5ec0 0x0 0x30>;
+ };
+
+ ccu {
+ compatible = "annapurna-labs,al-ccu";
+ reg = <0x0 0xfb090000 0x0 0x10000>;
+ io_coherency = <1>;
+ };
+
+ /* North Bridge Service Registers */
+ nb_service {
+ compatible = "annapurna-labs,al-nb-service";
+ reg = <0x0 0xfb070000 0x0 0x10000>;
+ interrupts = <0 64 4 0 65 4 0 66 4 0 67 4>;
+ dev_ord_relax = <0>;
+ };
+
+ /* PBS Registers */
+ pbs {
+ compatible = "annapurna-labs,al-pbs";
+ reg = <0x0 0xfd8a8000 0x0 0x00001000>;
+ };
+
+ /* MSIX Configuration */
+ msix {
+ compatible = "annapurna-labs,al-msix";
+ reg = <0x0 0xfbe00000 0x0 0x100000>;
+ interrupts = <0 96 1 0 159 1>;
+ };
+
+ /* Performance Monitor Unit */
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupts = <0 68 4 0 69 4 0 70 4 0 71 4>;
+ };
+
+ /* Timer 0 */
+ timer0 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xfd890000 0x0 0x1000>;
+ interrupts = <0 9 4>; /* SPI 9, active high level */
+ clocks = <&sbclk>;
+ clock-names = "sbclk";
+ };
+
+ /* Timer 1 */
+ timer1 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xfd891000 0x0 0x1000>;
+ interrupts = <0 10 4>; /* SPI 10, active high level */
+ clocks = <&sbclk>;
+ clock-names = "sbclk";
+ };
+
+ /* Timer 2 */
+ timer2 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xfd892000 0x0 0x1000>;
+ interrupts = <0 11 4>; /* SPI 11, active high level */
+ clocks = <&sbclk>;
+ clock-names = "sbclk";
+ };
+
+ /* Timer 3 */
+ timer3 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xfd893000 0x0 0x1000>;
+ interrupts = <0 12 4>; /* SPI 12, active high level */
+ clocks = <&sbclk>;
+ clock-names = "sbclk";
+ };
+
+ /* WDT 0 */
+ wdt0 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xfd88c000 0x0 0x1000>;
+ interrupts = <0 13 4>; /* SPI 13, active high level */
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ /* WDT 1 */
+ wdt1 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xfd88d000 0x0 0x1000>;
+ interrupts = <0 14 4>; /* SPI 14, active high level */
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ /* WDT 2 */
+ wdt2 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xfd88e000 0x0 0x1000>;
+ interrupts = <0 15 4>; /* SPI 15, active high level */
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ /* WDT 3 */
+ wdt3 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xfd88f000 0x0 0x1000>;
+ interrupts = <0 16 4>; /* SPI 16, active high level */
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ /* I2C Preloader */
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfd880000 0x0 0x1000>;
+ interrupts = <0 21 4>;
+ clocks = <&sbclk>;
+ clock-frequency = <400000>;
+ };
+
+ /* I2C Generic */
+ i2c-gen {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfd894000 0x0 0x1000>;
+ interrupts = <0 8 4>;
+ clocks = <&sbclk>;
+ clock-frequency = <400000>;
+ };
+
+ /* GPIO 0 */
+ gpio0: gpio0 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd887000 0x0 0x1000>;
+ interrupts = <0 2 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <0>;
+ };
+
+ /* GPIO 1 */
+ gpio1: gpio1 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd888000 0x0 0x1000>;
+ interrupts = <0 3 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <8>;
+ };
+
+ /* GPIO 2 */
+ gpio2: gpio2 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd889000 0x0 0x1000>;
+ interrupts = <0 4 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <16>;
+ };
+
+ /* GPIO 3 */
+ gpio3: gpio3 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd88a000 0x0 0x1000>;
+ interrupts = <0 5 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <24>;
+ };
+
+ /* GPIO 4 */
+ gpio4: gpio4 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd88b000 0x0 0x1000>;
+ interrupts = <0 6 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <32>;
+ };
+
+ /* GPIO 5 */
+ gpio5: gpio5 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd897000 0x0 0x1000>;
+ interrupts = <0 7 4>;
+ clocks = <&sbclk>;
+ clock-names = "apb_pclk";
+ baseidx = <40>;
+ };
+
+ /* UART 0 */
+ uart0 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd883000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by U-Boot */
+ interrupts = <0 17 4>; /* SPI 17, active high level */
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* UART 1 */
+ uart1 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd884000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by U-Boot */
+ interrupts = <0 18 4>; /* SPI 18, active high level */
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* UART 2 */
+ uart2 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd885000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by U-Boot */
+ interrupts = <0 19 4>; /* SPI 19, active high level */
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* UART 3 */
+ uart3 {
+ compatible = "ns16550a";
+ reg = <0x0 0xfd886000 0x0 0x1000>;
+ clock-frequency = <0>; /* Filled by U-Boot */
+ interrupts = <0 20 4>; /* SPI 20, active high level */
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* Internal PCIe Controller */
+ pcie-internal {
+ compatible = "annapurna-labs,al-internal-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic_main>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <0x3000 0 0 1 &gic_main 0 32 4>,
+ <0x3800 0 0 1 &gic_main 0 36 4>,
+ <0x4000 0 0 1 &gic_main 0 43 4>,
+ <0x4800 0 0 1 &gic_main 0 44 4>;
+			/* -> CAUSES INT 128 to disappear !!!!!
+ <0x0000 0 0 1 &gic_main 0 96 4>,
+ <0x0800 0 0 1 &gic_main 0 112 4>,
+ <0x1000 0 0 1 &gic_main 0 128 4>,
+ <0x1800 0 0 1 &gic_main 0 144 4>,
+ <0x2000 0 0 1 &gic_main 0 160 4>,
+ <0x2800 0 0 1 &gic_main 0 176 4>,
+ <0x3000 0 0 1 &gic_main 0 32 4>,
+ <0x3800 0 0 1 &gic_main 0 36 4>,
+ <0x4000 0 0 1 &gic_main 0 43 4>,
+ <0x4800 0 0 1 &gic_main 0 44 4>; */
+
+ /* ranges:
+ * - ECAM - non prefetchable config space
+ * - 32 bit non prefetchable memory space
+ */
+ ranges = <0x00000000 0x0 0xfbc00000 0x0 0xfbc00000 0x0 0x100000
+ 0x02000000 0x0 0xfe000000 0x0 0xfe000000 0x0 0x1000000>;
+
+ bus-range = <0x00 0x00>;
+ };
+
+ /* External PCIe Controller 0*/
+ pcie-external0 {
+ compatible = "annapurna-labs,al-pci";
+ reg = <0x0 0xfd800000 0x0 0x00020000>; /* controller 0 registers */
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic_main>;
+ interrupt-map-mask = <0x00 0 0 7>;
+ interrupt-map = <0x0000 0 0 1 &gic_main 0 40 4>;
+
+ /* ranges:
+ * Controller 0:
+ * - ECAM - non prefetchable config space: 2MB
+ * - IO - IO port space 64KB, reserve 64KB from target memory windows
+ * real IO address on the pci bus starts at 0x10000
+ * - 32 bit non prefetchable memory space: 128MB - 64KB
+ */
+ ranges = <0x00000000 0x0 0xfb600000 0x0 0xfb600000 0x0 0x00200000
+ 0x01000000 0x0 0x00010000 0x0 0xe0000000 0x0 0x00010000
+ 0x02000000 0x0 0xe0010000 0x0 0xe0010000 0x0 0x07ff0000>;
+
+ bus-range = <0x00 0xff>;
+ };
+
+ /* External PCIe Controllers 1*/
+ pcie-external1 {
+ compatible = "annapurna-labs,al-pci";
+ reg = <0x0 0xfd820000 0x0 0x00020000>; /* controller 1 registers */
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic_main>;
+ interrupt-map-mask = <0x0 0 0 7>;
+ interrupt-map = <0x0000 0 0 1 &gic_main 0 41 4>;
+
+ /* ranges:
+ * - ECAM - non prefetchable config space: 2MB
+ * - IO - IO port space 64KB, reserve 64KB from target memory windows
+ * real IO address on the pci bus starts at 0x20000
+ * - 32 bit non prefetchable memory space: 64MB - 64KB
+ */
+ ranges = <0x00000000 0x0 0xfb800000 0x0 0xfb800000 0x0 0x00200000
+ 0x01000000 0x0 0x00020000 0x0 0xe8000000 0x0 0x00010000
+ 0x02000000 0x0 0xe8010000 0x0 0xe8010000 0x0 0x03ff0000>;
+
+ bus-range = <0x00 0xff>;
+ };
+
+ /* External PCIe Controllers 2*/
+ pcie-external2 {
+ compatible = "annapurna-labs,al-pci";
+ reg = <0x0 0xfd840000 0x0 0x00020000>; /* controller 2 registers */
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic_main>;
+ interrupt-map-mask = <0x0 0 0 7>;
+ interrupt-map = <0x0000 0 0 1 &gic_main 0 42 4>;
+
+ /* ranges:
+ * - ECAM - non prefetchable config space: 2MB
+ * - IO - IO port space 64KB, reserve 64KB from target memory windows
+ * real IO address on the pci bus starts at 0x30000
+ * - 32 bit non prefetchable memory space: 64MB - 64KB
+ */
+ ranges = <0x00000000 0x0 0xfba00000 0x0 0xfba00000 0x0 0x00200000
+ 0x01000000 0x0 0x00030000 0x0 0xec000000 0x0 0x00010000
+ 0x02000000 0x0 0xec010000 0x0 0xec010000 0x0 0x03ff0000>;
+
+ bus-range = <0x00 0xff>;
+ };
+
+ /* Thermal sensor */
+ thermal {
+ compatible = "annapurna-labs,al-thermal";
+ reg = <0x0 0xfd860a00 0x0 0x00000100>;
+ };
+
+ /* NOR flash */
+ nor_flash {
+ compatible = "cfi-flash";
+ reg = <0x0 0xf4000000 0x0 0x4000000>;
+ bank-width = <1>;
+ device-width = <1>;
+
+ /* TODO: Add partitions */
+ };
+
+ /* NAND flash */
+ nand-flash {
+ compatible = "annapurna-labs,al-nand";
+ reg = <0x0 0xfa100000 0x0 0x00202000>;
+ interrupts = <0 1 4>;
+ };
+
+ /* SPI Bus */
+ spi {
+ compatible = "snps,dw-spi-mmio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0xfd882000 0x0 0x1000>;
+ interrupts = <0 23 4>;
+ num-chipselect = <4>;
+ bus-num = <0>;
+ clocks = <&sbclk>;
+ clock-names = "sbclk";
+ };
+
+ /* Clocks */
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Reference clock */
+ refclk: refclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>; /* Filled by U-Boot */
+ };
+
+ /* South Bridge Clock */
+ sbclk: sbclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <1000000>; /* Filled by U-Boot */
+ };
+
+ /* North Bridge Clock */
+ nbclk: nbclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <1000000>; /* Filled by U-Boot */
+ };
+
+ /* CPU Clock */
+ cpuclk: cpuclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <1000000>; /* Filled by U-Boot */
+ };
+ };
+
+ /* SerDes */
+ serdes {
+ compatible = "annapurna-labs,al-serdes";
+ reg = <0x0 0xfd8c0000 0x0 0x1000>;
+ };
+
+ /* Memory Controller Registers */
+ mc {
+ compatible = "annapurna-labs,al-mc";
+ reg = <0x0 0xfb080000 0x0 0x00010000>;
+ };
+
+ /* Pin control (Multi usage I/O) */
+ pinctrl {
+ compatible = "annapurna-labs,al-pinctrl";
+ reg = <0x0 0xfd8a8000 0x0 0x00001000>;
+
+ if_nor_8: if_nor_8 { id = "if_nor_8"; arg = <0>; };
+ if_nor_16: if_nor_16 { id = "if_nor_16"; arg = <0>; };
+ if_nor_cs_0: if_nor_cs_0 { id = "if_nor_cs_0"; arg = <0>; };
+ if_nor_cs_1: if_nor_cs_1 { id = "if_nor_cs_1"; arg = <0>; };
+ if_nor_cs_2: if_nor_cs_2 { id = "if_nor_cs_2"; arg = <0>; };
+ if_nor_cs_3: if_nor_cs_3 { id = "if_nor_cs_3"; arg = <0>; };
+ if_nor_wp: if_nor_wp { id = "if_nor_wp"; arg = <0>; };
+ if_nand_8: if_nand_8 { id = "if_nand_8"; arg = <0>; };
+ if_nand_16: if_nand_16 { id = "if_nand_16"; arg = <0>; };
+ if_nand_cs_0: if_nand_cs_0 { id = "if_nand_cs_0"; arg = <0>; };
+ if_nand_cs_1: if_nand_cs_1 { id = "if_nand_cs_1"; arg = <0>; };
+ if_nand_cs_2: if_nand_cs_2 { id = "if_nand_cs_2"; arg = <0>; };
+ if_nand_cs_3: if_nand_cs_3 { id = "if_nand_cs_3"; arg = <0>; };
+ if_nand_wp: if_nand_wp { id = "if_nand_wp"; arg = <0>; };
+ if_sram_8: if_sram_8 { id = "if_sram_8"; arg = <0>; };
+ if_sram_16: if_sram_16 { id = "if_sram_16"; arg = <0>; };
+ if_sram_cs_0: if_sram_cs_0 { id = "if_sram_cs_0"; arg = <0>; };
+ if_sram_cs_1: if_sram_cs_1 { id = "if_sram_cs_1"; arg = <0>; };
+ if_sram_cs_2: if_sram_cs_2 { id = "if_sram_cs_2"; arg = <0>; };
+ if_sram_cs_3: if_sram_cs_3 { id = "if_sram_cs_3"; arg = <0>; };
+ if_sata_0_leds: if_sata_0_leds { id = "if_sata_0_leds"; arg = <0>; };
+ if_sata_1_leds: if_sata_1_leds { id = "if_sata_1_leds"; arg = <0>; };
+ if_eth_leds: if_eth_leds { id = "if_eth_leds"; arg = <0>; };
+ if_eth_gpio: if_eth_gpio { id = "if_eth_gpio"; arg = <0>; };
+ if_uart_1: if_uart_1 { id = "if_uart_1"; arg = <0>; };
+ if_uart_1_modem: if_uart_1_modem { id = "if_uart_1_modem"; arg = <0>; };
+ if_uart_2: if_uart_2 { id = "if_uart_2"; arg = <0>; };
+ if_uart_3: if_uart_3 { id = "if_uart_3"; arg = <0>; };
+ if_i2c_gen: if_i2c_gen { id = "if_i2c_gen"; arg = <0>; };
+ if_ulpi_0_rst_n: if_ulpi_0_rst_n { id = "if_ulpi_0_rst_n"; arg = <0>; };
+ if_ulpi_1_rst_n: if_ulpi_1_rst_n { id = "if_ulpi_1_rst_n"; arg = <0>; };
+ if_pci_ep_int_a: if_pci_ep_int_a { id = "if_pci_ep_int_a"; arg = <0>; };
+ if_pci_ep_reset_out: if_pci_ep_reset_out { id = "if_pci_ep_reset_out"; arg = <0>; };
+ if_spim_a_ss_1: if_spim_a_ss_1 { id = "if_spim_a_ss_1"; arg = <0>; };
+ if_spim_a_ss_2: if_spim_a_ss_2 { id = "if_spim_a_ss_2"; arg = <0>; };
+ if_spim_a_ss_3: if_spim_a_ss_3 { id = "if_spim_a_ss_3"; arg = <0>; };
+ if_ulpi_1_b: if_ulpi_1_b { id = "if_ulpi_1_b"; arg = <0>; };
+ if_gpio0: if_gpio0 { id = "if_gpio"; arg = <0>; };
+ if_gpio1: if_gpio1 { id = "if_gpio"; arg = <1>; };
+ if_gpio2: if_gpio2 { id = "if_gpio"; arg = <2>; };
+ if_gpio3: if_gpio3 { id = "if_gpio"; arg = <3>; };
+ if_gpio4: if_gpio4 { id = "if_gpio"; arg = <4>; };
+ if_gpio5: if_gpio5 { id = "if_gpio"; arg = <5>; };
+ if_gpio6: if_gpio6 { id = "if_gpio"; arg = <6>; };
+ if_gpio7: if_gpio7 { id = "if_gpio"; arg = <7>; };
+ if_gpio8: if_gpio8 { id = "if_gpio"; arg = <8>; };
+ if_gpio9: if_gpio9 { id = "if_gpio"; arg = <9>; };
+ if_gpio10: if_gpio10 { id = "if_gpio"; arg = <10>; };
+ if_gpio11: if_gpio11 { id = "if_gpio"; arg = <11>; };
+ if_gpio12: if_gpio12 { id = "if_gpio"; arg = <12>; };
+ if_gpio13: if_gpio13 { id = "if_gpio"; arg = <13>; };
+ if_gpio14: if_gpio14 { id = "if_gpio"; arg = <14>; };
+ if_gpio15: if_gpio15 { id = "if_gpio"; arg = <15>; };
+ if_gpio16: if_gpio16 { id = "if_gpio"; arg = <16>; };
+ if_gpio17: if_gpio17 { id = "if_gpio"; arg = <17>; };
+ if_gpio18: if_gpio18 { id = "if_gpio"; arg = <18>; };
+ if_gpio19: if_gpio19 { id = "if_gpio"; arg = <19>; };
+ if_gpio20: if_gpio20 { id = "if_gpio"; arg = <20>; };
+ if_gpio21: if_gpio21 { id = "if_gpio"; arg = <21>; };
+ if_gpio22: if_gpio22 { id = "if_gpio"; arg = <22>; };
+ if_gpio23: if_gpio23 { id = "if_gpio"; arg = <23>; };
+ if_gpio24: if_gpio24 { id = "if_gpio"; arg = <24>; };
+ if_gpio25: if_gpio25 { id = "if_gpio"; arg = <25>; };
+ if_gpio26: if_gpio26 { id = "if_gpio"; arg = <26>; };
+ if_gpio27: if_gpio27 { id = "if_gpio"; arg = <27>; };
+ if_gpio28: if_gpio28 { id = "if_gpio"; arg = <28>; };
+ if_gpio29: if_gpio29 { id = "if_gpio"; arg = <29>; };
+ if_gpio30: if_gpio30 { id = "if_gpio"; arg = <30>; };
+ if_gpio31: if_gpio31 { id = "if_gpio"; arg = <31>; };
+ if_gpio32: if_gpio32 { id = "if_gpio"; arg = <32>; };
+ if_gpio33: if_gpio33 { id = "if_gpio"; arg = <33>; };
+ if_gpio34: if_gpio34 { id = "if_gpio"; arg = <34>; };
+ if_gpio35: if_gpio35 { id = "if_gpio"; arg = <35>; };
+ if_gpio36: if_gpio36 { id = "if_gpio"; arg = <36>; };
+ if_gpio37: if_gpio37 { id = "if_gpio"; arg = <37>; };
+ if_gpio38: if_gpio38 { id = "if_gpio"; arg = <38>; };
+ if_gpio39: if_gpio39 { id = "if_gpio"; arg = <39>; };
+ if_gpio40: if_gpio40 { id = "if_gpio"; arg = <40>; };
+ if_gpio41: if_gpio41 { id = "if_gpio"; arg = <41>; };
+ if_gpio42: if_gpio42 { id = "if_gpio"; arg = <42>; };
+ if_gpio43: if_gpio43 { id = "if_gpio"; arg = <43>; };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db.dts b/arch/arm/boot/dts/alpine_db.dts
new file mode 100644
index 0000000..4aa7286
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db.dts
@@ -0,0 +1,776 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (2xSATA)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+ * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "sgmii-2.5g" - SGMII 2.5G (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ /* arch-timer { compatible = "disabled"; }; */
+ timer0 { status = "disabled"; };
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+ reg = <0x01000000 0x3f000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_al212.dts b/arch/arm/boot/dts/alpine_db_al212.dts
new file mode 100644
index 0000000..7b5b7e3
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_al212.dts
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (AL-212)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+ * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "sgmii-2.5g" - SGMII 2.5G (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g2x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modeuls can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "disabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. In this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <2>;
+ width = <1>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <2>;
+ width = <1>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <2>;
+ width = <1>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+			partition@3 {
+				label = "ubifs";
+				/* 0x03f000000 (34 bits) overflows a 32-bit DT cell;
+				 * intended size is 0x3f000000, ending the partition at
+				 * 0x40000000 — the 1 GiB NAND boundary. */
+				reg = <0x01000000 0x3f000000>;
+			};
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_al314.dts b/arch/arm/boot/dts/alpine_db_al314.dts
new file mode 100644
index 0000000..19e16eb
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_al314.dts
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (AL-314 - 2xPCIe)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+ * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "sgmii-2.5g" - SGMII 2.5G (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g2x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "pcie_g2x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. In this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. In this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. In this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. In this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+ * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <2>;
+ width = <2>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <2>;
+ width = <2>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <2>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+			partition@3 {
+				label = "ubifs";
+				/* 0x03f000000 (34 bits) overflows a 32-bit DT cell;
+				 * intended size is 0x3f000000, ending the partition at
+				 * 0x40000000 — the 1 GiB NAND boundary. */
+				reg = <0x01000000 0x3f000000>;
+			};
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_m0.dts b/arch/arm/boot/dts/alpine_db_m0.dts
new file mode 100644
index 0000000..b5d52f3
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_m0.dts
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "M0 - 2.1";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db_auto";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+ * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <20>;
+ post_emph = <4>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <20>;
+ post_emph = <4>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <20>;
+ post_emph = <4>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <20>;
+ post_emph = <4>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <16>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <16>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <16>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x10>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <16>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+			reg = <0x01000000 0x3f000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_no_arch_timer.dts b/arch/arm/boot/dts/alpine_db_no_arch_timer.dts
new file mode 100644
index 0000000..3f0ed9d
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_no_arch_timer.dts
@@ -0,0 +1,776 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (2xSATA)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "sgmii-2.5g" - SGMII 2.5G (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, sgmii-2.5g, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+			reg = <0x01000000 0x3f000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_pcie.dts b/arch/arm/boot/dts/alpine_db_pcie.dts
new file mode 100644
index 0000000..5eabc73
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_pcie.dts
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (2xPCIe)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ /* arch-timer { compatible = "disabled"; }; */
+ timer0 { status = "disabled"; };
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+			partition@3 {
+				label = "ubifs";
+				reg = <0x01000000 0x3f000000>; /* was 0x03f000000: 9 hex digits overflow the 32-bit size cell; 0x1000000 + 0x3f000000 = 1 GiB flash exactly */
+			};
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_db_pcie_no_arch_timer.dts b/arch/arm/boot/dts/alpine_db_pcie_no_arch_timer.dts
new file mode 100644
index 0000000..d661904
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_db_pcie_no_arch_timer.dts
@@ -0,0 +1,699 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "alpine_db (2xPCIe)";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_sata_1_leds
+ &if_eth_leds
+ &if_uart_1>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* UART 1,2,3 module inserted */
+ 32 0 0 /* PCIE0 present */
+ 33 0 0 /* PCIE1_present */
+ 34 0 0 /* PCIE2SATA not present */
+ 35 0 0 /* SFP 0,1 absent */
+ 36 0 0 /* SFP 2,3 absent */
+ 37 0 0 /* External ETH PHY interrupt */
+
+ /* Outputs */
+ 1 1 0 /* Debug led 0 */
+ 2 1 1 /* Debug led 1 */
+ 3 1 1 /* ULPI 0 RST_N */
+ 5 1 0 /* Preloader EEPROM enable */
+ 38 1 1 /* PCI RSTN */
+ 39 1 1 /* SFP ON */
+ 40 1 0 /* External ETH PHY A reset */
+ 41 1 0 /* External ETH PHY B reset */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group1 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <19>;
+ post_emph = <2>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_1_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_2_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ lane_3_params {
+ rx { override = "disabled"; };
+ tx { override = "disabled"; };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <4>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+ * I2C bus ID as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+ * I2C bus ID of the retimer as shows in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 *   10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+ ext_phy {
+ /* PHY management interface type (mdc-mdio, xmdc-xmdio, i2c) */
+ phy_mgmt_if = "mdc-mdio";
+ /* in case the interface is i2c phy-addr is the
+ * 5 lowest bits of the i2c address*/
+ phy-addr = <5>;
+ /*
+ * MDC-MDIO frequency:
+ * - "2.5Mhz"
+ * - "1.0Mhz"
+ */
+ mdc-mdio-freq = "1.0Mhz";
+ /* auto negotiation (in-band, out-of-band)*/
+ auto-neg-mode="out-of-band";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ /* spi { status = "disabled"; }; */
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+			partition@3 {
+				label = "ubifs";
+				reg = <0x01000000 0x3f000000>; /* was 0x03f000000: 9 hex digits overflow the 32-bit size cell; 0x1000000 + 0x3f000000 = 1 GiB flash exactly */
+			};
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bq32k@68 {
+ compatible = "bq32000";
+ reg = <0x68>;
+ };
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ /* SATA SW controlled leds, use gpio 1 for port 0 host 1, and gpio 2 for port 1 host 1 */
+ /* host 1 pci address is domain 0, bus 0, device (slot) 9 */
+ /* uncomment the below node in order to enable */
+ /*
+ sata_sw_leds {
+ compatible = "annapurna-labs,al-sata-sw-leds";
+ led@0 {
+ label = "host 1, port 0 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <0>;
+ gpios =<&gpio0 1 0>;
+ };
+ led@1 {
+ label = "host 1, port 1 activity&presence led";
+ pci_domain = <0>;
+ pci_bus = <0>;
+ pci_dev = <9>;
+ port = <1>;
+ gpios =<&gpio0 2 0>;
+ };
+ };
+ */
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_k2s.dts b/arch/arm/boot/dts/alpine_k2s.dts
new file mode 100644
index 0000000..e3a24be
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_k2s.dts
@@ -0,0 +1,873 @@
+/*
+ * Copyright 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/* Reserve application bar 0 for both 2GB and 4GB configurations */
+/memreserve/ 0x30000000 0x8000000;
+/memreserve/ 0xb0000000 0x8000000;
+
+/* Reserve APCEA memory for both 2GB and 4GB configurations */
+/memreserve/ 0x38000000 0x8000000;
+/memreserve/ 0xb8000000 0x8000000;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine k2s";
+
+ soc {
+ board-cfg {
+ id = "Alpine k2s";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_uart_1
+ &if_uart_2
+ &if_eth_leds
+ &if_pci_ep_int_a>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 0 0 0 /* retimer int */
+ 1 0 0 /* QSFP present */
+ 32 0 0 /* I2C module present */
+ 35 0 0 /* Expansion port0 Interrupt */
+ 36 0 0 /* Expansion port1 Interrupt */
+ 37 0 0 /* Input for now */
+ 38 0 0 /* Input for now */
+ 39 0 0 /* QSFP Interrupt */
+ 40 0 0 /* Input for now */
+
+ /* Outputs */
+ 4 1 1 /* PLTRST#_LPC_R */
+ 41 1 1 /* Diag start indication */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group1 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group2 {
+ interface = "sata";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x0>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x7>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x7>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x7>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x7>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <4>; /* I2C Mux Channel 3 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+ * The type of the retimer connected to this port
+ * currently supported: br210, br410
+ */
+ type = "br410";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <4>;
+ i2c-addr = <86>;
+ channel = "D";
+ };
+ };
+
+ port1 {
+ status = "enabled";
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <4>; /* I2C Mux Channel 3 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+ * The type of the retimer connected to this port
+ * currently supported: br210, br410
+ */
+ type = "br410";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <4>;
+ i2c-addr = <86>;
+ channel = "C";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <4>; /* I2C Mux Channel 3 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+ * The type of the retimer connected to this port
+ * currently supported: br210, br410
+ */
+ type = "br410";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <4>;
+ i2c-addr = <86>;
+ channel = "B";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <4>; /* I2C Mux Channel 3 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+ * The type of the retimer connected to this port
+ * currently supported: br210, br410
+ */
+ type = "br410";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <4>;
+ i2c-addr = <86>;
+ channel = "A";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <0>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ spi { status = "disabled"; };
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /* uart0 { status = "disabled"; }; */
+ /* uart1 { status = "disabled"; }; */
+ /* uart2 { status = "disabled"; }; */
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+		/* Don't unify the reserved partitions */
+ partition@0 {
+ label = "reserved1";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "reserved2";
+ reg = <0x00200000 0x00200000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00400000 0x00c00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+			reg = <0x01000000 0x37000000>;
+ };
+ partition@4 {
+ label = "ubifs_apps";
+			reg = <0x38000000 0x08000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_sdnic.dts b/arch/arm/boot/dts/alpine_sdnic.dts
new file mode 100644
index 0000000..4e75bd4
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_sdnic.dts
@@ -0,0 +1,547 @@
+/*
+ * Copyright 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Smart NIC";
+
+ soc {
+ board-cfg {
+ id = "Smart NIC";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_uart_1
+ &if_uart_2
+ &if_eth_leds
+ &if_pci_ep_int_a>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 32 0 0 /* SFP 3 absent */
+ 35 0 0 /* SFP 0 absent */
+ 36 0 0 /* SFP 1 absent */
+ 37 0 0 /* Input for now */
+ 38 0 0 /* Input for now */
+ 40 0 0 /* Input for now */
+
+ /* Outputs */
+ 0 1 0 /* EEPROM En */
+ 4 1 1 /* PLTRST#_LPC_R */
+ 39 1 1 /* SFP Enable */
+ 41 1 1 /* Diag start indication */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group1 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group2 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <0 1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+					 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port1 {
+ status = "disabled";
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+				 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+				 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <2>; /* I2C Mux Channel 1 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+				 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "A";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+				 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <0>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ spi { status = "disabled"; };
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /* uart0 { status = "disabled"; }; */
+ /* uart1 { status = "disabled"; }; */
+ /* uart2 { status = "disabled"; }; */
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ nand-flash { status = "disabled"; };
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+ reg = <0x01000000 0x03f000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c_mux@70 {
+ compatible = "pca9543";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_sdnic_nand.dts b/arch/arm/boot/dts/alpine_sdnic_nand.dts
new file mode 100644
index 0000000..362005d
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_sdnic_nand.dts
@@ -0,0 +1,668 @@
+/*
+ * Copyright 2014 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/* Reserve application bar 0 for both 2GB and 4GB configurations */
+/memreserve/ 0x30000000 0x8000000;
+/memreserve/ 0xb0000000 0x8000000;
+
+/* Reserve APCEA memory for both 2GB and 4GB configurations */
+/memreserve/ 0x38000000 0x8000000;
+/memreserve/ 0xb8000000 0x8000000;
+
+/* Reserve 16K for uC update (from 1MB)*/
+/memreserve/ 0x00100000 0x00004000;
+
+/* Reserve 1MB from 32MB offset for trace functionality */
+/memreserve/ 0x02000000 0x00100000;
+
+/* Reserve 15MB from 33MB offset for device memory to be used by host */
+/memreserve/ 0x02100000 0x00F00000;
+
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Smart NIC with NAND";
+
+ soc {
+ board-cfg {
+ id = "Smart NIC with NAND";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_pci_ep_reset_out - PCIe EP reset out
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_uart_1
+ &if_uart_2
+ &if_eth_leds
+ &if_pci_ep_int_a>;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 32 0 0 /* SFP 3 absent */
+ 35 0 0 /* SFP 0 absent */
+ 36 0 0 /* SFP 1 absent */
+ 37 0 0 /* Input for now */
+ 38 0 0 /* Input for now */
+ 40 0 0 /* Input for now */
+
+ /* Outputs */
+ 0 1 0 /* EEPROM En */
+ 4 1 1 /* PLTRST#_LPC_R */
+ 39 1 1 /* SFP Enable */
+ 41 1 1 /* Diag start indication */
+ 42 1 1 /* VDD DRAM 1: 1.5V, 0: 1.35V */
+ 43 1 0 /* VTT OFF */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group1 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group2 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <0 1 2 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <0 1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+
+ lane_0_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x4>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_1_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x4>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_2_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x4>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ lane_3_params {
+ rx {
+ override = "enabled";
+ dcgain = <0x0>;
+ dfe_3db_freq = <0x7>;
+ dfe_gain = <0x0>;
+ dfe_1st_tap_ctrl = <0x0>;
+ dfe_2nd_tap_ctrl = <0x8>;
+ dfe_3rd_tap_ctrl = <0x0>;
+ dfe_4th_tap_ctrl = <0x8>;
+ low_freq_agc_gain = <0x7>;
+ high_freq_agc_boost = <0x4>;
+ precal_code_sel = <0x0>;
+ };
+ tx {
+ override = "enabled";
+ amp = <1>;
+ total_driver_units = <25>;
+ post_emph = <6>;
+ pre_emph = <0>;
+ slew_rate = <0>;
+ };
+ };
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+				 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+
+ port1 {
+ status = "disabled";
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <2>; /* I2C Mux Channel 1 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if passive DAC (direct attached cable) are used
+ * and what is the length (in meters) of it.
+ * in case of active DAC the parameter should be
+ * set to disable.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "enabled";
+ /*
+				 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "A";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+ freeze-serdes-params = "enable";
+
+ 10g-serial {
+ /*
+ * in case of 10g-serial mode indicate
+ * if DAC are used and what is the length of it.
+ * in auto-detect mode will be the default value
+ * in case the SFP EEPROM can't be accessed
+ */
+ dac = "enabled";
+ dac-length = <3>;
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+
+ retimer {
+ exist = "disabled";
+ /*
+				 * I2C bus ID of the retimer as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A,
+ * Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ */
+ i2c-bus = <0>;
+ i2c-addr = <97>;
+ channel = "B";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <0>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "disabled";
+ gen = <3>;
+ width = <4>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ spi { status = "disabled"; };
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /* uart0 { status = "disabled"; }; */
+ /* uart1 { status = "disabled"; }; */
+ /* uart2 { status = "disabled"; }; */
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+ reg = <0x01000000 0x037000000>;
+ };
+ partition@4 {
+ label = "ubifs_apps";
+ reg = <0x38000000 0x008000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/alpine_security_box.dts b/arch/arm/boot/dts/alpine_security_box.dts
new file mode 100644
index 0000000..ab8e907
--- /dev/null
+++ b/arch/arm/boot/dts/alpine_security_box.dts
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/include/ "alpine.dtsi"
+
+/ {
+ version = "2.5";
+ model = "Annapurna Labs Alpine Dev Board";
+
+ hypervisor {
+ };
+
+ soc {
+ board-cfg {
+ id = "Accton Security Box";
+
+ /* U-Boot offset in boot source [bytes] */
+ u-boot-offset = <0x20000>;
+
+ /*
+ * Some of the PBS bus controllers have dedicated pins,
+ * however most of the pins are Multi-usage I/Os (MUIO),
+ * thus enabling flexible usage.
+ * Refer to the specific chip datasheet for further details.
+ *
+ * Choose which interfaces are to be multiplexed:
+ * - &if_nand_8 - NAND
+ * - &if_nand_cs_0, ..., &if_nand_cs_3 - NAND CS 0..3
+ * - &if_nand_wp - NAND WP
+ * - &if_sata_0_leds, &if_sata_1_leds - SATA leds
+ * - &if_eth_leds - Ethernet leds
+ * - &if_uart_1, ..., &if_uart_3 - UART 1..3
+			 * - &if_i2c_gen - secondary I2C controller
+ * - &if_ulpi_0_rst_n - ULPI reset
+ * - &if_pci_ep_int_a - PCIe EP interrupt
+ * - &if_spim_a_ss_1, ..., &if_spim_a_ss_3 - SPI CS 1..3
+ */
+ pinctrl_init {
+ pinctrl-0 = <
+ &if_nand_8
+ &if_nand_cs_0
+ &if_eth_leds
+ >;
+ };
+
+ /*
+ * Initial GPIO configuration - which pins are input,
+ * which are output, and what is the initial value for
+ * output pins.
+ * By default, GPIO pins that are not listed below are
+ * input pins.
+ * GPIO pins which are listed below are automatically
+ * multiplexed and should not conflict with the
+ * multiplexed interfaces listed in 'pinctrl_init'
+ * above.
+ */
+ gpio_init {
+ /* <
+ * GPIO num 1, is output, output value
+ * GPIO num 2, is output, output value
+ * ...
+ * GPIO num N, is output, output value
+ * >
+ */
+ gpio-list = <
+ /* Inputs */
+ 1 0 0 /* PCIe_A_WALE_n */
+ 2 0 0 /* PCIe_B_WALE_n */
+ 32 0 0 /* PCIe0_PRESENTn */
+ 33 0 0 /* PCIe0_PRESENTn */
+ 34 0 0 /* Reset Defaults */
+ 35 0 0 /* SFP_ABSENT1 */
+ 36 0 0 /* SFP_ABSENT3 */
+ 37 0 0 /* PHY_INTn */
+
+ /* Outputs */
+ 0 1 0 /* LED_USB1 - default 0(OFF) */
+ 3 1 1 /* UAB_A_RSTn - default 1(Out of reset) */
+ 4 1 0 /* LED_USB2 - default 0(OFF) */
+					5  1 0 /* EEPROM_EN - default 0(disable access to eeprom once init is done) */
+ 22 1 0 /* LED[4] - default 0(OFF) */
+ 23 1 0 /* LED[4] - default 0(OFF) */
+ 24 1 0 /* LED[4] - default 0(OFF) */
+ 25 1 0 /* LED[4] - default 0(OFF) */
+ 38 1 1 /* PCIe_RST_n - default 1 (Out of Reset) */
+ 39 1 1 /* SFP ON - default 1 (Out of Reset) */
+ 40 1 0 /* ETH_A_RST - default 0 (Out of Reset) */
+ 41 1 0 /* ETH_B_RST - default 0 (Out of Reset) */
+ 42 1 1 /* VDDR_1.5V - default 1 (1.5V) */
+ 43 1 0 /* VTT_OFF - default 0 (ON) */
+ >;
+ };
+
+ /* SerDes initialization configuration */
+ serdes {
+ /*
+ * 'ref-clock' can be any of the following:
+ * - "100Mhz-internal" - 100 Mhz internal clock
+ * - "100Mhz" - 100 Mhz on-board clock
+ * - "156.25Mhz" - 156.25 Mhz on-board clock
+ * - "right" - Clock is routed from right group
+ * - assuming the right group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ * - "left" - Clock is routed from left group
+ * - assuming the left group has
+ * on-board clock
+ * - assuming 3 <> 2 <> 1 <> 0
+ *
+ * 'active-lanes' selects which lanes are active
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-tx-lanes' selects which lanes have their
+ * Tx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'inv-rx-lanes' selects which lanes have their
+ * Rx polarity inverted (+/-)
+ * e.g. <0 1 2 3>, <0 1>, etc.
+ *
+ * 'ssc' - SSC (spread spectrum clock)
+ * - "enabled" or "disabled"
+ * - Relevant only for 'interface' = 'sata'
+ *
+ * group 0:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "pcie_g2x2_pcie_g2x2" - 2xPCIe gen 2 x2
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ *
+ * group 1:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 2:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g3x4" - PCIe gen 3 x4
+ * - "sata" - SATA (up to 4 ports)
+ *
+ * group 3:
+ * 'interface' can be any of the following:
+ * - "off" - no interface on this group
+ * - "off_bp" - no interface on this group, but
+ * r2l/l2r clocks are bypassed
+ * - "pcie_g2x2_usb" - PCIe gen 2 x2 + USB
+ * - "sgmii" - SGMII (up to 4 ports)
+ * - "10gbe" - 10GbE (up to 4 ports)
+ */
+
+ group0 {
+ interface = "pcie_g2x1_pcie_g2x1";
+ ref-clock = "100Mhz";
+ active-lanes = <0 2>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group1 {
+ interface = "off";
+ ref-clock = "100Mhz";
+ active-lanes = <>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group2 {
+ interface = "pcie_g3x4";
+ ref-clock = "100Mhz";
+ active-lanes = <0>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+
+ group3 {
+ interface = "10gbe";
+ ref-clock = "156.25Mhz";
+ active-lanes = <1 3>;
+ inv-tx-lanes = <>;
+ inv-rx-lanes = <>;
+ ssc = "disabled";
+ };
+ };
+
+ /* Ethernet port configuration */
+ ethernet {
+ port0 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <1>; /* I2C Mux Channel 0 */
+
+ 10g-serial {
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+ };
+
+ port1 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ 1g-serial {
+ auto-neg = "disabled";
+ speed = "1000M";
+ duplex = "full";
+ };
+
+ };
+
+ port2 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "auto-detect-auto-speed";
+
+ /*
+			 * I2C bus ID as shown in Linux I2C bus enumeration:
+ * When using I2C_A and I2C Mux is connected to I2C_A, Linux enumerates multiple I2C buses:
+ * - I2C bus 0 is the physical bus
+ * - I2C bus 1 is channel 0 of the mux
+ * - I2C bus 2 is channel 1 of the mux
+ * - ...
+ * Relevant when MAC mode is auto-detect or when phy_type is i2c
+ */
+ i2c-id = <3>; /* I2C Mux Channel 2 */
+
+ 10g-serial {
+ auto-neg = "disabled";
+ link-training = "disabled";
+ fec = "disabled";
+ };
+ };
+
+ port3 {
+ status = "enabled";
+
+ /*
+ * MAC mode (rgmii, sgmii, 1g-serial, 10g-serial, auto-detect, auto-detect-auto-speed)
+ * - auto-detect mode is relevant when SFP exists. in this mode
+ * SFP EEPROM will be read to detect what SFP inserted
+ * - auto-detect-auto-speed mode is relevant when SFP exists and
+ * the SerDes reference clock frequency is 156.25Mhz. In this
+ * mode SFP EEPROM will be read to detect what SFP inserted while
+			 * 10G serial and 1G serial SFP modules can be exchanged on-the-fly
+ * as long as all active SerDes based Ethernet ports use the same
+ * mode.
+ */
+ mode = "rgmii";
+
+ 1g-serial {
+ auto-neg = "disabled";
+ speed = "1000M";
+ duplex = "full";
+ };
+ };
+ };
+
+ /* PCIe port configuration */
+ pcie {
+ /*
+ * Selects which ports are used as end-point
+ * ports, e.g. <0>.
+ */
+ ep-ports = <>;
+
+ /* Port 0 status, speed, and number of lanes */
+ port0 {
+ status = "enabled";
+ gen = <2>;
+ width = <1>;
+ };
+
+ /* Port 1 status, speed, and number of lanes */
+ port1 {
+ status = "enabled";
+ gen = <2>;
+ width = <1>;
+ };
+
+ /* Port 2 status, speed, and number of lanes */
+ port2 {
+ status = "enabled";
+ gen = <2>;
+ width = <1>;
+ };
+ };
+ };
+
+ /* pmu { status = "disabled"; }; */
+ arch-timer { compatible = "disabled"; };
+ /* timer0 { status = "disabled"; }; */
+ /* timer1 { status = "disabled"; }; */
+ timer2 { status = "disabled"; };
+ timer3 { status = "disabled"; };
+ wdt1 { status = "disabled"; };
+ wdt2 { status = "disabled"; };
+ wdt3 { status = "disabled"; };
+ /* i2c-pld { status = "disabled"; }; */
+ spi { status = "disabled"; };
+ i2c-gen { status = "disabled"; };
+ /* gpio0 { status = "disabled"; }; */
+ /* gpio1 { status = "disabled"; }; */
+ /* gpio2 { status = "disabled"; }; */
+ /* gpio3 { status = "disabled"; }; */
+ /* gpio4 { status = "disabled"; }; */
+ /* gpio5 { status = "disabled"; }; */
+ /*uart0 { status = "disabled"; };*/
+ /*uart1 { status = "disabled"; };*/
+ uart2 { status = "disabled"; };
+ uart3 { status = "disabled"; };
+ nor_flash { status = "disabled"; };
+ /* nand-flash { status = "disabled"; }; */
+ /* al-fabric { status = "disabled"; }; */
+ /* pcie-internal { status = "disabled"; }; */
+ /* pcie-external0 { status = "disabled"; }; */
+ /* pcie-external1 { status = "disabled"; }; */
+ /* pcie-external2 { status = "disabled"; }; */
+ nand-flash {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ max-onfi-timing-mode = <1>;
+
+ partition@0 {
+ label = "al_boot";
+ reg = <0x00000000 0x00200000>;
+ };
+ partition@1 {
+ label = "device_tree";
+ reg = <0x00200000 0x00100000>;
+ };
+ partition@2 {
+ label = "linux_kernel";
+ reg = <0x00300000 0x00d00000>;
+ };
+ partition@3 {
+ label = "ubifs";
+ reg = <0x01000000 0x03f000000>;
+ };
+ };
+
+ i2c-pld {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c_mux@70 {
+ compatible = "pca9548";
+ reg = <0x70>;
+ };
+ };
+
+ spi {
+ /* cs-gpios = <&gpio0 4 0>; */
+
+ spiflash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi_flash_jedec_detection";
+ spi-max-frequency = <37500000>; /* 37.5MHz */
+ reg = <0>;
+
+ partition@0 {
+ reg = <0x0 0x00200000>; /* 2MB */
+ label = "spi_part0";
+ };
+
+ partition@1 {
+ reg = <0x00200000 0x00200000>; /* 2MB */
+ label = "spi_part1";
+ };
+
+ partition@2 {
+ reg = <0x00400000 0x00C00000>; /* 12MB */
+ label = "spi_part2";
+ };
+ };
+ };
+
+ };
+};
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9ee..e780afb 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
#define put_byte_3 lsl #0
#endif
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
/*
* Data preload for architectures that support it
*/
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c..b274bde 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_BUG
@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
#endif
@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
- asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
- asm volatile(BUG_INSTR_TYPE #__value); \
+ asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fc..37b9c61 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -17,6 +17,9 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
+#ifdef CONFIG_ARM_HWCC_FLAG
+ int hwcc; /* 1 - HW cache coherency, 0 - SW cache coherency */
+#endif
};
struct omap_device;
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 91b99ab..8726e20 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -48,6 +48,10 @@ extern void kunmap_high(struct page *page);
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif
+#if __LINUX_ARM_ARCH__ >= 7
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 0000000..d4014fb
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then mark the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ pte_t retval = *ptep;
+ if (pte_val(retval))
+ pte_val(retval) |= L_PTE_VALID;
+ return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 0000000..1f1b1cd
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+#include <asm/hugetlb-3level.h>
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b560..5a8bb8f 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -286,15 +286,32 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* IO port primitives for more information.
*/
#ifndef readl
+#ifdef CONFIG_AL_PCIE_RMN_1010
+uint32_t al_dma_read_reg32(const volatile void __iomem *address);
+uint16_t al_dma_read_reg16(const volatile void __iomem *address);
+uint8_t al_dma_read_reg8(const volatile void __iomem *address);
+
+#define readb_relaxed(c) (al_dma_read_reg8(c))
+#define readw_relaxed(c) (al_dma_read_reg16(c))
+#define readl_relaxed(c) (al_dma_read_reg32(c))
+
+#else
#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
+#endif
#define writeb_relaxed(v,c) __raw_writeb(v,c)
#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+
+#ifdef CONFIG_AL_PCIE_RMN_1010
+void al_dma_write_reg32(volatile void __iomem *address, u32 val);
+#define writel_relaxed(v,c) (al_dma_write_reg32(c,v))
+#else
#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+#endif
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce..0a9d5dd 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm(".word 0xe7ffdeff");
+ asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 472ac70..f47a132 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -64,7 +64,7 @@ void kvm_clear_hyp_idmap(void);
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
- pte_val(*pte) = new_pte;
+ pte_val(*pte) = pte_val(new_pte);
/*
* flush_pmd_entry just takes a void pointer and cleans the necessary
* cache entries, so we can reuse the function for ptes.
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 18f5cef..f088c86 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -30,6 +30,7 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
#define PMD_APTABLE_SHIFT (61)
@@ -41,6 +42,8 @@
*/
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -66,6 +69,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 86b8fe3..54733e5 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -62,6 +62,14 @@
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
* "Linux" PTE definitions for LPAE.
*
* These bits overlap with the hardware bits but the naming is preserved for
@@ -79,6 +87,11 @@
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
+#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+
/*
* To be used in assembly code with the upper page attributes.
*/
@@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
clean_pmd_entry(pmdp); \
} while (0)
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously leading ultimately to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
+ : pte_val(pte_a)) \
+ == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
+ : pte_val(pte_b)))
+
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+#define pmd_mknotpresent(pmd) (__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+ PMD_SECT_VALID | PMD_SECT_NONE;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ BUG_ON(addr >= TASK_SIZE);
+
+ /* create a faulting entry if PROT_NONE protected */
+ if (pmd_val(pmd) & PMD_SECT_NONE)
+ pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+ *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9bcd262..eaedce7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,9 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
+
+#include <asm/tlbflush.h>
+
#ifdef CONFIG_ARM_LPAE
#include
#else
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 6220e9f..f34cee4 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -71,6 +71,12 @@ static inline void dsb_sev(void)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#ifdef CONFIG_ARCH_ALPINE
+extern unsigned int al_spin_lock_wfe_enable;
+#else
+#define al_spin_lock_wfe_enable 1
+#endif
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -88,7 +94,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
- wfe();
+ if (al_spin_lock_wfe_enable)
+ wfe();
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}
@@ -121,7 +128,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->tickets.owner++;
- dsb_sev();
+ if (al_spin_lock_wfe_enable)
+ dsb_sev();
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index aa9b4ac..0baf7f0 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -207,6 +207,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#endif
}
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a3625d1..c374592 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -535,6 +535,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
}
#endif
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f760..17e1661 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -273,7 +273,9 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
+
#ifndef __ARMEB__
+#if !defined(CONFIG_VHOST_NET)
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
@@ -281,7 +283,26 @@ do { \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = __b1 | (__b2 << 8); \
})
-#else
+#else /* CONFIG_VHOST_NET */
+#define __get_user_asm_half(x,addr,err) \
+ __asm__ __volatile__( \
+ "1: " TUSER(ldrh) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, %3\n" \
+ " mov %1, #0\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection" \
+ : "+r" (err), "=&r" (x) \
+ : "r" (addr), "i" (-EFAULT) \
+ : "cc")
+#endif
+#else /* ARMEB */
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
@@ -291,6 +312,7 @@ do { \
})
#endif
+
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1,[%2],#0\n" \
@@ -355,13 +377,32 @@ do { \
: "cc")
#ifndef __ARMEB__
+#if !defined(CONFIG_VHOST_NET)
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
-#else
+#else /* CONFIG_VHOST_NET */
+ #define __put_user_asm_half(x,__pu_addr,err) \
+ __asm__ __volatile__( \
+ "1: " TUSER(strh) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, %3\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection" \
+ : "+r" (err) \
+ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
+ : "cc")
+#endif
+#else /* ARMEB */
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
diff --git a/arch/arm/include/debug/alpine.S b/arch/arm/include/debug/alpine.S
new file mode 100644
index 0000000..ec05952
--- /dev/null
+++ b/arch/arm/include/debug/alpine.S
@@ -0,0 +1,32 @@
+/*
+ * Early serial output macro for Alpine
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DEBUG_ALPINE_SB_BASE 0xfc000000
+#define DEBUG_ALPINE_PBS_OFFSET 0x01880000
+#define DEBUG_ALPINE_UART_OFFSET 0x3000
+
+ .macro addruart,rp,rv, tmp
+ mov \rp, #DEBUG_ALPINE_UART_OFFSET
+ orr \rp, \rp, #DEBUG_ALPINE_PBS_OFFSET
+ orr \rv, \rp, #DEBUG_ALPINE_SB_BASE @ virtual base
+ orr \rp, \rp, #DEBUG_ALPINE_SB_BASE @ physical base
+ .endm
+
+#define UART_SHIFT 2
+#include "8250_32.S"
+
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8b..57271c2 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -28,6 +28,27 @@
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
+#ifndef CONFIG_ARM_LPAE
+ const u64 sz_4g = 4 * (u64)SZ_1G;
+
+ if (base >= sz_4g) {
+ pr_info("Ignoring memory at 0x%08llx to fit in "
+ "32-bit physical address space\n", base);
+ return;
+ }
+
+ if ((base + size) >= sz_4g) {
+ pr_info("Truncating memory at 0x%08llx to fit in "
+ "32-bit physical address space\n", base);
+ /*
+ * Ensure 'base + size' fits 32 bits and yet size is aligned
+ * to page size (assuming base & size were aligned in the first
+ * place)
+ */
+ size = sz_4g - base - PAGE_SIZE;
+ }
+#endif
+
arm_add_memory(base, size);
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d43c7e5..3d0b390 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -415,9 +415,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r0, r0 @ little endian instruction
-#endif
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a..8c79344 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -379,9 +379,7 @@ ENTRY(vector_swi)
#else
ldr r10, [lr, #-4] @ get SWI instruction
#endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r10, r10 @ little endian instruction
-#endif
+ ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553..a20d6c8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
+ ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
@@ -351,6 +352,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
+
+ ARM_BE8(setend be) @ ensure we are in BE8 mode
+
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
@@ -584,8 +588,10 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
+ARM_BE8(rev16 ip, ip)
and ip, 0x8f00
orr ip, r6 @ mask in offset bits 31-24
+ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
@@ -594,8 +600,14 @@ __fixup_a_pv_table:
#else
b 2f
1: ldr ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ @ in BE8, we load data in BE, but instructions still in LE
+ bic ip, ip, #0xff000000
+ orr ip, ip, r6, lsl#24
+#else
bic ip, ip, #0x000000ff
orr ip, ip, r6 @ mask in offset bits 31-24
+#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1e9be5d..7e13787 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
Elf32_Sym *sym;
const char *symname;
s32 offset;
+ u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
#endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ offset = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
offset >>= 2;
+ offset &= 0x00ffffff;
- *(u32 *)loc &= 0xff000000;
- *(u32 *)loc |= offset & 0x00ffffff;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+ *(u32 *)loc |= __opcode_to_mem_arm(offset);
break;
case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
* other bits to re-code instruction as
* MOV PC,Rm.
*/
- *(u32 *)loc &= 0xf000000f;
- *(u32 *)loc |= 0x01a0f000;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+ *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
break;
case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- offset = *(u32 *)loc;
+ offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
- *(u32 *)loc &= 0xfff0f000;
- *(u32 *)loc |= ((offset & 0xf000) << 4) |
- (offset & 0x0fff);
+ tmp &= 0xfff0f000;
+ tmp |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+
+ *(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
- *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ upper = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
- *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
- (j1 << 13) | (j2 << 11) |
- ((offset >> 1) & 0x07ff));
+ lower = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
- *(u16 *)loc = (u16)((upper & 0xfbf0) |
- ((offset & 0xf000) >> 12) |
- ((offset & 0x0800) >> 1));
- *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
- ((offset & 0x0700) << 4) |
- (offset & 0x00ff));
+ upper = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ lower = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d39..c628e20 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -97,6 +97,8 @@ EXPORT_SYMBOL(system_serial_high);
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
+unsigned long cpu_clock_freq = 0;
+EXPORT_SYMBOL(cpu_clock_freq);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
@@ -891,15 +893,11 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
-#if defined(CONFIG_SMP)
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
- per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
- (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
-#else
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
- loops_per_jiffy / (500000/HZ),
- (loops_per_jiffy / (5000/HZ)) % 100);
-#endif
+ if (cpu_clock_freq)
+ seq_printf(m, "Speed\t\t: %lu.%01luGHz\n",
+ cpu_clock_freq / 1000000000,
+ (cpu_clock_freq / 100000000) % 10);
+
/* dump out the processor features */
seq_puts(m, "Features\t: ");
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf3..954c28a 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -81,6 +81,7 @@ ENDPROC(cpu_resume_after_mmu)
.data
.align
ENTRY(cpu_resume)
+ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_SMP
adr r0, sleep_save_sp
ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb4..dfe9918 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -375,17 +375,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
- int cpu;
- unsigned long bogosum = 0;
-
- for_each_online_cpu(cpu)
- bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
- printk(KERN_INFO "SMP: Total of %d processors activated "
- "(%lu.%02lu BogoMIPS).\n",
- num_online_cpus(),
- bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+ num_online_cpus());
hyp_mode_check();
}
@@ -514,7 +505,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 100;
+ evt->rating = 400;
evt->mult = 1;
evt->set_mode = broadcast_timer_set_mode;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a5954..8848c11 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -99,6 +99,7 @@ static void __init parse_dt_topology(void)
unsigned long max_capacity = 0;
unsigned long capacity = 0;
int alloc_size, cpu = 0;
+ extern unsigned long cpu_clock_freq;
alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
@@ -132,6 +133,14 @@ static void __init parse_dt_topology(void)
capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+ if (!cpu_clock_freq) {
+ cpu_clock_freq = be32_to_cpup(rate);
+ if (cpu_clock_freq)
+ pr_info("CPU speed: %lu.%01luGHz\n",
+ cpu_clock_freq / 1000000000,
+ (cpu_clock_freq / 100000000) % 10);
+ }
+
/* Save min capacity of the system */
if (capacity < min_capacity)
min_capacity = capacity;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6b9567e..9532c7c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -341,15 +341,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 33f2ea3..e10974c 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -100,6 +100,9 @@ SECTIONS
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT
+#ifdef CONFIG_FUNC_REORDER
+#include "functionlist"
+#endif
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 370e1a8..1ae2d45 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select ANON_INODES
select KVM_MMIO
select KVM_ARM_HOST
+ select HAVE_KVM_EVENTFD
depends on ARM_VIRT_EXT && ARM_LPAE
---help---
Support hosting virtualized guest machines. You will also
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 53c5ed8..16513b2 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_mmu.o := -I.
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
-kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 72a12f2..bb69475 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -106,6 +106,40 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 0;
}
+/**
+ * Check whether kvm_io_bus can handle this MMIO request
+ *
+ * Returns non-zero if the request has been handled by kvm_io_bus and
+ * does not need to be forwarded to user space.
+ */
+static int kvm_arm_mmio_read_write(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ int ret;
+ gpa_t addr = mmio->phys_addr;
+ int len = mmio->len;
+ void *v = mmio->data;
+
+ mutex_lock(&vcpu->kvm->slots_lock);
+ if (mmio->is_write)
+ ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
+ else
+ ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
+ mutex_unlock(&vcpu->kvm->slots_lock);
+
+ if (ret == 0) {
+ kvm_prepare_mmio(run, mmio);
+ kvm_handle_mmio_return(vcpu, run);
+ }
+
+ /*
+ * Returning 0 from kvm_io_bus_*() means the mmio request has been
+ * processed, but the callers handle_exit() and io_mem_abort()
+ * treat a zero return as "needs user space", so invert the result.
+ */
+ return !ret;
+}
+
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
@@ -141,6 +175,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (vgic_handle_mmio(vcpu, run, &mmio))
return 1;
+ if (kvm_arm_mmio_read_write(vcpu, run, &mmio))
+ return 1;
+
kvm_prepare_mmio(run, &mmio);
return 0;
}
diff --git a/arch/arm/mach-alpine/al_fabric.c b/arch/arm/mach-alpine/al_fabric.c
new file mode 100644
index 0000000..fe5bb56
--- /dev/null
+++ b/arch/arm/mach-alpine/al_fabric.c
@@ -0,0 +1,297 @@
+/*
+ * Annapurna labs fabric.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include <mach/al_fabric.h>
+#include "al_hal/al_hal_nb_regs.h"
+
+static struct of_device_id of_fabric_table[] = {
+ {.compatible = "annapurna-labs,al-fabric"},
+ { /* end of list */ },
+};
+
+static struct of_device_id of_nb_table[] = {
+ {.compatible = "annapurna-labs,al-nb-service"},
+ { /* end of list */ },
+};
+
+static struct of_device_id of_ccu_table[] = {
+ {.compatible = "annapurna-labs,al-ccu"},
+ { /* end of list */ },
+};
+
+struct sys_fabric_irq_struct {
+ unsigned int idx;
+ void __iomem *regs_base;
+ unsigned int irq_cause_base;
+ struct irq_chip_generic *irq_gc;
+};
+
+static struct sys_fabric_irq_struct sf_irq_arr[AL_FABRIC_INSTANCE_N];
+
+int al_fabric_hwcc;
+
+static int al_fabric_plat_device_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+#ifdef CONFIG_ARM_HWCC_FLAG
+ dev->archdata.hwcc = al_fabric_hwcc;
+#endif
+ dma_set_coherent_mask(dev, PHYS_MASK);
+
+ if (!al_fabric_hwcc)
+ return NOTIFY_OK;
+
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block al_fabric_plat_device_nb = {
+ .notifier_call = al_fabric_plat_device_notifier,
+};
+
+
+static int al_fabric_pci_device_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 temp;
+
+ if (event != BUS_NOTIFY_BIND_DRIVER)
+ return NOTIFY_DONE;
+
+#ifdef CONFIG_ARM_HWCC_FLAG
+ dev->archdata.hwcc = al_fabric_hwcc;
+#endif
+ dma_set_coherent_mask(dev, PHYS_MASK);
+
+ if (!al_fabric_hwcc)
+ return NOTIFY_OK;
+
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+
+ /* Force the PCIE adapter to set AXI attr to match CC*/
+ if(pci_domain_nr(pdev->bus) == 0) {
+ pci_read_config_dword(pdev, 0x110 ,&temp);
+ temp |= 0x3;
+ pci_write_config_dword(pdev, 0x110 ,temp);
+ /* Enable cache coherency for VFs (except USB and SATA) */
+ if (PCI_SLOT(pdev->devfn) < 6) {
+ pci_write_config_dword(pdev, 0x130 ,temp);
+ pci_write_config_dword(pdev, 0x150 ,temp);
+ pci_write_config_dword(pdev, 0x170 ,temp);
+ }
+
+ pci_read_config_dword(pdev, 0x220 ,&temp);
+ temp &= ~0xffff;
+ temp |= 0x3ff;
+ pci_write_config_dword(pdev, 0x220 ,temp);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block al_fabric_pci_device_nb = {
+ .notifier_call = al_fabric_pci_device_notifier,
+};
+
+static void sf_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ unsigned long pending, mask;
+ int offset;
+ struct sys_fabric_irq_struct *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct al_nb_regs *nb_regs = chip->regs_base;
+
+ chained_irq_enter(irqchip, desc);
+
+ mask = chip->irq_gc->mask_cache;
+ pending = readl(&nb_regs->global.nb_int_cause) & mask;
+
+ /* deassert pending edge-triggered irqs */
+ writel(~(pending & ~NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK),
+ &nb_regs->global.nb_int_cause);
+
+ /* handle pending irqs */
+ if (likely(pending)) {
+ int fabric_irq_base = al_fabric_get_cause_irq(chip->idx, 0);
+ for_each_set_bit(offset, &pending, AL_FABRIC_IRQ_N)
+ generic_handle_irq(fabric_irq_base + offset);
+ }
+
+ /* deassert pending level-triggered irqs */
+ writel(~(pending & NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK),
+ &nb_regs->global.nb_int_cause);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static void init_sf_irq_gc(struct sys_fabric_irq_struct *sfi)
+{
+ struct irq_chip_type *ct;
+
+ sfi->irq_gc = irq_alloc_generic_chip("alpine_sf_irq", 1,
+ sfi->irq_cause_base, sfi->regs_base, handle_simple_irq);
+ sfi->irq_gc->private = sfi;
+
+ ct = sfi->irq_gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.mask = offsetof(struct al_nb_regs,
+ cpun_config_status[sfi->idx].local_cause_mask);
+
+ /* clear the no request field so irq can be requested */
+ irq_setup_generic_chip(sfi->irq_gc, IRQ_MSK(AL_FABRIC_IRQ_N),
+ IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
+}
+
+static int init_sf_irq_struct(struct sys_fabric_irq_struct *sfi_arr,
+ unsigned int idx, void __iomem *regs_base)
+{
+ int ret;
+
+ pr_debug("[%s] entered with idx = %d, regs_base = %p\n",
+ __func__, idx, regs_base);
+ sfi_arr[idx].idx = idx;
+ sfi_arr[idx].regs_base = regs_base;
+ /* allocate irq descriptors for the cause interrupts */
+ ret = irq_alloc_descs(-1, 0, AL_FABRIC_IRQ_N, -1);
+ if (ret < 0) {
+ pr_err("[%s] Failed to allocate IRQ descriptors\n", __func__);
+ return ret;
+ }
+ sfi_arr[idx].irq_cause_base = ret;
+ init_sf_irq_gc(&sfi_arr[idx]);
+ return 0;
+}
+
+int al_fabric_get_cause_irq(unsigned int idx, int irq)
+{
+ return sf_irq_arr[idx].irq_cause_base + irq;
+}
+
+int al_fabric_hwcc_enabled(void)
+{
+ return al_fabric_hwcc;
+}
+EXPORT_SYMBOL(al_fabric_hwcc_enabled);
+
+int __init al_fabric_init(void)
+{
+ struct device_node *ccu_node;
+ struct device_node *nb_node;
+ void __iomem *nb_base_address;
+ void __iomem *ccu_address;
+ void __iomem *nb_service_base_address;
+ u32 prop;
+ bool dev_ord_relax;
+ int nb_serv_irq[AL_FABRIC_INSTANCE_N];
+ int i, ret;
+
+ pr_info("Initializing System Fabric\n");
+
+ nb_node = of_find_matching_node(NULL, of_nb_table);
+ ccu_node = of_find_matching_node(NULL, of_ccu_table);
+
+
+ if (!nb_node)
+ return -EINVAL;
+
+ if (ccu_node) {
+ /* new devicetree */
+ ccu_address = of_iomap(ccu_node, 0);
+ BUG_ON(!ccu_address);
+
+ dev_ord_relax =
+ !of_property_read_u32(nb_node, "dev_ord_relax", &prop)
+ && prop;
+ } else {
+ /* old devicetree */
+ ccu_node = of_find_matching_node(NULL, of_fabric_table);
+ if (!ccu_node)
+ return -EINVAL;
+ nb_base_address = of_iomap(ccu_node, 0);
+ BUG_ON(!nb_base_address);
+ ccu_address = nb_base_address + 0x90000;
+
+ dev_ord_relax =
+ !of_property_read_u32(ccu_node, "dev_ord_relax", &prop)
+ && prop;
+ }
+ if (ccu_node && of_device_is_available(ccu_node)) {
+ al_fabric_hwcc = !of_property_read_u32(ccu_node, "io_coherency", &prop)
+ && prop;
+
+ if (al_fabric_hwcc)
+ printk("Enabling IO Cache Coherency.\n");
+
+ al_ccu_init(ccu_address, al_fabric_hwcc);
+
+ bus_register_notifier(&platform_bus_type,
+ &al_fabric_plat_device_nb);
+ bus_register_notifier(&pci_bus_type,
+ &al_fabric_pci_device_nb);
+ }
+
+ if (nb_node) {
+ nb_service_base_address = of_iomap(nb_node, 0);
+ BUG_ON(!nb_service_base_address);
+ al_nbservice_init(nb_service_base_address, dev_ord_relax);
+
+ for (i = 0 ; i < AL_FABRIC_INSTANCE_N ; ++i) {
+ ret = init_sf_irq_struct(sf_irq_arr, i,
+ nb_service_base_address);
+ if (ret < 0) {
+ pr_err("[%s] Failed to initialize sys-fabric "
+ "irq struct\n", __func__);
+ return ret;
+ }
+ nb_serv_irq[i] = irq_of_parse_and_map(nb_node, i);
+ irq_set_chained_handler(nb_serv_irq[i], sf_irq_handler);
+ ret = irq_set_handler_data(nb_serv_irq[i],
+ &sf_irq_arr[i]);
+ if (ret < 0) {
+ pr_err("[%s] Failed to set irq handler data\n"
+ , __func__);
+ return ret;
+ }
+ }
+
+ }
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_ddr.c b/arch/arm/mach-alpine/al_hal/al_hal_ddr.c
new file mode 100644
index 0000000..def2c75
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_ddr.c
@@ -0,0 +1,1116 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @addtogroup groupddr
+ *
+ * @{
+ * @file al_hal_ddr.c
+ *
+ * @brief DDR controller & PHY HAL driver
+ *
+ */
+
+#include "al_hal_ddr.h"
+#include "al_hal_ddr_ctrl_regs.h"
+#include "al_hal_ddr_phy_regs.h"
+#include "al_hal_nb_regs.h"
+#include "al_hal_ddr_utils.h"
+
+/* Wait for PHY BIST to be done */
+static int al_ddr_phy_wait_for_bist_done(
+ struct al_ddr_phy_regs __iomem *phy_regs);
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_phy_datx_bist(
+ void __iomem *ddr_ctrl_regs_base,
+ void __iomem *ddr_phy_regs_base,
+ struct al_ddr_bist_params *params)
+{
+ int i;
+ int err;
+
+ struct al_ddr_phy_regs __iomem *phy_regs =
+ (struct al_ddr_phy_regs __iomem *)ddr_phy_regs_base;
+
+ uint32_t mode =
+ (params->mode == AL_DDR_BIST_MODE_LOOPBACK) ?
+ DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK :
+ DWC_DDR_PHY_REGS_BISTRR_BMODE_DRAM;
+
+ uint32_t pattern =
+ (params->pat == AL_DDR_BIST_PATTERN_WALK_0) ?
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 :
+ (params->pat == AL_DDR_BIST_PATTERN_WALK_1) ?
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 :
+ (params->pat == AL_DDR_BIST_PATTERN_LFSR) ?
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR :
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_USER;
+
+ al_ddr_phy_vt_calc_disable(phy_regs);
+
+ al_ddr_ctrl_stop(ddr_ctrl_regs_base);
+
+ /**
+ * Init BIST mode of operation
+ */
+
+ /* BISTUDPR */
+ _al_reg_write32_masked(
+ &phy_regs->BISTUDPR,
+ DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_MASK |
+ DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_MASK,
+ DWC_DDR_PHY_REGS_BISTUDPR_BUDP0(params->user_pat_even) |
+ DWC_DDR_PHY_REGS_BISTUDPR_BUDP1(params->user_pat_odd));
+
+ /* BISTWCR */
+ _al_reg_write32_masked(
+ &phy_regs->BISTWCR,
+ DWC_DDR_PHY_REGS_BISTWCR_BWCNT_MASK,
+ DWC_DDR_PHY_REGS_BISTWCR_BWCNT(params->wc));
+
+ /* BISTAR0 */
+ _al_reg_write32_masked(
+ &phy_regs->BISTAR[0],
+ DWC_DDR_PHY_REGS_BISTAR0_BCOL_MASK |
+ DWC_DDR_PHY_REGS_BISTAR0_BROW_MASK |
+ DWC_DDR_PHY_REGS_BISTAR0_BBANK_MASK,
+ DWC_DDR_PHY_REGS_BISTAR0_BCOL(params->col_min) |
+ DWC_DDR_PHY_REGS_BISTAR0_BROW(params->row_min) |
+ DWC_DDR_PHY_REGS_BISTAR0_BBANK(params->bank_min));
+
+ /* BISTAR1 */
+ _al_reg_write32_masked(
+ &phy_regs->BISTAR[1],
+ DWC_DDR_PHY_REGS_BISTAR1_BRANK_MASK |
+ DWC_DDR_PHY_REGS_BISTAR1_BMRANK_MASK |
+ DWC_DDR_PHY_REGS_BISTAR1_BAINC_MASK,
+ DWC_DDR_PHY_REGS_BISTAR1_BRANK(params->rank_min) |
+ DWC_DDR_PHY_REGS_BISTAR1_BMRANK(params->rank_max) |
+ DWC_DDR_PHY_REGS_BISTAR1_BAINC(params->inc));
+
+ /* BISTAR2 */
+ _al_reg_write32_masked(
+ &phy_regs->BISTAR[2],
+ DWC_DDR_PHY_REGS_BISTAR2_BMCOL_MASK |
+ DWC_DDR_PHY_REGS_BISTAR2_BMROW_MASK |
+ DWC_DDR_PHY_REGS_BISTAR2_BMBANK_MASK,
+ DWC_DDR_PHY_REGS_BISTAR2_BMCOL(params->col_max) |
+ DWC_DDR_PHY_REGS_BISTAR2_BMROW(params->row_max) |
+ DWC_DDR_PHY_REGS_BISTAR2_BMBANK(params->bank_max));
+
+ /* Run DATX8 BIST */
+ for (i = 0; i < AL_DDR_PHY_NUM_BYTE_LANES; i++) {
+ if (!params->active_byte_lanes[i])
+ continue;
+
+ /* Reset status */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_RESET);
+
+ /* Run BIST */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK |
+ DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK |
+ DWC_DDR_PHY_REGS_BISTRR_BDXEN |
+ DWC_DDR_PHY_REGS_BISTRR_BACEN |
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK |
+ DWC_DDR_PHY_REGS_BISTRR_BDXSEL_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_RUN |
+ mode |
+ DWC_DDR_PHY_REGS_BISTRR_BDXEN |
+ pattern |
+ DWC_DDR_PHY_REGS_BISTRR_BDXSEL(i));
+
+ al_data_memory_barrier();
+
+ /* Read BISTGSR for BIST done */
+ err = al_ddr_phy_wait_for_bist_done(phy_regs);
+ if (err) {
+ al_err(
+ "%s:%d: al_ddr_phy_wait_for_bist_done failed "
+ "(byte lane %d)!\n",
+ __func__,
+ __LINE__,
+ i);
+ return err;
+ }
+ }
+
+ /* stop BIST */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_NOP);
+
+ /* PGCR3 - after BIST re-apply power down of unused DQs */
+ _al_reg_write32_masked(
+ &phy_regs->PGCR[3],
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK |
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK |
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK,
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK |
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK |
+ DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK);
+
+ al_ddr_phy_vt_calc_enable(phy_regs);
+
+ al_ddr_ctrl_resume(ddr_ctrl_regs_base);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_phy_ac_bist(
+ void __iomem *ddr_phy_regs_base,
+ enum al_ddr_bist_pat pat)
+{
+ int err;
+
+ struct al_ddr_phy_regs __iomem *phy_regs =
+ (struct al_ddr_phy_regs __iomem *)ddr_phy_regs_base;
+
+ uint32_t pattern =
+ (pat == AL_DDR_BIST_PATTERN_WALK_0) ?
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 :
+ (pat == AL_DDR_BIST_PATTERN_WALK_1) ?
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 :
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR;
+
+ /* Run AC BIST */
+ /* Reset status */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_RESET);
+
+ /* Power up I/O receivers */
+ _al_reg_write32_masked(
+ &phy_regs->ACIOCR[0],
+ DWC_DDR_PHY_REGS_ACIOCR0_ACPDR |
+ DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR,
+ 0);
+
+ /* Loopback before buffer in I/O */
+ al_reg_write32_masked(
+ &phy_regs->PGCR[1],
+ DWC_DDR_PHY_REGS_PGCR1_IOLB,
+ DWC_DDR_PHY_REGS_PGCR1_IOLB);
+
+ /* Run BIST */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK |
+ DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK |
+ DWC_DDR_PHY_REGS_BISTRR_BDXEN |
+ DWC_DDR_PHY_REGS_BISTRR_BACEN |
+ DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_RUN |
+ DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK |
+ DWC_DDR_PHY_REGS_BISTRR_BACEN |
+ pattern);
+
+ al_data_memory_barrier();
+
+ /* Read BISTGSR for BIST done */
+ err = al_ddr_phy_wait_for_bist_done(phy_regs);
+ if (err) {
+ al_err(
+ "%s:%d: al_ddr_phy_wait_for_bist_done failed!\n",
+ __func__,
+ __LINE__);
+ return err;
+ }
+
+ /* Power down I/O receivers */
+ _al_reg_write32_masked(
+ &phy_regs->ACIOCR[0],
+ DWC_DDR_PHY_REGS_ACIOCR0_ACPDR |
+ DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR,
+ DWC_DDR_PHY_REGS_ACIOCR0_ACPDR |
+ DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK |
+ DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR);
+
+ /* stop BIST */
+ _al_reg_write32_masked(
+ &phy_regs->BISTRR,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_MASK,
+ DWC_DDR_PHY_REGS_BISTRR_BINST_NOP);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_power_mode_set(
+ void __iomem *ddr_ctrl_regs_base,
+ enum al_ddr_power_mode power_mode,
+ unsigned int timer_x32)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ uint32_t mode =
+ (power_mode == AL_DDR_POWERMODE_SELF_REFRESH) ?
+ DWC_DDR_UMCTL2_REGS_PWRCTL_SELFREF_EN :
+ (power_mode == AL_DDR_POWERMODE_POWER_DOWN) ?
+ DWC_DDR_UMCTL2_REGS_PWRCTL_POWERDOWN_EN :
+ 0;
+
+ /*
+ * Addressing RMN: 1037
+ *
+ * RMN description:
+ * In the current logic, it is possible for DRAM Read data and/or
+ * Write data to be active while/after one of the following occurs:
+ * Power Down Entry (PDE)
+ * Self Refresh Entry (SRE)
+ * This would violate the DDR3 memory protocol, which requires that
+ * "no data bursts are in progress" when the above commands occur.
+ * Software flow:
+ * For violations related to PDE, issue can be avoided by ensuring
+ * that timer_x32>1. For violations related to SRE, ensure that
+ * AL_DDR_POWERMODE_SELF_REFRESH is set only after all Read data has
+ * been returned on your application interface and all write data has
+ * reached the DRAM.
+ */
+
+ if (timer_x32 <= 1) {
+ al_err(
+ "%s:%d: power mode timer must be greater than 1!\n",
+ __func__,
+ __LINE__);
+ return -EIO;
+ }
+
+ al_reg_write32(&ctrl_regs->pwrtmg,
+ timer_x32 << DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_SHIFT);
+
+ al_reg_write32(&ctrl_regs->pwrctl, mode);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+enum al_ddr_operating_mode al_ddr_operating_mode_get(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+ uint32_t reg_val;
+ enum al_ddr_operating_mode operating_mode;
+
+ reg_val = al_reg_read32(&ctrl_regs->stat);
+ reg_val &= DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK;
+
+ operating_mode =
+ (reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_INIT) ?
+ AL_DDR_OPERATING_MODE_INIT :
+ (reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL) ?
+ AL_DDR_OPERATING_MODE_NORMAL :
+ (reg_val == DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_POWER_DOWN) ?
+ AL_DDR_OPERATING_MODE_POWER_DOWN :
+ AL_DDR_OPERATING_MODE_SELF_REFRESH;
+
+ return operating_mode;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+static int al_ddr_phy_wait_for_bist_done(
+ struct al_ddr_phy_regs __iomem *phy_regs)
+{
+ int err;
+ uint32_t reg_val;
+
+ err = al_ddr_reg_poll32(
+ &phy_regs->BISTGSR,
+ DWC_DDR_PHY_REGS_BISTGSR_BDONE,
+ DWC_DDR_PHY_REGS_BISTGSR_BDONE,
+ DEFAULT_TIMEOUT);
+
+ if (err) {
+ al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+ return err;
+ }
+
+ reg_val = al_reg_read32(&phy_regs->BISTGSR);
+ /* Make sure no bist errors */
+ if (reg_val &
+ (DWC_DDR_PHY_REGS_BISTGSR_BACERR |
+ DWC_DDR_PHY_REGS_BISTGSR_BDXERR)) {
+ al_err("%s: PHY bist error (BISTGSR = %X)!\n",
+ __func__, reg_val);
+ al_dbg("%s: (BISTWER = %X)!\n",
+ __func__, al_reg_read32(&phy_regs->BISTWER));
+ al_dbg("%s: (BISTBER2 = %X)!\n",
+ __func__, al_reg_read32(&phy_regs->BISTBER[2]));
+ al_dbg("%s: (BISTBER3 = %X)!\n",
+ __func__, al_reg_read32(&phy_regs->BISTBER[3]));
+ al_dbg("%s: (BISTWCSR = %X)!\n",
+ __func__, al_reg_read32(&phy_regs->BISTWCSR));
+ al_dbg("%s: (BISTFWR2 = %X)!\n",
+ __func__, al_reg_read32(&phy_regs->BISTFWR[2]));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+unsigned int al_ddr_active_ranks_get(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+ uint32_t mstr_val;
+ unsigned int active_ranks = 0;
+
+ mstr_val = al_reg_read32(&ctrl_regs->mstr);
+ mstr_val &= DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_MASK;
+ mstr_val >>= DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_SHIFT;
+
+ /* each bit on mstr_val is corresponding to an available rank */
+ while(mstr_val > 0) {
+ active_ranks += 1;
+ mstr_val >>= 1;
+ }
+
+ return active_ranks;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_status_get(
+ void __iomem *ddr_ctrl_regs_base,
+ struct al_ddr_ecc_status *corr_status,
+ struct al_ddr_ecc_status *uncorr_status)
+{
+ uint32_t reg_val;
+
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ /* Correctable status */
+ if (corr_status) {
+ reg_val = al_reg_read32(&ctrl_regs->eccstat);
+ corr_status->ecc_corrected_bit_num = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->eccerrcnt);
+ corr_status->err_cnt = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->ecccaddr0);
+ corr_status->row = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_SHIFT;
+ corr_status->rank = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->ecccaddr1);
+ corr_status->bank = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_SHIFT;
+ corr_status->col = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_SHIFT;
+
+ corr_status->syndromes_31_0 = al_reg_read32(&ctrl_regs->ecccsyn0);
+ corr_status->syndromes_63_32 = al_reg_read32(&ctrl_regs->ecccsyn1);
+ reg_val = al_reg_read32(&ctrl_regs->ecccsyn2);
+ corr_status->syndromes_ecc = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_SHIFT;
+
+ corr_status->corr_bit_mask_31_0 =
+ al_reg_read32(&ctrl_regs->eccbitmask0);
+ corr_status->corr_bit_mask_63_32 =
+ al_reg_read32(&ctrl_regs->eccbitmask1);
+ reg_val = al_reg_read32(&ctrl_regs->eccbitmask2);
+ corr_status->corr_bit_mask_ecc = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_MASK) >>
+ DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_SHIFT;
+ }
+
+ /* Uncorrectable status */
+ if (uncorr_status) {
+ reg_val = al_reg_read32(&ctrl_regs->eccerrcnt);
+ uncorr_status->err_cnt = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->eccuaddr0);
+ uncorr_status->row = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_SHIFT;
+ uncorr_status->rank = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->eccuaddr1);
+ uncorr_status->bank = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_SHIFT;
+ uncorr_status->col = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_SHIFT;
+
+ uncorr_status->syndromes_31_0 = al_reg_read32(&ctrl_regs->eccusyn0);
+ uncorr_status->syndromes_63_32 = al_reg_read32(&ctrl_regs->eccusyn1);
+ reg_val = al_reg_read32(&ctrl_regs->eccusyn2);
+ uncorr_status->syndromes_ecc = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_MASK) >>
+ DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_SHIFT;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+void al_ddr_ecc_cfg_get(
+ void __iomem *ddr_ctrl_regs_base,
+ struct al_ddr_ecc_cfg *cfg)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+ uint32_t cfg_val;
+
+ cfg_val = al_reg_read32(&ctrl_regs->ecccfg0);
+
+ cfg->ecc_enabled =
+ ((cfg_val & DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_MASK) ==
+ DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_DIS) ? AL_FALSE : AL_TRUE;
+
+ /* dis_scrub is valid only when ecc mode is enabled */
+ if (cfg->ecc_enabled)
+ cfg->scrub_enabled =
+ (cfg_val & DWC_DDR_UMCTL2_REGS_ECCCFG0_DIS_SCRUB) ?
+ AL_FALSE : AL_TRUE;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_corr_count_clear(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ al_reg_write32(&ctrl_regs->eccclr,
+ DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR_CNT);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_corr_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ al_reg_write32(&ctrl_regs->eccclr,
+ DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR);
+
+ if (nb_regs_base) {
+ struct al_nb_regs __iomem *nb_regs;
+
+ al_data_memory_barrier();
+
+ nb_regs = (struct al_nb_regs __iomem *)nb_regs_base;
+ al_reg_write32(&nb_regs->global.nb_int_cause,
+ ~NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_CORR_ERR);
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_uncorr_count_clear(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ al_reg_write32(&ctrl_regs->eccclr,
+ DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR_CNT);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_uncorr_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ al_reg_write32(&ctrl_regs->eccclr,
+ DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR);
+
+ if (nb_regs_base) {
+ struct al_nb_regs __iomem *nb_regs;
+
+ al_data_memory_barrier();
+
+ nb_regs = (struct al_nb_regs __iomem *)nb_regs_base;
+ al_reg_write32(&nb_regs->global.nb_int_cause,
+ ~NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_UNCORR_ERR);
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_data_poison_enable(
+ void __iomem *ddr_ctrl_regs_base,
+ unsigned int rank,
+ unsigned int bank,
+ unsigned int col,
+ unsigned int row)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ /* Set data poison address */
+ al_reg_write32(&ctrl_regs->eccpoisonaddr0,
+ (col <<
+ DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_SHIFT) |
+ (rank <<
+ DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_SHIFT));
+ al_reg_write32(&ctrl_regs->eccpoisonaddr1,
+ (row <<
+ DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_SHIFT) |
+ (bank <<
+ DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_SHIFT));
+
+ /* Enable data poisoning */
+ al_reg_write32(&ctrl_regs->ecccfg1,
+ DWC_DDR_UMCTL2_REGS_ECCCFG1_DATA_POISON);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_ecc_data_poison_disable(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ /* Disable data poisoning */
+ al_reg_write32(&ctrl_regs->ecccfg1, 0);
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+unsigned int al_ddr_parity_count_get(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ uint32_t reg_val;
+ unsigned int parity_count;
+
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ reg_val = al_reg_read32(&ctrl_regs->parstat);
+ parity_count = (reg_val &
+ DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_MASK)
+ >> DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_SHIFT;
+
+ return parity_count;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+void al_ddr_parity_count_clear(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ al_reg_write32_masked(&ctrl_regs->parctl,
+ DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR,
+ DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR);
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+void al_ddr_parity_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ struct al_nb_regs __iomem *nb_regs =
+ (struct al_nb_regs __iomem *)nb_regs_base;
+
+ al_reg_write32_masked(&ctrl_regs->parctl,
+ DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR,
+ DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR);
+
+ al_data_memory_barrier();
+
+ al_reg_write32(&nb_regs->global.nb_int_cause,
+ ~NB_GLOBAL_NB_INT_CAUSE_MCTL_PARITY_ERR);
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+static int al_ddr_address_map_get(
+ void __iomem *ddr_ctrl_regs_base,
+ struct al_ddr_addrmap *addrmap)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ uint32_t reg_val;
+ int addrmap_col_b2_11[10];
+ int addrmap_bank_b0_2[3];
+ int addrmap_row_b0_2_10[3];
+ int addrmap_row_b11_15[5];
+ int addrmap_cs_b0_1[2];
+
+ unsigned int i;
+
+ enum al_ddr_data_width data_width =
+ al_ddr_data_width_get(ddr_ctrl_regs_base);
+
+ /**
+ * CS address mapping
+ */
+ reg_val = al_reg_read32(&ctrl_regs->addrmap0);
+ addrmap_cs_b0_1[1] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_SHIFT;
+ addrmap_cs_b0_1[0] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_SHIFT;
+
+ /**
+ * Bank address mapping
+ */
+ reg_val = al_reg_read32(&ctrl_regs->addrmap1);
+ addrmap_bank_b0_2[2] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_SHIFT;
+ addrmap_bank_b0_2[1] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_SHIFT;
+ addrmap_bank_b0_2[0] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_SHIFT;
+
+ /**
+ * Column address mapping
+ */
+ reg_val = al_reg_read32(&ctrl_regs->addrmap2);
+ addrmap_col_b2_11[3] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_SHIFT;
+ addrmap_col_b2_11[2] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_SHIFT;
+ addrmap_col_b2_11[1] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_SHIFT;
+ addrmap_col_b2_11[0] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->addrmap3);
+ addrmap_col_b2_11[7] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_SHIFT;
+ addrmap_col_b2_11[6] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_SHIFT;
+ addrmap_col_b2_11[5] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_SHIFT;
+ addrmap_col_b2_11[4] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->addrmap4);
+ addrmap_col_b2_11[9] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_SHIFT;
+ addrmap_col_b2_11[8] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_SHIFT;
+
+ /**
+ * Row address mapping
+ */
+ reg_val = al_reg_read32(&ctrl_regs->addrmap5);
+ addrmap_row_b11_15[0] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_SHIFT;
+ addrmap_row_b0_2_10[2] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_SHIFT;
+ addrmap_row_b0_2_10[1] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_SHIFT;
+ addrmap_row_b0_2_10[0] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_SHIFT;
+
+ reg_val = al_reg_read32(&ctrl_regs->addrmap6);
+ addrmap_row_b11_15[4] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_SHIFT;
+ addrmap_row_b11_15[3] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_SHIFT;
+ addrmap_row_b11_15[2] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_SHIFT;
+ addrmap_row_b11_15[1] = (reg_val &
+ DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_MASK)
+ >> DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_SHIFT;
+
+ /* Column */
+ for (i = 0; i < (AL_ARR_SIZE(addrmap->col_b3_9_b11_13) - 1); i++) {
+ int user_val;
+
+ user_val = addrmap_col_b2_11[i];
+
+ if (data_width == AL_DDR_DATA_WIDTH_64_BITS)
+ addrmap->col_b3_9_b11_13[i] =
+ (user_val == AL_DDR_ADDR_MAP_COL_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_COL_2_BASE + i);
+ else
+ addrmap->col_b3_9_b11_13[i + 1] =
+ (user_val == AL_DDR_ADDR_MAP_COL_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_COL_2_BASE + i);
+ }
+
+ if (data_width == AL_DDR_DATA_WIDTH_64_BITS)
+ addrmap->col_b3_9_b11_13[i] = AL_DDR_ADDRMAP_NC;
+ if (data_width == AL_DDR_DATA_WIDTH_32_BITS)
+ addrmap->col_b3_9_b11_13[0] = 5;
+
+ /* Bank */
+ for (i = 0; i < AL_ARR_SIZE(addrmap->bank_b0_2); i++) {
+ int user_val = addrmap_bank_b0_2[i];
+
+ addrmap->bank_b0_2[i] =
+ (user_val == AL_DDR_ADDR_MAP_BANK_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_BANK_0_BASE + i);
+ }
+
+ /* CS */
+ for (i = 0; i < AL_ARR_SIZE(addrmap->cs_b0_1); i++) {
+ int user_val = addrmap_cs_b0_1[i];
+
+ addrmap->cs_b0_1[i] =
+ (user_val == AL_DDR_ADDR_MAP_CS_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_CS_0_BASE + i);
+ }
+
+ /* Row */
+ for (i = 0; i < AL_ARR_SIZE(addrmap->row_b0_2_10); i++) {
+ int user_val = addrmap_row_b0_2_10[i];
+
+ addrmap->row_b0_2_10[i] =
+ (user_val == AL_DDR_ADDR_MAP_ROW_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_ROW_0_BASE + i);
+ }
+
+ for (i = 0; i < AL_ARR_SIZE(addrmap->row_b11_15); i++) {
+ int user_val = addrmap_row_b11_15[i];
+
+ addrmap->row_b11_15[i] =
+ (user_val == AL_DDR_ADDR_MAP_ROW_DISABLED) ?
+ AL_DDR_ADDRMAP_NC :
+ (user_val + AL_DDR_ADDR_MAP_OFFSET +
+ AL_DDR_ADDR_MAP_ROW_11_BASE + i);
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+enum al_ddr_data_width al_ddr_data_width_get(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+ uint32_t reg_val;
+ enum al_ddr_data_width data_width;
+
+ reg_val = al_reg_read32(&ctrl_regs->mstr);
+ reg_val &= DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_MASK;
+
+ data_width =
+ (reg_val == DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_64) ?
+ AL_DDR_DATA_WIDTH_64_BITS :
+ AL_DDR_DATA_WIDTH_32_BITS;
+
+ return data_width;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_address_translate_sys2dram(
+ void __iomem *ddr_ctrl_regs_base,
+ al_phys_addr_t sys_address,
+ unsigned int *rank,
+ unsigned int *bank,
+ unsigned int *col,
+ unsigned int *row)
+{
+
+ int i;
+ unsigned int temp_rank = 0;
+ unsigned int temp_bank = 0;
+ unsigned int temp_col = 0;
+ unsigned int temp_row = 0;
+ struct al_ddr_addrmap addrmap;
+
+ enum al_ddr_data_width data_width =
+ al_ddr_data_width_get(ddr_ctrl_regs_base);
+
+ al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap);
+
+ if (data_width == AL_DDR_DATA_WIDTH_64_BITS)
+ temp_col += ((sys_address >> 3) & 0x7);
+ else
+ temp_col += ((sys_address >> 2) & 0x7);
+
+ for (i = 0; i < 7; i++)
+ if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC){
+ temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[i]) & 0x1) << (i + 3));
+ }
+ if (addrmap.col_b3_9_b11_13[7] != AL_DDR_ADDRMAP_NC)
+ temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[7]) & 0x1) << 11);
+ if (addrmap.col_b3_9_b11_13[8] != AL_DDR_ADDRMAP_NC)
+ temp_col += (((sys_address >> addrmap.col_b3_9_b11_13[8]) & 0x1) << 13);
+
+ for (i = 0; i < 3; i++)
+ if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC){
+ temp_bank += (((sys_address >> addrmap.bank_b0_2[i]) & 0x1) << i);
+ }
+
+ for (i = 0; i < 2; i++)
+ if (addrmap.row_b0_2_10[i] != AL_DDR_ADDRMAP_NC){
+ temp_row += (((sys_address >> addrmap.row_b0_2_10[i]) & 0x1) << i);
+ }
+
+ if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC)
+ for (i = 0; i < 9; i++){
+ temp_row += (((sys_address >> (addrmap.row_b0_2_10[2] + i)) & 0x1) << (i + 2));
+ }
+
+ for (i = 0; i < 5; i++)
+ if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC){
+ temp_row += (((sys_address >> addrmap.row_b11_15[i]) & 0x1) << (i + 11));
+ }
+
+ for (i = 0; i < 2; i++)
+ if (addrmap.cs_b0_1[i] != AL_DDR_ADDRMAP_NC){
+ temp_rank += (((sys_address >> addrmap.cs_b0_1[i]) & 0x1) << i);
+ }
+
+ *rank = temp_rank;
+ *bank = temp_bank;
+ *col = temp_col;
+ *row = temp_row;
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+int al_ddr_address_translate_dram2sys(
+ void __iomem *ddr_ctrl_regs_base,
+ al_phys_addr_t *sys_address,
+ unsigned int rank,
+ unsigned int bank,
+ unsigned int col,
+ unsigned int row)
+{
+ int i;
+ struct al_ddr_addrmap addrmap;
+ al_phys_addr_t address = 0;
+
+ enum al_ddr_data_width data_width =
+ al_ddr_data_width_get(ddr_ctrl_regs_base);
+ al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap);
+
+ if (data_width == AL_DDR_DATA_WIDTH_64_BITS)
+ address += ((col & 0x7) << 3);
+ else
+ address += ((col & 0x7) << 2);
+
+ for (i = 0; i < 7; i++)
+ if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC){
+ address += ((((al_phys_addr_t)col >> (i + 3)) & 0x1) << addrmap.col_b3_9_b11_13[i]);
+ }
+ if (addrmap.col_b3_9_b11_13[7] != AL_DDR_ADDRMAP_NC)
+ address += ((((al_phys_addr_t)col >> 11) & 0x1) << addrmap.col_b3_9_b11_13[7]);
+ if (addrmap.col_b3_9_b11_13[8] != AL_DDR_ADDRMAP_NC)
+ address += ((((al_phys_addr_t)col >> 13) & 0x1) << addrmap.col_b3_9_b11_13[8]);
+
+ for (i = 0; i < 3; i++)
+ if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC){
+ address += ((((al_phys_addr_t)bank >> (i)) & 0x1) << addrmap.bank_b0_2[i]);
+ }
+
+ for (i = 0; i < 2; i++)
+ if (addrmap.row_b0_2_10[i] != AL_DDR_ADDRMAP_NC){
+ address += ((((al_phys_addr_t)row >> (i)) & 0x1) << addrmap.row_b0_2_10[i]);
+ }
+
+ if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC)
+ for (i = 0; i < 9; i++){
+ address += ((((al_phys_addr_t)row >> (i + 2)) & 0x1) << (addrmap.row_b0_2_10[2] + i));
+ }
+
+
+ for (i = 0; i < 5; i++)
+ if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC){
+ address += ((((al_phys_addr_t)row >> (i + 11)) & 0x1) << addrmap.row_b11_15[i]);
+ }
+
+ for (i = 0; i < 2; i++)
+ if (addrmap.cs_b0_1[i] != AL_DDR_ADDRMAP_NC){
+ address += ((((al_phys_addr_t)rank >> (i)) & 0x1) << addrmap.cs_b0_1[i]);
+ }
+
+ *sys_address = address;
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+unsigned int al_ddr_bits_per_rank_get(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ int i, active_bits = 0;
+ struct al_ddr_addrmap addrmap;
+ enum al_ddr_data_width data_width =
+ al_ddr_data_width_get(ddr_ctrl_regs_base);
+
+ al_ddr_address_map_get(ddr_ctrl_regs_base,&addrmap);
+
+ /* 64bit systems have a 6bit offset, 32bit systems have a 5bit offset */
+ if (data_width == AL_DDR_DATA_WIDTH_64_BITS)
+ active_bits += 6;
+ else
+ active_bits += 5;
+
+ /* iterate over addrmap, count the amount of connected bits */
+ for (i = 0; i < 9; i++)
+ if (addrmap.col_b3_9_b11_13[i] != AL_DDR_ADDRMAP_NC)
+ active_bits++;
+
+ for (i = 0; i < 3; i++)
+ if (addrmap.bank_b0_2[i] != AL_DDR_ADDRMAP_NC)
+ active_bits++;
+
+ for (i = 0; i < 2; i++)
+ if (addrmap.row_b0_2_10[i] != AL_DDR_ADDRMAP_NC)
+ active_bits++;
+
+ if (addrmap.row_b0_2_10[2] != AL_DDR_ADDRMAP_NC)
+ active_bits += 9;
+
+ for (i = 0; i < 5; i++)
+ if (addrmap.row_b11_15[i] != AL_DDR_ADDRMAP_NC)
+ active_bits++;
+
+ return active_bits;
+}
+
+/** @} end of DDR group */
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_ddr_ctrl_regs.h b/arch/arm/mach-alpine/al_hal/al_hal_ddr_ctrl_regs.h
new file mode 100644
index 0000000..0191b97
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_ddr_ctrl_regs.h
@@ -0,0 +1,1335 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @addtogroup groupddr
+ *
+ * @{
+ * @file al_hal_ddr_ctrl_regs.h
+ *
+ * @brief DDR controller registers
+ *
+ */
+
+#ifndef __AL_HAL_DDR_CTRL_REGS_H__
+#define __AL_HAL_DDR_CTRL_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+struct al_dwc_ddr_umctl2_regs {
+ uint32_t mstr; /* Master Register */
+ uint32_t stat; /* Operating Mode Status Regi ... */
+ uint32_t rsrvd_0[2];
+ uint32_t mrctrl0; /* Mode Register Read/Write C ... */
+ uint32_t mrctrl1; /* Mode Register Read/Write C ... */
+ uint32_t mrstat; /* Mode Register Read/Write S ... */
+ uint32_t rsrvd_1[5];
+ uint32_t pwrctl; /* Low Power Control Register ... */
+ uint32_t pwrtmg; /* Low Power Timing Register */
+ uint32_t rsrvd_2[6];
+ uint32_t rfshctl0; /* Refresh Control Register 0 ... */
+ uint32_t rfshctl1; /* Refresh Control Register 1 ... */
+ uint32_t rfshctl2; /* Refresh Control Register 2 ... */
+ uint32_t rsrvd_3;
+ uint32_t rfshctl3; /* Refresh Control Register 0 ... */
+ uint32_t rfshtmg; /* Refresh Timing Register */
+ uint32_t rsrvd_4[2];
+ uint32_t ecccfg0; /* ECC Configuration Register ... */
+ uint32_t ecccfg1; /* ECC Configuration Register ... */
+ uint32_t eccstat; /* ECC Status Register */
+ uint32_t eccclr; /* ECC Clear Register */
+ uint32_t eccerrcnt; /* ECC Error Counter Register ... */
+ uint32_t ecccaddr0; /* ECC Corrected Error Addres ... */
+ uint32_t ecccaddr1; /* ECC Corrected Error Addres ... */
+ uint32_t ecccsyn0; /* ECC Corrected Syndrome Reg ... */
+ uint32_t ecccsyn1; /* ECC Corrected Syndrome Reg ... */
+ uint32_t ecccsyn2; /* ECC Corrected Syndrome Reg ... */
+ uint32_t eccbitmask0; /* ECC Corrected Data Bit Mas ... */
+ uint32_t eccbitmask1; /* ECC Corrected Data Bit Mas ... */
+ uint32_t eccbitmask2; /* ECC Corrected Data Bit Mas ... */
+ uint32_t eccuaddr0; /* ECC Uncorrected Error Addr ... */
+	uint32_t eccuaddr1;		/* ECC Uncorrected Error Addre ... */
+	uint32_t eccusyn0;		/* ECC Uncorrected Syndrome Re ... */
+ uint32_t eccusyn1; /* ECC Uncorrected Syndrome R ... */
+ uint32_t eccusyn2; /* ECC Uncorrected Syndrome R ... */
+ uint32_t eccpoisonaddr0; /* ECC Data Poisoning Address ... */
+ uint32_t eccpoisonaddr1; /* ECC Data Poisoning Address ... */
+ uint32_t parctl; /* Parity Control Register */
+ uint32_t parstat; /* Parity Status Register */
+ uint32_t rsrvd_5[2];
+ uint32_t init0; /* SDRAM Initialization Regis ... */
+ uint32_t init1; /* SDRAM Initialization Regis ... */
+ uint32_t rsrvd_6;
+ uint32_t init3; /* SDRAM Initialization Regis ... */
+ uint32_t init4; /* SDRAM Initialization Regis ... */
+ uint32_t init5; /* SDRAM Initialization Regis ... */
+ uint32_t rsrvd_7[2];
+ uint32_t dimmctl; /* DIMM Control Register */
+ uint32_t rankctl; /* Rank Control Register */
+ uint32_t rsrvd_8[2];
+ uint32_t dramtmg0; /* SDRAM Timing Register 0 */
+ uint32_t dramtmg1; /* SDRAM Timing Register 1 */
+ uint32_t dramtmg2; /* SDRAM Timing Register 2 */
+ uint32_t dramtmg3; /* SDRAM Timing Register 3 */
+ uint32_t dramtmg4; /* SDRAM Timing Register 4 */
+ uint32_t dramtmg5; /* SDRAM Timing Register 5 */
+ uint32_t rsrvd_9[2];
+ uint32_t dramtmg8; /* SDRAM Timing Register 8 */
+ uint32_t rsrvd_10[23];
+ uint32_t zqctl0; /* ZQ Control Register 0 */
+ uint32_t zqctl1; /* ZQ Control Register 1 */
+ uint32_t rsrvd_11[2];
+ uint32_t dfitmg0; /* DFI Timing Register 0 */
+ uint32_t dfitmg1; /* DFI Timing Register 1 */
+ uint32_t rsrvd_12[2];
+ uint32_t dfiupd0; /* DFI Update Register 0 */
+ uint32_t dfiupd1; /* DFI Update Register 1 */
+ uint32_t dfiupd2; /* DFI Update Register 2 */
+ uint32_t dfiupd3; /* DFI Update Register 3 */
+ uint32_t dfimisc; /* DFI Miscellaneous Control ... */
+ uint32_t rsrvd_13[19];
+ uint32_t addrmap0; /* Address Map Register 0 */
+ uint32_t addrmap1; /* Address Map Register 1 */
+ uint32_t addrmap2; /* Address Map Register 2 */
+ uint32_t addrmap3; /* Address Map Register 3 */
+ uint32_t addrmap4; /* Address Map Register 4 */
+ uint32_t addrmap5; /* Address Map Register 5 */
+ uint32_t addrmap6; /* Address Map Register 6 */
+ uint32_t rsrvd_14[9];
+ uint32_t odtcfg; /* ODT Configuration Register ... */
+ uint32_t odtmap; /* ODT/Rank Map Register */
+ uint32_t rsrvd_15[2];
+ uint32_t sched; /* Scheduler Control Register ... */
+ uint32_t rsrvd_16;
+ uint32_t perfhpr0; /* High Priority Read CAM Reg ... */
+ uint32_t perfhpr1; /* High Priority Read CAM Reg ... */
+ uint32_t perflpr0; /* Low Priority Read CAM Regi ... */
+ uint32_t perflpr1; /* Low Priority Read CAM Regi ... */
+ uint32_t perfwr0; /* Write CAM Register 0 */
+ uint32_t perfwr1; /* Write CAM Register 1 */
+ uint32_t rsrvd_17[36];
+ uint32_t dbg0; /* Debug Register 0 */
+ uint32_t dbg1; /* Debug Register 1 */
+ uint32_t dbgcam; /* CAM Debug Register */
+ uint32_t rsrvd[61];
+};
+struct al_dwc_ddr_umctl2_mp {
+ uint32_t pccfg; /* Port Common Configuration ... */
+ uint32_t pcfgr_0; /* Port 0 Configuration Read ... */
+ uint32_t pcfgw_0; /* Port 0 Configuration Write ... */
+ uint32_t pcfgidmaskch0_0; /* Port 0 Channel 0 Configura ... */
+ uint32_t pcfgidvaluech0_0; /* Port 0 Channel 0 Configura ... */
+ uint32_t rsrvd[1787];
+};
+
+struct al_ddr_ctrl_regs {
+ struct al_dwc_ddr_umctl2_regs umctl2_regs;
+ struct al_dwc_ddr_umctl2_mp umctl2_mp;
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** MSTR register ****/
+/* Select DDR3 SDRAM - 1 - DDR3 operating mode - 0 - DDR2 opera ... */
+#define DWC_DDR_UMCTL2_REGS_MSTR_DDR3 (1 << 0)
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED_MASK 0x000000FE
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED_SHIFT 1
+/* Indicates burst mode */
+#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_MODE (1 << 8)
+/* When set, enable burst-chop in DDR3. */
+#define DWC_DDR_UMCTL2_REGS_MSTR_BURSTCHOP (1 << 9)
+/* If 1, then uMCTL2 uses 2T timing */
+#define DWC_DDR_UMCTL2_REGS_MSTR_EN_2T_TIMING_MODE (1 << 10)
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED1 (1 << 11)
+/* Selects proportion of DQ bus width that is used by the SDRAM ... */
+#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_MASK 0x00003000
+#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT 12
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_64 \
+ (0 << DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT)
+#define DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_32 \
+ (1 << DWC_DDR_UMCTL2_REGS_MSTR_DATA_BUS_WIDTH_SHIFT)
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED2_MASK 0x0000C000
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED2_SHIFT 14
+/* SDRAM burst length used: - 0001 - Burst length of 2 (only su ... */
+#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_RDWR_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_MSTR_BURST_RDWR_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED3_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED3_SHIFT 20
+/* Only present for multi-rank configurations */
+#define DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_MSTR_ACTIVE_RANKS_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED4_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_MSTR_RESERVED4_SHIFT 28
+
+/**** STAT register ****/
+/* Operating mode */
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK 0x00000003
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_INIT \
+ (0 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT)
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL \
+ (1 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT)
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_POWER_DOWN \
+ (2 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT)
+#define DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SELF_REFRESH \
+ (3 << DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_SHIFT)
+
+#define DWC_DDR_UMCTL2_REGS_STAT_RESERVED_MASK 0xFFFFFFFC
+#define DWC_DDR_UMCTL2_REGS_STAT_RESERVED_SHIFT 2
+
+/**** MRCTRL0 register ****/
+
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED_SHIFT 0
+/* Controls which rank is accessed by MRCTRL0 */
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_RANK_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_RANK_SHIFT 4
+
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED1_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED1_SHIFT 8
+/* Address of the mode register that is to be written to */
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_ADDR_MASK 0x00007000
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_ADDR_SHIFT 12
+
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED2_MASK 0x7FFF8000
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_RESERVED2_SHIFT 15
+/* Setting this register bit to 1 triggers a mode register read ... */
+#define DWC_DDR_UMCTL2_REGS_MRCTRL0_MR_WR (1 << 31)
+
+/**** MRCTRL1 register ****/
+/* Mode register write data for all non-LPDDR2 modes */
+#define DWC_DDR_UMCTL2_REGS_MRCTRL1_MR_DATA_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_MRCTRL1_MR_DATA_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_MRCTRL1_RESERVED_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_MRCTRL1_RESERVED_SHIFT 16
+
+/**** MRSTAT register ****/
+/* SoC core may initiate a MR write operation only if this signa ... */
+#define DWC_DDR_UMCTL2_REGS_MRSTAT_MR_WR_BUSY (1 << 0)
+
+#define DWC_DDR_UMCTL2_REGS_MRSTAT_RESERVED_MASK 0xFFFFFFFE
+#define DWC_DDR_UMCTL2_REGS_MRSTAT_RESERVED_SHIFT 1
+
+/**** PWRCTL register ****/
+/* If set, then the uMCTL2 puts the SDRAM into self refresh when ... */
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_SELFREF_EN (1 << 0)
+/* If true then the uMCTL2 goes into power-down after a programm ... */
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_POWERDOWN_EN (1 << 1)
+
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED (1 << 2)
+/* Enable the assertion of dfi_dram_clk_disable whenever a clock ... */
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_EN_DFI_DRAM_CLK_DISABLE (1 << 3)
+
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED1_MASK 0xFFFFFFF0
+#define DWC_DDR_UMCTL2_REGS_PWRCTL_RESERVED1_SHIFT 4
+
+/**** PWRTMG register ****/
+/* After this many clocks of NOP or deselect the uMCTL2 puts the ... */
+#define DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_MASK 0x0000001F
+#define DWC_DDR_UMCTL2_REGS_PWRTMG_POWERDOWN_TO_X32_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PWRTMG_RESERVED_MASK 0xFFFFFFE0
+#define DWC_DDR_UMCTL2_REGS_PWRTMG_RESERVED_SHIFT 5
+
+/**** RFSHCTL0 register ****/
+
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED_MASK 0x000000FF
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED_SHIFT 0
+/* The programmed value + 1 is the number of refresh timeouts th ... */
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_MASK 0x00000700
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST(n) \
+ (((n) - 1) << DWC_DDR_UMCTL2_REGS_RFSHCTL0_REFRESH_BURST_SHIFT)
+
+#define DWC_DDR_UMCTL2_REGS_RFSHCTL0_RESERVED1 (1 << 11)
+/* tRFC (min): Minimum time from refresh to refresh or activate ... */
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_MIN_MASK 0x000001FF
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_MIN_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED_MASK 0x0000FE00
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED_SHIFT 9
+/* tREFI: Average time interval between refreshes per ... */
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_NOM_X32_MASK 0x0FFF0000
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_T_RFC_NOM_X32_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED1_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_RFSHTMG_RESERVED1_SHIFT 28
+
+/**** ECCCFG0 register ****/
+/* ECC mode indicator */
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_MASK 0x00000007
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_DIS \
+ (0 << DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT)
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_EN \
+ (4 << DWC_DDR_UMCTL2_REGS_ECCCFG0_ECC_MODE_SHIFT)
+
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED (1 << 3)
+/* Disable ECC scrubs */
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_DIS_SCRUB (1 << 4)
+
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED1_MASK 0xFFFFFFE0
+#define DWC_DDR_UMCTL2_REGS_ECCCFG0_RESERVED1_SHIFT 5
+
+/**** ECCCFG1 register ****/
+/* Enable ECC data poisoning - introduces ECC errors on writes t ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCFG1_DATA_POISON (1 << 0)
+
+#define DWC_DDR_UMCTL2_REGS_ECCCFG1_RESERVED_MASK 0xFFFFFFFE
+#define DWC_DDR_UMCTL2_REGS_ECCCFG1_RESERVED_SHIFT 1
+
+/**** ECCSTAT register ****/
+/* Bit number corrected by single-bit ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_MASK 0x0000007F
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_BIT_NUM_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED (1 << 7)
+/* Single-bit error indicators, 1 per ECC lane */
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_ERR_MASK 0x00000300
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_CORRECTED_ERR_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED1_MASK 0x0000FC00
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED1_SHIFT 10
+/* Double-bit error indicators, 1 per ECC lane */
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_UNCORRECTED_ERR_MASK 0x00030000
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_ECC_UNCORRECTED_ERR_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED2_MASK 0xFFFC0000
+#define DWC_DDR_UMCTL2_REGS_ECCSTAT_RESERVED2_SHIFT 18
+
+/**** ECCCLR register ****/
+/* Setting this register bit to 1 clears the currently stored co ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR (1 << 0)
+/* Setting this register bit to 1 clears the currently stored un ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR (1 << 1)
+/* Setting this register bit to 1 clears the currently stored co ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_CORR_ERR_CNT (1 << 2)
+/* Setting this register bit to 1 clears the currently stored un ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_ECC_CLR_UNCORR_ERR_CNT (1 << 3)
+
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_RESERVED_MASK 0xFFFFFFF0
+#define DWC_DDR_UMCTL2_REGS_ECCCLR_RESERVED_SHIFT 4
+
+/**** ECCERRCNT register ****/
+/* Number of correctable ECC errors detected */
+#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_CORR_ERR_CNT_SHIFT 0
+/* Number of uncorrectable ECC errors detected */
+#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_ECCERRCNT_ECC_UNCORR_ERR_CNT_SHIFT 16
+
+/**** ECCCADDR0 register ****/
+/* Page/row number of a read resulting in a corrected ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_ROW_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED_SHIFT 16
+/* Rank number of a read resulting in a corrected ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_MASK 0x03000000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_ECC_CORR_RANK_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED1_MASK 0xFC000000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR0_RESERVED1_SHIFT 26
+
+/**** ECCCADDR1 register ****/
+/* Block number of a read resulting in a corrected ECC error (lo ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_MASK 0x00000FFF
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_COL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED_SHIFT 12
+/* Bank number of a read resulting in a corrected ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_MASK 0x00070000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_ECC_CORR_BANK_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED1_MASK 0xFFF80000
+#define DWC_DDR_UMCTL2_REGS_ECCCADDR1_RESERVED1_SHIFT 19
+
+/**** ECCCSYN2 register ****/
+/* Data pattern that resulted in a corrected error one for each ... */
+#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_MASK 0x000000FF
+#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_ECC_CORR_SYNDROMES_71_64_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_RESERVED_MASK 0xFFFFFF00
+#define DWC_DDR_UMCTL2_REGS_ECCCSYN2_RESERVED_SHIFT 8
+
+/**** ECCBITMASK2 register ****/
+/* Mask for the corrected data portion - 1 on any bit indicat ... */
+#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_MASK 0x000000FF
+#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_ECC_CORR_BIT_MASK_71_64_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_RESERVED_MASK 0xFFFFFF00
+#define DWC_DDR_UMCTL2_REGS_ECCBITMASK2_RESERVED_SHIFT 8
+
+/**** ECCUADDR0 register ****/
+/* Page/row number of a read resulting in an uncorrected ECC err ... */
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_ROW_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED_SHIFT 16
+/* Rank number of a read resulting in an uncorrected ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_MASK 0x03000000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_ECC_UNCORR_RANK_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED1_MASK 0xFC000000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR0_RESERVED1_SHIFT 26
+
+/**** ECCUADDR1 register ****/
+/* Block number of a read resulting in an uncorrected ECC error ... */
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_MASK 0x00000FFF
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_COL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED_SHIFT 12
+/* Bank number of a read resulting in an uncorrected ECC error */
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_MASK 0x00070000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_ECC_UNCORR_BANK_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED1_MASK 0xFFF80000
+#define DWC_DDR_UMCTL2_REGS_ECCUADDR1_RESERVED1_SHIFT 19
+
+/**** ECCUSYN2 register ****/
+/* Data pattern that resulted in an uncorrected error one for ea ... */
+#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_MASK 0x000000FF
+#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_ECC_UNCORR_SYNDROMES_71_64_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_RESERVED_MASK 0xFFFFFF00
+#define DWC_DDR_UMCTL2_REGS_ECCUSYN2_RESERVED_SHIFT 8
+
+/**** ECCPOISONADDR0 register ****/
+/* Column address for ECC poisoning */
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_MASK 0x00000FFF
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_COL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED_MASK 0x00FFF000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED_SHIFT 12
+/* Rank address for ECC poisoning */
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_MASK 0x03000000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_ECC_POISON_RANK_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED1_MASK 0xFC000000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR0_RESERVED1_SHIFT 26
+
+/**** ECCPOISONADDR1 register ****/
+/* Row address for ECC poisoning */
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_ROW_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED_SHIFT 16
+/* Bank address for ECC poisoning */
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_MASK 0x07000000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_ECC_POISON_BANK_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED1_MASK 0xF8000000
+#define DWC_DDR_UMCTL2_REGS_ECCPOISONADDR1_RESERVED1_SHIFT 27
+
+/**** PARCTL register ****/
+/* Interrupt enable bit for DFI parity error */
+#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_EN (1 << 0)
+/* Interrupt clear bit for DFI parity error */
+#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_INT_CLR (1 << 1)
+/* DFI parity error count clear */
+#define DWC_DDR_UMCTL2_REGS_PARCTL_DFI_PARITY_ERR_CNT_CLR (1 << 2)
+
+#define DWC_DDR_UMCTL2_REGS_PARCTL_RESERVED_MASK 0xFFFFFFF8
+#define DWC_DDR_UMCTL2_REGS_PARCTL_RESERVED_SHIFT 3
+
+/**** PARSTAT register ****/
+/* DFI parity error count */
+#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_CNT_SHIFT 0
+/* DFI parity error interrupt */
+#define DWC_DDR_UMCTL2_REGS_PARSTAT_DFI_PARITY_ERR_INT (1 << 16)
+
+#define DWC_DDR_UMCTL2_REGS_PARSTAT_RESERVED_MASK 0xFFFE0000
+#define DWC_DDR_UMCTL2_REGS_PARSTAT_RESERVED_SHIFT 17
+
+/**** INIT0 register ****/
+/* Cycles to wait after reset before driving CKE high to start t ... */
+#define DWC_DDR_UMCTL2_REGS_INIT0_PRE_CKE_X1024_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_REGS_INIT0_PRE_CKE_X1024_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED_MASK 0x0000FC00
+#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED_SHIFT 10
+/* Cycles to wait after driving CKE high to start the SDRAM init ... */
+#define DWC_DDR_UMCTL2_REGS_INIT0_POST_CKE_X1024_MASK 0x03FF0000
+#define DWC_DDR_UMCTL2_REGS_INIT0_POST_CKE_X1024_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED1_MASK 0xFC000000
+#define DWC_DDR_UMCTL2_REGS_INIT0_RESERVED1_SHIFT 26
+
+/**** INIT1 register ****/
+/* Wait period before driving the OCD complete command to SDRAM */
+#define DWC_DDR_UMCTL2_REGS_INIT1_PRE_OCD_X32_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_INIT1_PRE_OCD_X32_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED_SHIFT 4
+/* Cycles to wait after completing the SDRAM initialization sequ ... */
+#define DWC_DDR_UMCTL2_REGS_INIT1_FINAL_WAIT_X32_MASK 0x00007F00
+#define DWC_DDR_UMCTL2_REGS_INIT1_FINAL_WAIT_X32_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED1 (1 << 15)
+/* Number of cycles to assert SDRAM reset signal during init seq ... */
+#define DWC_DDR_UMCTL2_REGS_INIT1_DRAM_RSTN_X1024_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_INIT1_DRAM_RSTN_X1024_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED2_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_INIT1_RESERVED2_SHIFT 24
+
+/**** INIT3 register ****/
+/* Non LPDDR2-Value to be loaded into SDRAM EMR registers */
+#define DWC_DDR_UMCTL2_REGS_INIT3_EMR_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_INIT3_EMR_SHIFT 0
+/* Non LPDDR2-Value to be loaded into the SDRAM Mode register */
+#define DWC_DDR_UMCTL2_REGS_INIT3_MR_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_INIT3_MR_SHIFT 16
+
+/**** INIT4 register ****/
+/* Non LPDDR2- Value to be loaded into SDRAM EMR3 registers */
+#define DWC_DDR_UMCTL2_REGS_INIT4_EMR3_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_INIT4_EMR3_SHIFT 0
+/* Non LPDDR2- Value to be loaded into SDRAM EMR2 registers */
+#define DWC_DDR_UMCTL2_REGS_INIT4_EMR2_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_INIT4_EMR2_SHIFT 16
+
+/**** INIT5 register ****/
+
+#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED_SHIFT 0
+/* ZQ initial calibration, tZQINIT */
+#define DWC_DDR_UMCTL2_REGS_INIT5_DEV_ZQINIT_X32_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_INIT5_DEV_ZQINIT_X32_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED1_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_INIT5_RESERVED1_SHIFT 24
+
+/**** DIMMCTL register ****/
+/* Staggering enable for multi-rank accesses (for multi-rank UDI ... */
+#define DWC_DDR_UMCTL2_REGS_DIMMCTL_DIMM_STAGGER_CS_EN (1 << 0)
+/* Address Mirroring Enable (for multi-rank UDIMM implementation ... */
+#define DWC_DDR_UMCTL2_REGS_DIMMCTL_DIMM_ADDR_MIRR_EN (1 << 1)
+
+#define DWC_DDR_UMCTL2_REGS_DIMMCTL_RESERVED_MASK 0xFFFFFFFC
+#define DWC_DDR_UMCTL2_REGS_DIMMCTL_RESERVED_SHIFT 2
+
+/**** RANKCTL register ****/
+/* Only present for multi-rank configurations */
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_MAX_RANK_RD_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_MAX_RANK_RD_SHIFT 0
+/* Only present for multi-rank configurations */
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_RD_GAP_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4
+/* Only present for multi-rank configurations */
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_WR_GAP_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_RESERVED_MASK 0xFFFFF000
+#define DWC_DDR_UMCTL2_REGS_RANKCTL_RESERVED_SHIFT 12
+
+/**** DRAMTMG0 register ****/
+/* tRAS(min): Minimum time between activate and prec ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MIN_MASK 0x0000003F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MIN_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED_MASK 0x000000C0
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED_SHIFT 6
+/* tRAS(max): Maximum time between activate and prec ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MAX_MASK 0x00003F00
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_RAS_MAX_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED1_MASK 0x0000C000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED1_SHIFT 14
+/* tFAW Valid only when 8 banks are present */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_FAW_MASK 0x003F0000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_T_FAW_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED2_MASK 0x00C00000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED2_SHIFT 22
+/* Minimum time between write and precharge to same bank */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_WR2PRE_MASK 0x3F000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_WR2PRE_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED3_MASK 0xC0000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG0_RESERVED3_SHIFT 30
+
+/**** DRAMTMG1 register ****/
+/* tRC: Minimum time between activates to same bank */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_RC_MASK 0x0000003F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_RC_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED_MASK 0x000000C0
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED_SHIFT 6
+/* tRTP: Minimum time from read to precharge of same ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RD2PRE_MASK 0x00001F00
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RD2PRE_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED1_MASK 0x0000E000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED1_SHIFT 13
+/* tXP: Minimum time after power-down exit to any ope ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_XP_MASK 0x001F0000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_T_XP_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED2_MASK 0xFFE00000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG1_RESERVED2_SHIFT 21
+
+/**** DRAMTMG2 register ****/
+/* WL + BL/2 + tWTR: Minimum time from write comman ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_WR2RD_MASK 0x0000003F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_WR2RD_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_RESERVED_MASK 0x000000C0
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG2_RESERVED_SHIFT 6
+/* DDR2/3/mDDR: RL + BL/2 + 2 - WL; LPDDR2: RL + BL/2 + RU ... */
+
+/**** DRAMTMG3 register ****/
+/* tMOD: Present if MEMC_DDR3 = 1 only */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MOD_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MOD_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED_MASK 0x00000C00
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED_SHIFT 10
+/* tMRD: Cycles between load mode commands */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MRD_MASK 0x00007000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_T_MRD_SHIFT 12
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED1_MASK 0xFFFF8000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG3_RESERVED1_SHIFT 15
+
+/**** DRAMTMG4 register ****/
+/* tRP: Minimum time from precharge to activate of s ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RP_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RP_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED_SHIFT 4
+/* tRRD: Minimum time between activates from bank "a ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RRD_MASK 0x00000700
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RRD_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED1_MASK 0x0000F800
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED1_SHIFT 11
+/* tCCD: This is the minimum time between two reads ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_CCD_MASK 0x00070000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_CCD_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED2_MASK 0x00F80000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED2_SHIFT 19
+/* tRCD - tAL: Minimum time from activate ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RCD_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_T_RCD_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG4_RESERVED3_SHIFT 28
+
+/**** DRAMTMG5 register ****/
+/* Minimum number of cycles of CKE HIGH/LOW during power-down an ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKE_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED_SHIFT 4
+/* Minimum CKE low width for Self refresh entry to exit timing i ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKESR_MASK 0x00003F00
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKESR_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED1_MASK 0x0000C000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED1_SHIFT 14
+/* Specifies the number of DFI clock cycles from the de-assertio ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRE_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRE_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED2_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED2_SHIFT 20
+/* This is the time before Self Refresh Exit that CK is maintain ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRX_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_T_CKSRX_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG5_RESERVED3_SHIFT 28
+
+/**** DRAMTMG8 register ****/
+/* Minimum time to wait after coming out of self refresh before ... */
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_POST_SELFREF_GAP_X32_MASK 0x0000007F
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_POST_SELFREF_GAP_X32_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_RESERVED_MASK 0xFFFFFF80
+#define DWC_DDR_UMCTL2_REGS_DRAMTMG8_RESERVED_SHIFT 7
+
+/**** ZQCTL0 register ****/
+/* tZQCS: Number of cycles of NOP required after a ZQ ... */
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_SHORT_NOP_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_SHORT_NOP_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED_MASK 0x0000FC00
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED_SHIFT 10
+/* tZQOPER for DDR3, tZQCL for LPDDR2: Num ... */
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_LONG_NOP_MASK 0x03FF0000
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_T_ZQ_LONG_NOP_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED1_MASK 0x3C000000
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_RESERVED1_SHIFT 26
+/* - 1 - Disable issuing of ZQCL command at Self-Refresh exit */
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_SRX_ZQCL (1 << 30)
+/* - 1 - Disable uMCTL2 generation of ZQCS command */
+#define DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ (1 << 31)
+
+/**** ZQCTL1 register ****/
+/* Average interval to wait between automatically issuing ZQCS ( ... */
+#define DWC_DDR_UMCTL2_REGS_ZQCTL1_T_ZQ_SHORT_INTERVAL_X1024_MASK 0x000FFFFF
+#define DWC_DDR_UMCTL2_REGS_ZQCTL1_T_ZQ_SHORT_INTERVAL_X1024_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ZQCTL1_RESERVED_MASK 0xFFF00000
+#define DWC_DDR_UMCTL2_REGS_ZQCTL1_RESERVED_SHIFT 20
+
+/**** DFITMG0 register ****/
+/* Write latency: Number of clocks from the write command to w ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_WRITE_LATENCY_MASK 0x0000001F
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_WRITE_LATENCY_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED_MASK 0x000000E0
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED_SHIFT 5
+/* Specifies the number of clock cycles between when dfi_wrdata_ ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_TPHY_WRDATA_MASK 0x00001F00
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_TPHY_WRDATA_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED1_MASK 0x0000E000
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED1_SHIFT 13
+/* Time from the assertion of a read command on the DFI interfac ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_RDDATA_EN_MASK 0x001F0000
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_RDDATA_EN_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED2_MASK 0x00E00000
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED2_SHIFT 21
+/* Specifies the number of DFI clock cycles after an assertion o ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_CTRL_DELAY_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_DFI_T_CTRL_DELAY_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_DFITMG0_RESERVED3_SHIFT 28
+
+/**** DFITMG1 register ****/
+/* Specifies the number of DFI clock cycles from the de-assertio ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_ENABLE_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_ENABLE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED_SHIFT 4
+/* Specifies the number of DFI clock cycles from the assertion o ... */
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_DISABLE_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_DFI_T_DRAM_CLK_DISABLE_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED1_MASK 0xFFFFF000
+#define DWC_DDR_UMCTL2_REGS_DFITMG1_RESERVED1_SHIFT 12
+
+/**** DFIUPD0 register ****/
+/* Specifies the minimum number of clock cycles that the dfi_ctr ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MIN_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MIN_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED_MASK 0x0000FC00
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED_SHIFT 10
+/* Specifies the maximum number of clock cycles that the dfi_ctr ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MAX_MASK 0x03FF0000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DFI_T_CTRLUP_MAX_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED1_MASK 0x7C000000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_RESERVED1_SHIFT 26
+/* When '1', disable co_gs_dll_calib generated by the uMCTL2 */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD0_DIS_DLL_CALIB (1 << 31)
+
+/**** DFIUPD1 register ****/
+/* This is the maximum amount of time between uMCTL2 initiated D ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MAX_X1024_MASK 0x000000FF
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MAX_X1024_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED_MASK 0x0000FF00
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED_SHIFT 8
+/* This is the minimum amount of time between uMCTL2 initiated D ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MIN_X1024_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_DFI_T_CTRLUPD_INTERVAL_MIN_X1024_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED1_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD1_RESERVED1_SHIFT 24
+
+/**** DFIUPD2 register ****/
+/* Specifies the maximum number of DFI clock cycles that the dfi ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE0_MASK 0x00000FFF
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE0_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED_SHIFT 12
+/* Specifies the maximum number of DFI clock cycles that the dfi ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE1_MASK 0x0FFF0000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_TYPE1_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED1_MASK 0x70000000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_RESERVED1_SHIFT 28
+/* Enables the support for acknowledging PHY-initiated updates: ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD2_DFI_PHYUPD_EN (1 << 31)
+
+/**** DFIUPD3 register ****/
+/* Specifies the maximum number of DFI clock cycles that the dfi ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE2_MASK 0x00000FFF
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE2_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED_SHIFT 12
+/* Specifies the maximum number of DFI clock cycles that the dfi ... */
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE3_MASK 0x0FFF0000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_DFI_PHYUPD_TYPE3_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED1_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_DFIUPD3_RESERVED1_SHIFT 28
+
+/**** DFIMISC register ****/
+/* PHY initialization complete enable signal */
+#define DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0)
+
+#define DWC_DDR_UMCTL2_REGS_DFIMISC_RESERVED_MASK 0xFFFFFFFE
+#define DWC_DDR_UMCTL2_REGS_DFIMISC_RESERVED_SHIFT 1
+
+/* Address mapping bases */
+#define AL_DDR_ADDR_MAP_CS_0_BASE 6
+
+#define AL_DDR_ADDR_MAP_CS_DISABLED 31
+
+#define AL_DDR_ADDR_MAP_BANK_0_BASE 2
+
+#define AL_DDR_ADDR_MAP_BANK_DISABLED 15
+
+#define AL_DDR_ADDR_MAP_COL_2_BASE 2
+
+#define AL_DDR_ADDR_MAP_COL_DISABLED 15
+
+#define AL_DDR_ADDR_MAP_ROW_0_BASE 6
+#define AL_DDR_ADDR_MAP_ROW_11_BASE 17
+
+#define AL_DDR_ADDR_MAP_ROW_DISABLED 15
+
+#define AL_DDR_ADDR_MAP_OFFSET 4
+
+/**** ADDRMAP0 register ****/
+/* Selects the HIF address bit used as rank address bit 0 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_MASK 0x0000001F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT0_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED_MASK 0x000000E0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED_SHIFT 5
+/* Selects the HIF address bit used as rank address bit 1 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_MASK 0x00001F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_ADDRMAP_CS_BIT1_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED1_MASK 0xFFFFE000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP0_RESERVED1_SHIFT 13
+
+/**** ADDRMAP1 register ****/
+/* Selects the HIF address bits used as bank address bit 0 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B0_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED_SHIFT 4
+/* Selects the HIF address bits used as bank address bit 1 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B1_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED1_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED1_SHIFT 12
+/* Selects the HIF address bit used as bank address bit 2 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_ADDRMAP_BANK_B2_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED2_MASK 0xFFF00000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP1_RESERVED2_SHIFT 20
+
+/**** ADDRMAP2 register ****/
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B2_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED_SHIFT 4
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B3_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED1_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED1_SHIFT 12
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B4_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED2_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED2_SHIFT 20
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_ADDRMAP_COL_B5_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP2_RESERVED3_SHIFT 28
+
+/**** ADDRMAP3 register ****/
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B6_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED_SHIFT 4
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B7_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED1_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED1_SHIFT 12
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B8_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED2_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED2_SHIFT 20
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_ADDRMAP_COL_B9_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP3_RESERVED3_SHIFT 28
+
+/**** ADDRMAP4 register ****/
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B10_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED_SHIFT 4
+/* Full bus width mode: Selects the HIF address bit used as colu ... */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_ADDRMAP_COL_B11_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED1_MASK 0xFFFFF000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP4_RESERVED1_SHIFT 12
+
+/**** ADDRMAP5 register ****/
+/* Selects the HIF address bits used as row address bit 0 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B0_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED_SHIFT 4
+/* Selects the HIF address bits used as row address bit 1 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B1_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED1_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED1_SHIFT 12
+/* Selects the HIF address bits used as row address bits 2 to 10 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B2_10_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED2_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED2_SHIFT 20
+/* Selects the HIF address bit used as row address bit 11 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_ADDRMAP_ROW_B11_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP5_RESERVED3_SHIFT 28
+
+/**** ADDRMAP6 register ****/
+/* Selects the HIF address bit used as row address bit 12 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B12_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED_SHIFT 4
+/* Selects the HIF address bit used as row address bit 13 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B13_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED1_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED1_SHIFT 12
+/* Selects the HIF address bit used as row address bit 14 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B14_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED2_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED2_SHIFT 20
+/* Selects the HIF address bit used as row address bit 15 */
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_ADDRMAP_ROW_B15_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED3_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ADDRMAP6_RESERVED3_SHIFT 28
+
+/**** ODTCFG register ****/
+/* Controls blocking of commands for ODT - 00 - Block read/write ... */
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_BLOCK_MASK 0x00000003
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_BLOCK_SHIFT 0
+
+/* The delay, in clock cycles, from issuing a read command to ... */
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_DELAY_MASK 0x0000003C
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_DELAY_SHIFT 2
+
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED_MASK 0x000000C0
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED_SHIFT 6
+
+/* Cycles to hold ODT for a read command */
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_HOLD_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RD_ODT_HOLD_SHIFT 8
+
+/* The delay, in clock cycles, from issuing a write command to ... */
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_DELAY_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_DELAY_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED1_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED1_SHIFT 20
+/* Cycles to hold ODT for a write command */
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_HOLD_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_WR_ODT_HOLD_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED2_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ODTCFG_RESERVED2_SHIFT 28
+
+/**** ODTMAP register ****/
+/* Indicates which remote ODTs must be turned on during a write ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_WR_ODT_MASK 0x0000000F
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_WR_ODT_SHIFT 0
+
+/* Indicates which remote ODTs must be turned on during a read ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_RD_ODT_MASK 0x000000F0
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK0_RD_ODT_SHIFT 4
+
+/* Indicates which remote ODTs must be turned on during a write ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_WR_ODT_MASK 0x00000F00
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_WR_ODT_SHIFT 8
+
+/* Indicates which remote ODTs must be turned on during a read ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_RD_ODT_MASK 0x0000F000
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK1_RD_ODT_SHIFT 12
+
+/* Indicates which remote ODTs must be turned on during a write ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_WR_ODT_MASK 0x000F0000
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_WR_ODT_SHIFT 16
+
+/* Indicates which remote ODTs must be turned on during a read ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_RD_ODT_MASK 0x00F00000
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK2_RD_ODT_SHIFT 20
+
+/* Indicates which remote ODTs must be turned on during a write ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_WR_ODT_MASK 0x0F000000
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_WR_ODT_SHIFT 24
+
+/* Indicates which remote ODTs must be turned on during a read ... */
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_RD_ODT_MASK 0xF0000000
+#define DWC_DDR_UMCTL2_REGS_ODTMAP_RANK3_RD_ODT_SHIFT 28
+
+/**** SCHED register ****/
+/* Active low signal */
+#define DWC_DDR_UMCTL2_REGS_SCHED_FORCE_LOW_PRI_N (1 << 0)
+/* If set then the bank selector prefers writes over reads */
+#define DWC_DDR_UMCTL2_REGS_SCHED_PREFER_WRITE (1 << 1)
+/* If true, bank is closed until transactions are available for ... */
+#define DWC_DDR_UMCTL2_REGS_SCHED_PAGECLOSE (1 << 2)
+
+#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED_MASK 0x000000F8
+#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED_SHIFT 3
+/* Number of entries in the low priority transaction store is th ... */
+#define DWC_DDR_UMCTL2_REGS_SCHED_LPR_NUM_ENTRIES_MASK 0x00001F00
+#define DWC_DDR_UMCTL2_REGS_SCHED_LPR_NUM_ENTRIES_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED1_MASK 0x0000E000
+#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED1_SHIFT 13
+/* Describes the number of cycles that co_gs_go2critical_rd or c ... */
+#define DWC_DDR_UMCTL2_REGS_SCHED_GO2CRITICAL_HYSTERESIS_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_SCHED_GO2CRITICAL_HYSTERESIS_SHIFT 16
+/* When the preferred transaction store is empty for these many ... */
+#define DWC_DDR_UMCTL2_REGS_SCHED_RDWR_IDLE_GAP_MASK 0x7F000000
+#define DWC_DDR_UMCTL2_REGS_SCHED_RDWR_IDLE_GAP_SHIFT 24
+
+#define DWC_DDR_UMCTL2_REGS_SCHED_RESERVED2 (1U << 31) /* 1U: shifting into bit 31 of a signed int is UB */
+
+/**** PERFHPR0 register ****/
+/* Number of clocks that the HPR queue is guaranteed to stay in ... */
+#define DWC_DDR_UMCTL2_REGS_PERFHPR0_HPR_MIN_NON_CRITICAL_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFHPR0_HPR_MIN_NON_CRITICAL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFHPR0_RESERVED_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_PERFHPR0_RESERVED_SHIFT 16
+
+/**** PERFHPR1 register ****/
+/* Number of clocks that the HPR queue can be starved before it ... */
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_MAX_STARVE_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_MAX_STARVE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_RESERVED_SHIFT 16
+/* Number of transactions that are serviced once the HPR queue g ... */
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_XACT_RUN_LENGTH_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_PERFHPR1_HPR_XACT_RUN_LENGTH_SHIFT 24
+
+/**** PERFLPR0 register ****/
+/* Number of clocks that the LPR queue is guaranteed to be non-c ... */
+#define DWC_DDR_UMCTL2_REGS_PERFLPR0_LPR_MIN_NON_CRITICAL_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFLPR0_LPR_MIN_NON_CRITICAL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFLPR0_RESERVED_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_PERFLPR0_RESERVED_SHIFT 16
+
+/**** PERFLPR1 register ****/
+/* Number of clocks that the LPR queue can be starved before it ... */
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_MAX_STARVE_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_MAX_STARVE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_RESERVED_SHIFT 16
+/* Number of transactions that are serviced once the LPR queue g ... */
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_XACT_RUN_LENGTH_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_PERFLPR1_LPR_XACT_RUN_LENGTH_SHIFT 24
+
+/**** PERFWR0 register ****/
+/* Number of clocks that the write queue is guaranteed to be non ... */
+#define DWC_DDR_UMCTL2_REGS_PERFWR0_W_MIN_NON_CRITICAL_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFWR0_W_MIN_NON_CRITICAL_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFWR0_RESERVED_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_REGS_PERFWR0_RESERVED_SHIFT 16
+
+/**** PERFWR1 register ****/
+/* Number of clocks that the write queue can be starved before i ... */
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_MAX_STARVE_MASK 0x0000FFFF
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_MAX_STARVE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_RESERVED_MASK 0x00FF0000
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_RESERVED_SHIFT 16
+/* Number of transactions that are serviced once the WR queue go ... */
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_XACT_RUN_LENGTH_MASK 0xFF000000
+#define DWC_DDR_UMCTL2_REGS_PERFWR1_W_XACT_RUN_LENGTH_SHIFT 24
+
+/**** DBG0 register ****/
+/* When 1, disable write combine.
+FOR DEBUG ONLY */
+#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_WC (1 << 0)
+/* Only present in designs supporting read bypass */
+#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_RD_BYPASS (1 << 1)
+/* Only present in designs supporting activate bypass */
+#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_ACT_BYPASS (1 << 2)
+
+#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED (1 << 3)
+/* When this is set to '0', auto-precharge is disabled for the f ... */
+#define DWC_DDR_UMCTL2_REGS_DBG0_DIS_COLLISION_PAGE_OPT (1 << 4)
+
+#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED1_MASK 0xFFFFFFE0
+#define DWC_DDR_UMCTL2_REGS_DBG0_RESERVED1_SHIFT 5
+
+/**** DBG1 register ****/
+/* When 1, uMCTL2 will not de-queue any transactions from the CA ... */
+#define DWC_DDR_UMCTL2_REGS_DBG1_DIS_DQ (1 << 0)
+
+#define DWC_DDR_UMCTL2_REGS_DBG1_RESERVED_MASK 0xFFFFFFFE
+#define DWC_DDR_UMCTL2_REGS_DBG1_RESERVED_SHIFT 1
+
+/**** DBGCAM register ****/
+/* High priority read queue depth
+FOR DEBUG ONLY */
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_HPR_Q_DEPTH_MASK 0x0000003F
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_HPR_Q_DEPTH_SHIFT 0
+
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED_MASK 0x000000C0
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED_SHIFT 6
+/* Low priority read queue depth
+FOR DEBUG ONLY */
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_LPR_Q_DEPTH_MASK 0x00003F00
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_LPR_Q_DEPTH_SHIFT 8
+
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED1_MASK 0x0000C000
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED1_SHIFT 14
+/* Write queue depth
+FOR DEBUG ONLY */
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_W_Q_DEPTH_MASK 0x003F0000
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_W_Q_DEPTH_SHIFT 16
+
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED2_MASK 0x00C00000
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED2_SHIFT 22
+/* Stall
+FOR DEBUG ONLY */
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_DBG_STALL (1 << 24)
+
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED3_MASK 0xFE000000
+#define DWC_DDR_UMCTL2_REGS_DBGCAM_RESERVED3_SHIFT 25
+
+/**** PCCFG register ****/
+/* If set to 1 (enabled), sets co_gs_go2critical_wr and co_gs_go ... */
+#define DWC_DDR_UMCTL2_MP_PCCFG_GO2CRITICAL_EN (1 << 0)
+
+#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED_MASK 0x0000000E
+#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED_SHIFT 1
+/* Page match four limit */
+#define DWC_DDR_UMCTL2_MP_PCCFG_PAGEMATCH_LIMIT (1 << 4)
+
+#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED1_MASK 0xFFFFFFE0
+#define DWC_DDR_UMCTL2_MP_PCCFG_RESERVED1_SHIFT 5
+
+/**** PCFGR_0 register ****/
+/* Determines the initial load value of read aging counters */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PRIORITY_SHIFT 0
+
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED (1 << 10)
+/* If set to 1, read transactions with ID not covered by any of ... */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_READ_REORDER_BYPASS_EN (1 << 11)
+/* If set to 1, enables aging function for the read channel of t ... */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_AGING_EN (1 << 12)
+/* If set to 1, enables the AXI urgent sideband signal (arurgent ... */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_URGENT_EN (1 << 13)
+/* If set to 1, enables the Page Match feature */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_PAGEMATCH_EN (1 << 14)
+/* If set to 1, enables reads to be generated as "High Priority ... */
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RD_PORT_HPR_EN (1 << 15)
+
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED1_MASK 0xFFFF0000
+#define DWC_DDR_UMCTL2_MP_PCFGR_0_RESERVED1_SHIFT 16
+
+/**** PCFGW_0 register ****/
+/* Determines the initial load value of write aging counters */
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PRIORITY_MASK 0x000003FF
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PRIORITY_SHIFT 0
+
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED_MASK 0x00000C00
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED_SHIFT 10
+/* If set to 1, enables aging function for the write channel of ... */
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_AGING_EN (1 << 12)
+/* If set to 1, enables the AXI urgent sideband signal (awurgent ... */
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_URGENT_EN (1 << 13)
+/* If set to 1, enables the Page Match feature */
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_WR_PORT_PAGEMATCH_EN (1 << 14)
+
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED1_MASK 0xFFFF8000
+#define DWC_DDR_UMCTL2_MP_PCFGW_0_RESERVED1_SHIFT 15
+
+/**** PCFGIDMASKCH0_0 register ****/
+/* Determines the mask used in the ID mapping function for virtu ... */
+#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_ID_MASK_MASK 0x003FFFFF
+#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_ID_MASK_SHIFT 0
+
+#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_RESERVED_MASK 0xFFC00000
+#define DWC_DDR_UMCTL2_MP_PCFGIDMASKCH0_0_RESERVED_SHIFT 22
+
+/**** PCFGIDVALUECH0_0 register ****/
+/* Determines the value used in the ID mapping function for virt ... */
+#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_ID_VALUE_MASK 0x003FFFFF
+#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_ID_VALUE_SHIFT 0
+
+#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_RESERVED_MASK 0xFFC00000
+#define DWC_DDR_UMCTL2_MP_PCFGIDVALUECH0_0_RESERVED_SHIFT 22
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/** @} end of DDR group */
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_ddr_phy_regs.h b/arch/arm/mach-alpine/al_hal/al_hal_ddr_phy_regs.h
new file mode 100644
index 0000000..43367ca
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_ddr_phy_regs.h
@@ -0,0 +1,1148 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @addtogroup groupddr
+ *
+ * @{
+ * @file al_hal_ddr_phy_regs.h
+ *
+ * @brief DDR PHY registers
+ *
+ */
+#ifndef __AL_HAL_DDR_PHY_REGS_REGS_H__
+#define __AL_HAL_DDR_PHY_REGS_REGS_H__
+
+#include "al_hal_ddr_cfg.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The index of the ECC byte lane */
+#define AL_DDR_PHY_ECC_BYTE_LANE_INDEX 4
+
+/* The clock frequency on which the PLL frequency select need to be changed */
+#define AL_DDR_PHY_PLL_FREQ_SEL_MHZ 700
+
+#define AL_DDR_PHY_NUM_ZQ_SEGMANTS 3
+
+struct al_ddr_phy_zq_regs {
+	uint32_t PR; /* ZQ Impedance Control Program Register */
+	uint32_t DR; /* ZQ Impedance Control Data Register */
+	uint32_t SR; /* ZQ Impedance Control Status Register */
+	uint32_t reserved; /* pad segment to 4 words */
+};
+
+struct al_ddr_phy_datx8_regs {
+	uint32_t GCR[4]; /* General Configuration Registers 0-3 */
+	uint32_t GSR[3]; /* General Status Registers 0-2 */
+	uint32_t BDLR[7]; /* Bit Delay Line Registers 0-6 */
+	uint32_t LCDLR[3]; /* Local Calibrated Delay Line Registers 0-2 */
+	uint32_t MDLR; /* Master Delay Line Register */
+	uint32_t GTR; /* General Timing Register */
+	uint32_t reserved[13]; /* pad DATX8 block to 32 words */
+};
+
+struct al_ddr_phy_regs {
+	uint32_t RIDR; /* Revision Identification Reg */
+	uint32_t PIR; /* PHY Initialization Reg */
+	uint32_t PGCR[4]; /* PHY General Configuration Regs 0-3 */
+	uint32_t PGSR[2]; /* PHY General Status Regs 0-1 */
+	uint32_t PLLCR; /* PLL Control Reg */
+	uint32_t PTR[5]; /* PHY Timing Regs 0-4 */
+	uint32_t ACMDLR; /* AC Master Delay Line Reg */
+	uint32_t ACLCDLR; /* AC Local Calibrated Delay Line Reg */
+	uint32_t ACBDLR[10]; /* AC Bit Delay Line Regs 0-9 */
+	uint32_t ACIOCR[6]; /* AC I/O Configuration Regs 0-5 */
+	uint32_t DXCCR; /* DATX8 Common Configuration Reg */
+	uint32_t DSGCR; /* DDR System General Configuration Reg */
+	uint32_t DCR; /* DRAM Configuration Reg */
+	uint32_t DTPR[4]; /* DRAM Timing Parameters Registers 0-3 */
+	uint32_t MR[4]; /* Mode Regs 0-3 */
+	uint32_t ODTCR; /* ODT Configuration Reg */
+	uint32_t DTCR; /* Data Training Configuration Reg */
+	uint32_t DTAR[4]; /* Data Training Address Register 0-3 */
+	uint32_t DTDR[2]; /* Data Training Data Register 0-1 */
+	uint32_t DTEDR[2]; /* Data Training Eye Data Register 0-1 */
+	uint32_t RDIMMGCR[2]; /* RDIMM General Configuration Register 0-1 */
+	uint32_t RDIMMCR[2]; /* RDIMM Control Register 0-1 */
+	uint32_t reserved1[0x3D - 0x39]; /* gap: word offsets 0x39-0x3C */
+	uint32_t ODTCTLR; /* ODT Control Reg */
+	uint32_t reserved2[0x70 - 0x3E]; /* gap: word offsets 0x3E-0x6F */
+	uint32_t BISTRR; /* BIST Run Register */
+	uint32_t BISTWCR; /* BIST Word Count Register */
+	uint32_t BISTMSKR[3]; /* BIST Mask Register 0-2 */
+	uint32_t BISTLSR; /* BIST LFSR Seed Register */
+	uint32_t BISTAR[3]; /* BIST Address Register 0-2 */
+	uint32_t BISTUDPR; /* BIST User Data Pattern Register */
+	uint32_t BISTGSR; /* BIST General Status Register */
+	uint32_t BISTWER; /* BIST Word Error Register */
+	uint32_t BISTBER[4]; /* BIST Bit Error Register 0-3 */
+	uint32_t BISTWCSR; /* BIST Word Count Status Register */
+	uint32_t BISTFWR[3]; /* BIST Fail Word Register 0-2 */
+	uint32_t reserved3[0x8E - 0x84]; /* gap: word offsets 0x84-0x8D */
+	uint32_t IOVCR[2]; /* IO VREF Control Register 0-1 */
+	uint32_t ZQCR; /* ZQ Impedance Control Register */
+	struct al_ddr_phy_zq_regs ZQ[AL_DDR_PHY_NUM_ZQ_SEGMANTS];
+	uint32_t reserved4[0xA0 - 0x9D]; /* gap: word offsets 0x9D-0x9F */
+	struct al_ddr_phy_datx8_regs DATX8[AL_DDR_PHY_NUM_BYTE_LANES];
+};
+
+/* Register PGSR0 field iDONE */
+/**
+ * Initialization Done: Indicates if set that the DDR system initialization has
+ * completed. This bit is set after all the selected initialization routines in
+ * PIR register have completed.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_IDONE 0x00000001
+
+/* Register PGSR0 field ZCERR */
+/**
+ * Impedance Calibration Error: Indicates if set that there is an error in
+ * impedance calibration.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_ZCERR 0x00100000
+
+/* Register PGSR0 field WLERR */
+/**
+ * Write Leveling Error: Indicates if set that there is an error in write
+ * leveling.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_WLERR 0x00200000
+
+/* Register PGSR0 field QSGERR */
+/**
+ * DQS Gate Training Error: Indicates if set that there is an error in DQS gate
+ * training.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_QSGERR 0x00400000
+
+/* Register PGSR0 field WLAERR */
+/**
+ * Write Leveling Adjustment Error: Indicates if set that there is an error in
+ * write leveling adjustment.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_WLAERR 0x00800000
+
+/* Register PGSR0 field RDERR */
+/**
+ * Read Bit Deskew Error: Indicates if set that there is an error in read bit
+ * deskew.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_RDERR 0x01000000
+
+/* Register PGSR0 field WDERR */
+/**
+ * Write Bit Deskew Error: Indicates if set that there is an error in write bit
+ * deskew.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_WDERR 0x02000000
+
+/* Register PGSR0 field REERR */
+/**
+ * Read Eye Training Error: Indicates if set that there is an error in read eye
+ * training.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_REERR 0x04000000
+
+/* Register PGSR0 field WEERR */
+/**
+ * Write Eye Training Error: Indicates if set that there is an error in write
+ * eye training.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_WEERR 0x08000000
+
+/* Register PGSR0 field VTDONE */
+/**
+ * AC VT Done: Indicates if set that VT compensation calculation has
+ * been completed for all enabled AC BDLs and LCDL.
+ */
+#define DWC_DDR_PHY_REGS_PGSR0_VTDONE 0x40000000
+
+/* Register PGSR1 field VTSTOP */
+/**
+ * VT Stop: Indicates if set that the VT calculation logic has stopped
+ * computing the next values for the VT compensated delay line values. After
+ * assertion of the PGCR.INHVT, the VTSTOP bit should be read to ensure all VT
+ * compensation logic has stopped computations before writing to the delay line
+ * registers.
+ */
+#define DWC_DDR_PHY_REGS_PGSR1_VTSTOP 0x40000000
+
+/* Register PGCR0 field PHYFRST */
+/**
+ * A write of ‘0’ to this bit resets the AC and DATX8 FIFOs without
+ * resetting PUB RTL logic. This bit is not self-clearing and a ‘1’
+ * must be written to deassert the reset.
+ */
+#define DWC_DDR_PHY_REGS_PGCR0_PHYFRST 0x04000000
+
+/* Register PGCR1 field DLBYPMODE */
+/**
+ * Controls DDL Bypass Modes. Valid values are:
+ * 00 = Normal dynamic control
+ * 01 = All DDLs bypassed
+ * 10 = No DDLs bypassed
+ * 11 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_MASK 0x00000030
+#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT 4
+
+#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_DYNAMIC \
+ (0 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT)
+
+#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_BYPASS \
+ (1 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT)
+
+#define DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_NO_BYPASS \
+ (2 << DWC_DDR_PHY_REGS_PGCR1_DLBYPMODE_SHIFT)
+
+/* Register PGCR1 field IODDRM */
+/**
+ * I/O DDR Mode (D3F I/O Only): Selects the DDR mode for the I/Os.
+ * These bits connect to bits [2:1] of the IOM pin of the SSTL I/O.
+ * I/O Mode: I/O Mode select
+ * 000 = DDR2 mode
+ * 001 = LVCMOS mode
+ * 010 = DDR3 mode
+ * 011 = Reserved
+ * 100 = DDR3L mode
+ * 101 = Reserved
+ * 110 = Reserved
+ * 111 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_MASK AL_FIELD_MASK(8, 7)
+#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_SHIFT 7
+#define DWC_DDR_PHY_REGS_PGCR1_IODDRM_VAL_DDR3 \
+ (1 << DWC_DDR_PHY_REGS_PGCR1_IODDRM_SHIFT)
+
+/* Register PGCR1 field PHYHRST */
+/**
+ * PHY High-Speed Reset: A write of ‘0’ to this bit resets the AC and DATX8
+ * macros without resetting PUBm2 RTL logic. This bit is not self-clearing
+ * and a ‘1’ must be written to de-assert the reset.
+ */
+#define DWC_DDR_PHY_REGS_PGCR1_PHYHRST 0x02000000
+
+/* Register PGCR1 field INHVT */
+/**
+ * VT Calculation Inhibit: Inhibits calculation of the next VT compensated
+ * delay line values. A value of 1 will initiate a stop of the VT compensation
+ * logic. The bit PGSR1[30] (VSTOP) will be set to a logic 1 when VT
+ * compensation has stopped. This bit should be set to 1 during writes to the
+ * delay line registers. A value of 0 will re-enable the VT compensation
+ * logic.
+ */
+#define DWC_DDR_PHY_REGS_PGCR1_INHVT 0x04000000
+
+/* Register PGCR1 field IOLB */
+/**
+ * I/O Loop-Back Select: Selects where inside the I/O the loop-back of signals
+ * happens. Valid values are:
+ * 0 = Loopback is after output buffer; output enable must be asserted
+ * 1 = Loopback is before output buffer; output enable is don’t care
+ */
+#define DWC_DDR_PHY_REGS_PGCR1_IOLB 0x08000000
+
+/* Register PGCR3 field RDMODE */
+/**
+ */
+#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_MASK 0x00000018
+#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT 3
+
+#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_OFF \
+ (0 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT)
+
+#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_CMP \
+ (2 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT)
+
+#define DWC_DDR_PHY_REGS_PGCR3_RDMODE_STATIC_RD_RSP_PRG \
+ (3 << DWC_DDR_PHY_REGS_PGCR3_RDMODE_SHIFT)
+
+/* Register PGCR3 field RDDLY */
+/**
+ */
+#define DWC_DDR_PHY_REGS_PGCR3_RDDLY_MASK 0x000001e0
+#define DWC_DDR_PHY_REGS_PGCR3_RDDLY_SHIFT 5
+
+/* Register PGCR3 field GATEDXCTLCLK */
+/**
+Enable Clock Gating for DX ctl_clk: Enables, when set, clock gating for power
+saving. Valid values are:
+0 = Clock gating is disabled .
+1 = Clock gating is enabled
+ */
+#define DWC_DDR_PHY_REGS_PGCR3_GATEDXCTLCLK 0x00002000
+
+/* Register PGCR3 field GATEDXDDRCLK */
+/**
+Enable Clock Gating for DX ddr_clk: Enables, when set, clock gating for power
+saving. Valid values are:
+0 = Clock gating is disabled .
+1 = Clock gating is enabled
+ */
+#define DWC_DDR_PHY_REGS_PGCR3_GATEDXDDRCLK 0x00004000
+
+/* Register PGCR3 field GATEDXRDCLK */
+/**
+Enable Clock Gating for DX rd_clk: Enables, when set, clock gating for power
+saving. Valid values are:
+0 = Clock gating is disabled .
+1 = Clock gating is enabled
+ */
+#define DWC_DDR_PHY_REGS_PGCR3_GATEDXRDCLK 0x00008000
+
+/* Register PLLCR field FRQSEL */
+/**
+ * PLL Frequency Select: Selects the operating range of the PLL.
+ * 00 = PLL reference clock (ctl_clk/REF_CLK) ranges from 335MHz to 533MHz
+ * 01 = PLL reference clock (ctl_clk/REF_CLK) ranges from 225MHz to 385MHz
+ * 10 = Reserved
+ * 11 = PLL reference clock (ctl_clk/REF_CLK) ranges from 166MHz to 275MHz
+ */
+#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_MASK 0x00180000
+#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT 19
+#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_335MHZ_533MHz \
+ (0x0 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT)
+#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_225MHZ_385MHz \
+ (0x1 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT)
+#define DWC_DDR_PHY_REGS_PLLCR_FRQSEL_166MHZ_275MHz \
+ (0x3 << DWC_DDR_PHY_REGS_PLLCR_FRQSEL_SHIFT)
+
+/* Register ACIOCR0 field ACPDR */
+/**
+ * AC Power Down Receiver: Powers down, when set, the input receiver on the I/O for
+ * RAS#, CAS#, WE#, BA[2:0], and A[15:0] pins.
+ */
+#define DWC_DDR_PHY_REGS_ACIOCR0_ACPDR 0x00000010
+
+/* Register ACIOCR0 field CKPDR */
+/**
+ * CK Power Down Receiver: Powers down, when set, the input receiver on the I/O for
+ * CK[0], CK[1], CK[2], and CK[3] pins, respectively.
+ */
+#define DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_MASK 0x00003c00
+#define DWC_DDR_PHY_REGS_ACIOCR0_CKPDR_SHIFT 10
+
+/* Register ACIOCR0 field RANKPDR */
+/**
+ * Rank Power Down Receiver: Powers down, when set, the input receiver on the I/O
+ * CKE[3:0], ODT[3:0], and CS#[3:0] pins. RANKPDR[0] controls the power down for
+ * CKE[0], ODT[0], and CS#[0], RANKPDR[1] controls the power down for CKE[1],
+ * ODT[1], and CS#[1], and so on.
+ */
+#define DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_MASK 0x03c00000
+#define DWC_DDR_PHY_REGS_ACIOCR0_RANKPDR_SHIFT 22
+
+/* Register ACIOCR0 field RSTPDR */
+/**
+ * SDRAM Reset Power Down Receiver: Powers down, when set, the input receiver
+ * on the I/O for SDRAM RST# pin.
+ */
+#define DWC_DDR_PHY_REGS_ACIOCR0_RSTPDR 0x10000000
+
+/* Register DSGCR field PUREN */
+/**
+ * PHY Update Request Enable: Specifies if set, that the PHY should issue
+ * PHY-initiated update request when there is DDL VT drift.
+ */
+#define DWC_DDR_PHY_REGS_DSGCR_PUREN 0x00000001
+
+
+/* Register DSGCR field DQSGX */
+/**
+ * DQS Gate Extension: Specifies if set that the read DQS gate
+ * will be extended. This should be set ONLY when used with DQS
+ * pulldown and DQSn pullup, i.e. one clock cycle extension on either
+ * side. Allowable settings are:
+ * 00 = do not extend the gate
+ * 01 = extend the gate by ½ tCK in both directions (but never earlier than
+ * zero read latency)
+ * 10 = extend the gate earlier by ½ tCK and later by 2 * tCK (to facilitate
+ * LPDDR2/LPDDR3 usage without training for systems supporting upto
+ * 800Mbps)
+ * 11 = extend the gate earlier by ½ tCK and later by 3 * tCK (to facilitate
+ * LPDDR2/LPDDR3 usage without training for systems supporting upto
+ * 1600Mbps))
+ */
+#define DWC_DDR_PHY_REGS_DSGCR_DQSGX_MASK 0x000000c0
+#define DWC_DDR_PHY_REGS_DSGCR_DQSGX_SHIFT 6
+
+/* Register DSGCR field RRMODE */
+/**
+ * Rise-to-Rise Mode: Indicates if set that the PHY mission mode is configured
+ * to run in rise-to-rise mode. Otherwise if not set the PHY mission mode is
+ * running in rise-to-fall mode.
+ */
+#define DWC_DDR_PHY_REGS_DSGCR_RRMODE 0x00040000
+
+/* Register DCR field NOSRA */
+/**
+ * No Simultaneous Rank Access: Specifies if set that simultaneous rank access
+ * on the same clock cycle is not allowed. This means that multiple chip select
+ * signals should not be asserted at the same time. This may be required on
+ * some DIMM systems.
+ */
+#define DWC_DDR_PHY_REGS_DCR_NOSRA 0x08000000
+
+/* Register DCR field DDR2T */
+/**
+ * DDR 2T Timing: Indicates if set that 2T timing should be used by PUBm2
+ * internally generated SDRAM transactions.
+ */
+#define DWC_DDR_PHY_REGS_DCR_DDR2T 0x10000000
+
+/* Register DCR field UDIMM */
+/**
+ * Un-buffered DIMM Address Mirroring: Indicates if set that there is address
+ * mirroring on the second rank of an un-buffered DIMM (the rank connected to
+ * CS#[1]). In this case, the PUBm2 re-scrambles the bank and address when
+ * sending mode register commands to the second rank. This only applies to
+ * PUBm2 internal SDRAM transactions. Transactions generated by the controller
+ * must make its own adjustments when using an un-buffered DIMM. DCR[NOSRA]
+ * must be set if address mirroring is enabled.
+ */
+#define DWC_DDR_PHY_REGS_DCR_UDIMM 0x20000000
+
+/* Register DTPR0 field t_rtp */
+/* Internal read to precharge command delay. Valid values are 2 to 15 */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RTP_MASK 0x0000000f
+#define DWC_DDR_PHY_REGS_DTPR0_T_RTP_SHIFT 0
+
+/* Register DTPR0 field t_wtr */
+/* Internal write to read command delay. Valid values are 1 to 15 */
+#define DWC_DDR_PHY_REGS_DTPR0_T_WTR_MASK 0x000000f0
+#define DWC_DDR_PHY_REGS_DTPR0_T_WTR_SHIFT 4
+
+/* Register DTPR0 field t_rp */
+/* Precharge command period: The minimum time between a precharge command
+and any other command. Note that the Controller automatically derives tRPA for
+8-bank DDR2 devices by adding 1 to tRP. Valid values are 2 to 15.
+In LPDDR3 mode, PUBm2 adds an offset of 8 to the register value, so valid range
+is 8 to 23 */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RP_MASK 0x00000f00
+#define DWC_DDR_PHY_REGS_DTPR0_T_RP_SHIFT 8
+
+/* Register DTPR0 field t_rcd */
+/* Activate to read or write delay. Minimum time from when an activate command
+ * is issued to when a read or write to the activated row can be issued. Valid
+ * values are 2 to 15. In LPDDR3 mode, PUBm2 adds an offset of 8 to the register
+ * value, so valid range is 8 to 23.
+ */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RCD_MASK 0x0000f000
+#define DWC_DDR_PHY_REGS_DTPR0_T_RCD_SHIFT 12
+
+/* Register DTPR0 field t_ras_min */
+/* Activate to precharge command delay. Valid values are 2 to 63 */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RAS_MIN_MASK 0x003f0000
+#define DWC_DDR_PHY_REGS_DTPR0_T_RAS_MIN_SHIFT 16
+
+/* Register DTPR0 field t_rrd */
+/* Activate to activate command delay (different banks). Valid values are 1 to
+ * 15
+ */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RRD_MASK 0x03c00000
+#define DWC_DDR_PHY_REGS_DTPR0_T_RRD_SHIFT 22
+
+/* Register DTPR0 field t_rc */
+/* Activate to activate command delay (same bank). Valid values are 2 to 63 */
+#define DWC_DDR_PHY_REGS_DTPR0_T_RC_MASK 0xfc000000
+#define DWC_DDR_PHY_REGS_DTPR0_T_RC_SHIFT 26
+
+/* Register DTPR1 field T_AOND */
+/* Read ODT turn-on delay */
+#define DWC_DDR_PHY_REGS_DTPR1_T_AOND_MASK 0xc0000000
+#define DWC_DDR_PHY_REGS_DTPR1_T_AOND_SHIFT 30
+
+/* Register DTPR3 field T_OFDX */
+/* ODT turn-on length (read and write) */
+#define DWC_DDR_PHY_REGS_DTPR3_T_OFDX_MASK 0xe0000000
+#define DWC_DDR_PHY_REGS_DTPR3_T_OFDX_SHIFT 29
+
+/* Register ODTCR field RDODT0 */
+/**
+ * Read ODT: Specifies whether ODT should be enabled (‘1’) or disabled (‘0’) on
+ * each of the up to four ranks when a read command is sent to rank n. RDODT0,
+ * RDODT1, RDODT2, and RDODT3 specify ODT settings when a read is to rank 0,
+ * rank 1, rank 2, and rank 3, respectively. The four bits of each field each
+ * represent a rank, the LSB being rank 0 and the MSB being rank 3.
+ * Default is to disable ODT during reads.
+*/
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT0_MASK 0x0000000F
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT0_SHIFT 0
+
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT1_MASK 0x000000F0
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT1_SHIFT 4
+
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT2_MASK 0x00000F00
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT2_SHIFT 8
+
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT3_MASK 0x0000F000
+#define DWC_DDR_PHY_REGS_ODTCR_RDODT3_SHIFT 12
+
+/* Register ODTCR field WRODT0 */
+/**
+ * Write ODT: Specifies whether ODT should be enabled (‘1’) or disabled (‘0’) on
+ * each of the up to four ranks when a write command is sent to rank n. WRODT0,
+ * WRODT1, WRODT2, and WRODT3 specify ODT settings when a write is to rank 0,
+ * rank 1, rank 2, and rank 3, respectively. The four bits of each field each
+ * represent a rank, the LSB being rank 0 and the MSB being rank 3.
+ * Default is to enable ODT only on rank being written to.
+*/
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT0_MASK 0x000F0000
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT0_SHIFT 16
+
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT1_MASK 0x00F00000
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT1_SHIFT 20
+
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT2_MASK 0x0F000000
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT2_SHIFT 24
+
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT3_MASK 0xF0000000
+#define DWC_DDR_PHY_REGS_ODTCR_WRODT3_SHIFT 28
+
+/* Register DTCR field DTRPTN */
+/**
+ * Data Training Repeat Number: Repeat number used to confirm stability of DDR
+ * write or read
+*/
+#define DWC_DDR_PHY_REGS_DTCR_DTRPTN_MASK 0x0000000f
+#define DWC_DDR_PHY_REGS_DTCR_DTRPTN_SHIFT 0
+
+/* Register DTCR field DTRANK */
+/**
+ * Data Training Rank: Selects the SDRAM rank to be used during data bit deskew
+ * and eye centering.
+*/
+#define DWC_DDR_PHY_REGS_DTCR_DTRANK_MASK 0x00000030
+#define DWC_DDR_PHY_REGS_DTCR_DTRANK_SHIFT 4
+
+/* Register DTCR field DTMPR */
+/**
+ * Data Training Using MPR (DDR3 Only): Specifies, if set, that DQS gate
+ * training should use the SDRAM Multi-Purpose Register (MPR) register.
+ * Otherwise datatraining is performed by first writing to some locations in
+ * the SDRAM and then reading them back.
+ */
+#define DWC_DDR_PHY_REGS_DTCR_DTMPR 0x00000040
+
+/* Register DTCR field DTDBS */
+/**
+ * Data Training Debug Byte Select: Selects the byte during data training debug
+ * mode.
+ */
+#define DWC_DDR_PHY_REGS_DTCR_DTDBS_MASK 0x000f0000
+#define DWC_DDR_PHY_REGS_DTCR_DTDBS_SHIFT 16
+#define DWC_DDR_PHY_REGS_DTCR_DTDBS(i) \
+ ((i) << DWC_DDR_PHY_REGS_DTCR_DTDBS_SHIFT)
+
+/* Register DTCR field DTEXG */
+/**
+ * Data Training with Early/Extended Gate: Specifies if set that the DQS gate
+ * training should be performed with an early/extended gate as specified in
+ * DSGCR.DQSGX.
+ */
+#define DWC_DDR_PHY_REGS_DTCR_DTEXG 0x00800000
+
+/* Register DTCR field RANKEN */
+/**
+ * Rank Enable: Specifies the ranks that are enabled for data-training. Bit 0
+ * controls rank 0, bit 1 controls rank 1, bit 2 controls rank 2, and bit 3
+ * controls rank 3. Setting the bit to '1' enables the rank, and setting it to
+ * '0' disables the rank.
+ */
+#define DWC_DDR_PHY_REGS_DTCR_RANKEN_MASK 0x0f000000
+#define DWC_DDR_PHY_REGS_DTCR_RANKEN_SHIFT 24
+
+/* Register DTEDR1 field DTRLMN */
+/* Data Training RDQS LCDL Minimum */
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMN_MASK 0x000000ff
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMN_SHIFT 0
+
+/* Register DTEDR1 field DTRLMX */
+/* Data Training RDQS LCDL Maximum */
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMX_MASK 0x0000ff00
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRLMX_SHIFT 8
+
+/* Register DTEDR1 field DTRBMN */
+/* Data Training Read BDL Shift Minimum */
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMN_MASK 0x00ff0000
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMN_SHIFT 16
+
+/* Register DTEDR1 field DTRBMX */
+/* Data Training Read BDL Shift Maximum */
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMX_MASK 0xff000000
+#define DWC_DDR_PHY_REGS_DTEDR1_DTRBMX_SHIFT 24
+
+/* Register RDIMMGCR0 field RDIMM */
+/**
+ * Registered DIMM: Indicates if set that a registered DIMM is used. In this
+ * case, the PUBm2 increases the SDRAM write and read latencies (WL/RL) by 1
+ * and also enforces that accesses adhere to RDIMM buffer chip. This only
+ * applies to PUBm2 internal SDRAM transactions. Transactions generated by the
+ * controller must make its own adjustments to WL/RL when using a registered
+ * DIMM. The DCR.NOSRA register bit must be set to '1' if using the standard
+ * RDIMM buffer chip so that normal DRAM accesses do not assert multiple chip
+ * select bits at the same time.
+ */
+#define DWC_DDR_PHY_REGS_RDIMMGCR0_RDIMM 0x00000001
+
+/* Register ODTCTLR field FRCEN */
+/**
+ * ODT force value enable : when this field is set, the ODT
+ * value is taken from the FRCVAL field.
+ * One bit for each rank.
+ */
+#define DWC_DDR_PHY_REGS_ODTCTLR_FRCEN_MASK 0x0000000f
+#define DWC_DDR_PHY_REGS_ODTCTLR_FRCEN_SHIFT 0
+
+/* Register ODTCTLR field FRCVAL */
+/**
+ * ODT force value : when FRCEN field is set, the ODT
+ * value is taken from this field.
+ * One bit for each rank.
+ */
+#define DWC_DDR_PHY_REGS_ODTCTLR_FRCVAL_MASK 0x000000f0
+#define DWC_DDR_PHY_REGS_ODTCTLR_FRCVAL_SHIFT 4
+
+/* Register BISTRR field BINST */
+/**
+Selects the BIST instruction to be executed: Valid values are:
+000 = NOP: No operation
+001 = Run: Triggers the running of the BIST.
+010 = Stop: Stops the running of the BIST.
+011 = Reset: Resets all BIST run-time registers, such as error counters.
+100 – 111 Reserved
+ */
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_MASK 0x00000007
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_NOP \
+ (0x0 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_RUN \
+ (0x1 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_STOP \
+ (0x2 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BINST_RESET \
+ (0x3 << DWC_DDR_PHY_REGS_BISTRR_BINST_SHIFT)
+
+/* Register BISTRR field BMODE */
+/**
+BIST Mode: Selects the mode in which BIST is run. Valid values are:
+0 = Loopback mode: Address, commands and data loop back at the PHY I/Os.
+1 = DRAM mode: Address, commands and data go to DRAM for normal memory
+accesses.
+*/
+#define DWC_DDR_PHY_REGS_BISTRR_BMODE_MASK 0x00000008
+#define DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT 3
+#define DWC_DDR_PHY_REGS_BISTRR_BMODE_LOOPBACK \
+ (0x0 << DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BMODE_DRAM \
+ (0x1 << DWC_DDR_PHY_REGS_BISTRR_BMODE_SHIFT)
+
+/* Register BISTRR field BDXEN */
+/**
+ * DATX8 Enable: Enables the running of BIST on the data byte lane PHYs.
+ * This bit is exclusive with BACEN, i.e. both cannot be set to '1' at the same
+ * time.
+ */
+#define DWC_DDR_PHY_REGS_BISTRR_BDXEN 0x00004000
+
+/* Register BISTRR field BACEN */
+/**
+ * BIST AC Enable: Enables the running of BIST on the address/command lane PHY.
+ * This bit is exclusive with BDXEN, i.e. both cannot be set to '1' at the same
+ * time.
+ */
+#define DWC_DDR_PHY_REGS_BISTRR_BACEN 0x00008000
+
+/* Register BISTRR field BDPAT */
+/**
+BIST Data Pattern: Selects the data pattern used during BIST. Valid values are:
+00 = Walking 0
+01 = Walking 1
+10 = LFSR-based pseudo-random
+11 = User programmable (Not valid for AC loopback).
+*/
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_MASK 0x00060000
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT 17
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_0 \
+ (0x0 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_WALKING_1 \
+ (0x1 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_LFSR \
+ (0x2 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT)
+#define DWC_DDR_PHY_REGS_BISTRR_BDPAT_USER \
+ (0x3 << DWC_DDR_PHY_REGS_BISTRR_BDPAT_SHIFT)
+
+/* Register BISTRR field BDXSEL */
+/**
+BIST DATX8 Select: Select the byte lane for comparison of loopback/read data.
+Valid values are 0 to 8.
+*/
+#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL_MASK 0x00780000
+#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL_SHIFT 19
+#define DWC_DDR_PHY_REGS_BISTRR_BDXSEL(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTRR_BDXSEL_SHIFT)
+
+/* Register BISTWCR field BWCNT */
+/**
+BIST Word Count: Indicates the number of words to generate during BIST. This
+must be a multiple of DRAM burst length (BL) divided by 2, e.g. for BL=8, valid
+values are 4, 8, 12, 16, and so on.
+*/
+#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT_MASK 0x0000ffff
+#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTWCR_BWCNT(cnt) \
+ ((cnt) << DWC_DDR_PHY_REGS_BISTWCR_BWCNT_SHIFT)
+
+/* Register BISTAR0 field BCOL */
+/**
+ * BIST Column Address: Selects the SDRAM column address to be used during
+ * BIST. The lower bits of this address must be "0000" for BL16, "000" for BL8,
+ * "00" for BL4 and "0" for BL2.
+ */
+#define DWC_DDR_PHY_REGS_BISTAR0_BCOL_MASK 0x00000fff
+#define DWC_DDR_PHY_REGS_BISTAR0_BCOL_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTAR0_BCOL(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR0_BCOL_SHIFT)
+
+/* Register BISTAR0 field BROW */
+/**
+BIST Row Address: Selects the SDRAM row address to be used during BIST
+*/
+#define DWC_DDR_PHY_REGS_BISTAR0_BROW_MASK 0x0ffff000
+#define DWC_DDR_PHY_REGS_BISTAR0_BROW_SHIFT 12
+#define DWC_DDR_PHY_REGS_BISTAR0_BROW(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR0_BROW_SHIFT)
+
+/* Register BISTAR0 field BBANK */
+/**
+BIST Bank Address: Selects the SDRAM bank address to be used during BIST.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR0_BBANK_MASK 0x70000000
+#define DWC_DDR_PHY_REGS_BISTAR0_BBANK_SHIFT 28
+#define DWC_DDR_PHY_REGS_BISTAR0_BBANK(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR0_BBANK_SHIFT)
+
+/* Register BISTAR1 field BRANK */
+/**
+BIST Rank: Selects the SDRAM rank to be used during BIST. Valid values range
+from 0 to maximum ranks minus 1.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR1_BRANK_MASK 0x00000003
+#define DWC_DDR_PHY_REGS_BISTAR1_BRANK_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTAR1_BRANK(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR1_BRANK_SHIFT)
+
+/* Register BISTAR1 field BMRANK */
+/**
+BIST Maximum Rank: Specifies the maximum SDRAM rank to be used during BIST.
+The default value is set to maximum ranks minus 1.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK_MASK 0x0000000c
+#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK_SHIFT 2
+#define DWC_DDR_PHY_REGS_BISTAR1_BMRANK(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR1_BMRANK_SHIFT)
+
+/* Register BISTAR1 field BAINC */
+/**
+ * BIST Address Increment: Selects the value by which the SDRAM address is
+ * incremented for each write/read access. This value must be at the beginning
+ * of a burst boundary, i.e. the lower bits must be "0000" for BL16, "000" for
+ * BL8, "00" for BL4 and "0" for BL2.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR1_BAINC_MASK 0x0000fff0
+#define DWC_DDR_PHY_REGS_BISTAR1_BAINC_SHIFT 4
+#define DWC_DDR_PHY_REGS_BISTAR1_BAINC(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR1_BAINC_SHIFT)
+
+/* Register BISTAR2 field BMCOL */
+/**
+BIST Maximum Column Address: Specifies the maximum SDRAM column address
+to be used during BIST before the address increments to the next row.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL_MASK 0x00000fff
+#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTAR2_BMCOL(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMCOL_SHIFT)
+
+/* Register BISTAR2 field BMROW */
+/**
+BIST Maximum Row Address: Specifies the maximum SDRAM row address to be
+used during BIST before the address increments to the next bank.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR2_BMROW_MASK 0x0ffff000
+#define DWC_DDR_PHY_REGS_BISTAR2_BMROW_SHIFT 12
+#define DWC_DDR_PHY_REGS_BISTAR2_BMROW(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMROW_SHIFT)
+
+/* Register BISTAR2 field BMBANK */
+/**
+BIST Maximum Bank Address: Specifies the maximum SDRAM bank address to be
+used during BIST before the address increments to the next rank.
+*/
+#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK_MASK 0x70000000
+#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK_SHIFT 28
+#define DWC_DDR_PHY_REGS_BISTAR2_BMBANK(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTAR2_BMBANK_SHIFT)
+
+/* Register BISTUDPR field BUDP0 */
+/**
+ * BIST User Data Pattern 0: Data to be applied on even DQ pins during BIST.
+ */
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_MASK 0x0000ffff
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_SHIFT 0
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP0(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTUDPR_BUDP0_SHIFT)
+
+/* Register BISTUDPR field BUDP1 */
+/**
+ * BIST User Data Pattern 1: Data to be applied on odd DQ pins during BIST.
+ */
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_MASK 0xffff0000
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_SHIFT 16
+#define DWC_DDR_PHY_REGS_BISTUDPR_BUDP1(val) \
+ ((val) << DWC_DDR_PHY_REGS_BISTUDPR_BUDP1_SHIFT)
+
+/* Register BISTGSR field BDONE */
+/**
+BIST Done: Indicates if set that the BIST has finished executing. This bit is reset to
+zero when BIST is triggered.
+*/
+#define DWC_DDR_PHY_REGS_BISTGSR_BDONE 0x00000001
+
+/* Register BISTGSR field BACERR */
+/**
+BIST Address/Command Error: indicates if set that there is a data comparison error
+in the address/command lane.
+*/
+#define DWC_DDR_PHY_REGS_BISTGSR_BACERR 0x00000002
+
+/* Register BISTGSR field BDXERR */
+/**
+BIST Data Error: indicates if set that there is a data comparison error in the byte
+lane.
+*/
+#define DWC_DDR_PHY_REGS_BISTGSR_BDXERR 0x00000004
+
+/* Register DXnGCR0 field DXEN */
+/**
+ * Data Byte Enable: Enables if set the data byte. Setting this bit to ‘0’
+ * disables the byte, i.e. the byte is not used in PHY initialization or
+ * training and is ignored during SDRAM read/write operations.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DXEN 0x00000001
+
+/* Register DXnGCR0 field DQSGOE */
+/**
+DQSG Output Enable: Enables, when set, the output driver (OE pin) on the I/O for
+DQS gate.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGOE 0x00000004
+
+/* Register DXnGCR0 field DQSGODT */
+/**
+DQSG On-Die Termination: Enables, when set, the on-die termination (TE pin) on
+the I/O for DQS gate. Note that in typical usage, DQSGOE will always be on,
+rendering this control bit meaningless.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGODT 0x00000008
+
+/* Register DXnGCR0 field DQSGPDD */
+/**
+DQSG Power Down Driver: Powers down, if set, the output driver on the I/O for
+DQS gate. This bit is ORed with the common PDD configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGPDD 0x00000010
+
+/* Register DXnGCR0 field DQSGPDR */
+/**
+DQSG Power Down Receiver: Powers down, if set, the input receiver on the I/O for
+DQS gate. This bit is ORed with the common PDR configuration bit.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSGPDR 0x00000020
+
+/* Register DXnGCR0 field DQSRPD */
+/**
+DQSR Power Down: Powers down, if set, the PDQSR cell. This bit is ORed with the
+common PDR configuration bit
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_DQSRPD 0x00000040
+
+/* Register DXnGCR0 field PLLPD */
+/**
+ * PLL Power Down: Puts the byte PLL in power down mode by driving the PLL
+ * power down pin. This bit is not self-clearing and a '0' must be written to
+ * de-assert the power-down. This bit is ORed with the global PLLPD
+ * configuration bit
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_PLLPD 0x00020000
+
+/* Register DXnGCR0 field WLRKEN */
+/* Write Level Rank Enable: Specifies the ranks that should be write leveled
+ * for this byte. Write leveling responses from ranks that are not enabled for
+ * write leveling for a particular byte are ignored and write leveling is
+ * flagged as done for these ranks. WLRKEN[0] enables rank 0, [1] enables rank
+ * 1, [2] enables rank 2, and [3] enables rank 3.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_WLRKEN_MASK 0x3c000000
+#define DWC_DDR_PHY_REGS_DXNGCR0_WLRKEN_SHIFT 26
+
+/* Register DXnGCR0 field MDLEN */
+/**
+Master Delay Line Enable: Enables, if set, the DATX8 master delay line calibration
+to perform subsequent period measurements following the initial period
+measurements that are performed after reset or on when calibration is manually
+triggered. These additional measurements are accumulated and filtered as long as
+this bit remains high. This bit is combined with the common DATX8 MDL enable bit
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_MDLEN 0x40000000
+
+/* Register DXnGCR0 field CALBYP */
+/**
+Calibration Bypass: Prevents, if set, period measurement calibration from
+automatically triggering after PHY initialization.
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR0_CALBYP 0x80000000
+
+/* Register DXnGCR3 field DSPDRMODE */
+/**
+ * Enables the PDR mode values for DQS.
+ * 00 : PDR Dynamic
+ * 01 : PDR always ON
+ * 10 : PDR always OFF
+ * 11 : Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_MASK 0x0000000c
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT 2
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_DYNAMIC \
+ (0x0 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_ALWAYS_ON \
+ (0x1 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+#define DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_PDR_ALWAYS_OFF \
+ (0x2 << DWC_DDR_PHY_REGS_DXNGCR3_DSPDRMODE_SHIFT)
+
+/* Register DXnLCDLR0 field R0WLD */
+/** Rank 0 Write Leveling Delay: Rank 0 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R0WLD_MASK 0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R0WLD_SHIFT 0
+
+/* Register DXnLCDLR0 field R1WLD */
+/** Rank 1 Write Leveling Delay: Rank 1 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R1WLD_MASK 0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R1WLD_SHIFT 8
+
+/* Register DXnLCDLR0 field R2WLD */
+/** Rank 2 Write Leveling Delay: Rank 2 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R2WLD_MASK 0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R2WLD_SHIFT 16
+
+/* Register DXnLCDLR0 field R3WLD */
+/** Rank 3 Write Leveling Delay: Rank 3 delay select for the write leveling
+ * (WL) LCDL
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R3WLD_MASK 0xff000000
+#define DWC_DDR_PHY_REGS_DXNLCDLR0_R3WLD_SHIFT 24
+
+/* Register DXnLCDLR1 field WDQD */
+/* Write Data Delay: Delay select for the write data (WDQ) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_WDQD_MASK 0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_WDQD_SHIFT 0
+
+/* Register DXnLCDLR1 field RDQSD */
+/* Read DQS Delay: Delay select for the read DQS (RDQS) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSD_MASK 0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSD_SHIFT 8
+
+/* Register DXnLCDLR1 field RDQSND */
+/* Read DQSN Delay: Delay select for the read DQSN (RDQSN) LCDL. */
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSND_MASK 0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR1_RDQSND_SHIFT 16
+
+/* Register DXnLCDLR2 field R0DQSGD */
+/** Rank 0 Read DQS Gating Delay: Rank 0 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R0DQSGD_MASK 0x000000ff
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R0DQSGD_SHIFT 0
+
+/* Register DXnLCDLR2 field R1DQSGD */
+/** Rank 1 Read DQS Gating Delay: Rank 1 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R1DQSGD_MASK 0x0000ff00
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R1DQSGD_SHIFT 8
+
+/* Register DXnLCDLR2 field R2DQSGD */
+/** Rank 2 Read DQS Gating Delay: Rank 2 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R2DQSGD_MASK 0x00ff0000
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R2DQSGD_SHIFT 16
+
+/* Register DXnLCDLR2 field R3DQSGD */
+/** Rank 3 Read DQS Gating Delay: Rank 3 delay select for the read DQS gating
+ * (DQSG) LCDL.
+ */
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R3DQSGD_MASK 0xff000000
+#define DWC_DDR_PHY_REGS_DXNLCDLR2_R3DQSGD_SHIFT 24
+
+/* Register DXnGTR field R0DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7:
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R0DGSL_MASK 0x00000007
+#define DWC_DDR_PHY_REGS_DXNGTR_R0DGSL_SHIFT 0
+
+/* Register DXnGTR field R1DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7:
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R1DGSL_MASK 0x00000038
+#define DWC_DDR_PHY_REGS_DXNGTR_R1DGSL_SHIFT 3
+
+/* Register DXnGTR field R2DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7:
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R2DGSL_MASK 0x000001c0
+#define DWC_DDR_PHY_REGS_DXNGTR_R2DGSL_SHIFT 6
+
+/* Register DXnGTR field R3DGSL */
+/** Rank n DQS Gating System Latency: This is used to increase the number of clock
+ * cycles needed to expect valid DDR read data by up to seven extra clock cycles.
+ * This is used to compensate for board delays and other system delays. Power-up
+ * default is 000 (i.e. no extra clock cycles required). The SL fields are initially set by
+ * the PUBm2 during automatic DQS data training but these values can be
+ * overwritten by a direct write to this register. Every three bits of this register control
+ * the latency of each of the (up to) four ranks. R0DGSL controls the latency of rank
+ * 0, R1DGSL controls rank 1, and so on. Valid values are 0 to 7:
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R3DGSL_MASK 0x00000e00
+#define DWC_DDR_PHY_REGS_DXNGTR_R3DGSL_SHIFT 9
+
+/* Register DXnGTR field R0WLSL */
+/** Rank n Write Leveling System Latency: This is used to adjust the write latency
+ * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The
+ * SL fields are initially set by the PUBm2 during automatic write leveling but these
+ * values can be overwritten by a direct write to this register. Every two bits of this
+ * register control the latency of each of the (up to) four ranks. R0WLSL controls the
+ * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values:
+ * 00 = Write latency = WL - 1
+ * 01 = Write latency = WL
+ * 10 = Write latency = WL + 1
+ * 11 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R0WLSL_MASK 0x00003000
+#define DWC_DDR_PHY_REGS_DXNGTR_R0WLSL_SHIFT 12
+
+/* Register DXnGTR field R1WLSL */
+/** Rank n Write Leveling System Latency: This is used to adjust the write latency
+ * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The
+ * SL fields are initially set by the PUBm2 during automatic write leveling but these
+ * values can be overwritten by a direct write to this register. Every two bits of this
+ * register control the latency of each of the (up to) four ranks. R0WLSL controls the
+ * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values:
+ * 00 = Write latency = WL - 1
+ * 01 = Write latency = WL
+ * 10 = Write latency = WL + 1
+ * 11 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R1WLSL_MASK 0x0000c000
+#define DWC_DDR_PHY_REGS_DXNGTR_R1WLSL_SHIFT 14
+
+/* Register DXnGTR field R2WLSL */
+/** Rank n Write Leveling System Latency: This is used to adjust the write latency
+ * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The
+ * SL fields are initially set by the PUBm2 during automatic write leveling but these
+ * values can be overwritten by a direct write to this register. Every two bits of this
+ * register control the latency of each of the (up to) four ranks. R0WLSL controls the
+ * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values:
+ * 00 = Write latency = WL - 1
+ * 01 = Write latency = WL
+ * 10 = Write latency = WL + 1
+ * 11 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R2WLSL_MASK 0x00030000
+#define DWC_DDR_PHY_REGS_DXNGTR_R2WLSL_SHIFT 16
+
+/* Register DXnGTR field R3WLSL */
+/** Rank n Write Leveling System Latency: This is used to adjust the write latency
+ * after write leveling. Power-up default is 01 (i.e. no extra clock cycles required). The
+ * SL fields are initially set by the PUBm2 during automatic write leveling but these
+ * values can be overwritten by a direct write to this register. Every two bits of this
+ * register control the latency of each of the (up to) four ranks. R0WLSL controls the
+ * latency of rank 0, R1WLSL controls rank 1, and so on. Valid values:
+ * 00 = Write latency = WL - 1
+ * 01 = Write latency = WL
+ * 10 = Write latency = WL + 1
+ * 11 = Reserved
+ */
+#define DWC_DDR_PHY_REGS_DXNGTR_R3WLSL_MASK 0x000c0000
+#define DWC_DDR_PHY_REGS_DXNGTR_R3WLSL_SHIFT 18
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/** @} end of DDR group */
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_ddr_utils.h b/arch/arm/mach-alpine/al_hal/al_hal_ddr_utils.h
new file mode 100644
index 0000000..eb13049
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_ddr_utils.h
@@ -0,0 +1,253 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @addtogroup groupddr
+ *
+ * @{
+ * @file al_hal_ddr_utils.h
+ *
+ */
+
+#ifndef __AL_HAL_DDR_UTILS_H__
+#define __AL_HAL_DDR_UTILS_H__
+
+#include "al_hal_common.h"
+#include "al_hal_ddr_ctrl_regs.h"
+#include "al_hal_ddr_phy_regs.h"
+#include "al_hal_reg_utils.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* Default timeout for register polling operations */
+#define DEFAULT_TIMEOUT 5000
+
+#if 0 /* Check if masked required */
+#define _al_reg_write32_masked(reg, mask, data) \
+ __al_reg_write32_masked(__LINE__, reg, mask, data)
+
+#define ___al_reg_write32_masked(reg, mask, data) \
+ ____al_reg_write32_masked(__LINE__, reg, mask, data)
+
+void __al_reg_write32_masked(
+ int line,
+ uint32_t *reg,
+ uint32_t mask,
+ uint32_t data)
+{
+ if (!(al_reg_read32(reg) & ~mask))
+ al_info("%d can be non masked!\n", line);
+ al_reg_write32_masked(reg, mask, data);
+}
+
+void ____al_reg_write32_masked(
+ int line,
+ uint32_t *reg,
+ uint32_t mask,
+ uint32_t data)
+{
+ if (al_reg_read32(reg) & ~mask)
+ al_info("%d can not be non masked!\n", line);
+ al_reg_write32_masked(reg, mask, data);
+}
+#else
+#define _al_reg_write32_masked(reg, mask, data) \
+ al_reg_write32_masked(reg, mask, data)
+
+static inline void ___al_reg_write32_masked(
+ uint32_t *reg,
+ uint32_t mask __attribute__((__unused__)),
+ uint32_t data)
+{
+ al_reg_write32(reg, data);
+}
+#endif
+
+/*******************************************************************************
+ ******************************************************************************/
+static int al_ddr_reg_poll32(
+ uint32_t __iomem *reg,
+ uint32_t mask,
+ uint32_t data,
+ unsigned int timeout)
+{
+ while ((al_reg_read32(reg) & mask) != data) {
+ if (timeout) {
+ al_udelay(1);
+ timeout--;
+ } else {
+ return -ETIME;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Wait for controller normal operating mode */
+static int al_ddr_ctrl_wait_for_normal_operating_mode(
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs)
+{
+ int err;
+
+ err = al_ddr_reg_poll32(
+ &ctrl_regs->stat,
+ DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_MASK,
+ DWC_DDR_UMCTL2_REGS_STAT_OPERATING_MODE_NORMAL,
+ DEFAULT_TIMEOUT);
+
+ if (err) {
+ al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/**
+ * Disabling VT calculation
+ * VP calculation must be disabled during writes to the delay line registers
+ */
+static int al_ddr_phy_vt_calc_disable(
+ struct al_ddr_phy_regs __iomem *phy_regs)
+{
+ int err;
+
+ _al_reg_write32_masked(
+ &phy_regs->PGCR[1],
+ DWC_DDR_PHY_REGS_PGCR1_INHVT,
+ DWC_DDR_PHY_REGS_PGCR1_INHVT);
+
+ err = al_ddr_reg_poll32(
+ &phy_regs->PGSR[1],
+ DWC_DDR_PHY_REGS_PGSR1_VTSTOP,
+ DWC_DDR_PHY_REGS_PGSR1_VTSTOP,
+ DEFAULT_TIMEOUT);
+
+ if (err) {
+ al_err("%s: al_ddr_reg_poll32 failed!\n", __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Enabling VT calculation */
+static void al_ddr_phy_vt_calc_enable(
+ struct al_ddr_phy_regs __iomem *phy_regs)
+{
+ _al_reg_write32_masked(
+ &phy_regs->PGCR[1],
+ DWC_DDR_PHY_REGS_PGCR1_INHVT,
+ 0);
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Stop DDR controller access to the PHY*/
+static inline void al_ddr_ctrl_stop(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ ___al_reg_write32_masked(
+ &ctrl_regs->dfimisc,
+ DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN,
+ 0);
+
+ /* Stop controller refresh and ZQ calibration commands */
+ _al_reg_write32_masked(
+ &ctrl_regs->rfshctl3,
+ DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH,
+ DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH);
+
+ _al_reg_write32_masked(
+ &ctrl_regs->zqctl0,
+ DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ,
+ DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ);
+
+ al_data_memory_barrier();
+}
+
+/*******************************************************************************
+ ******************************************************************************/
+/* Resume DDR controller access to the PHY*/
+static inline void al_ddr_ctrl_resume(
+ void __iomem *ddr_ctrl_regs_base)
+{
+ struct al_dwc_ddr_umctl2_regs __iomem *ctrl_regs =
+ &((struct al_ddr_ctrl_regs __iomem *)
+ ddr_ctrl_regs_base)->umctl2_regs;
+
+ ___al_reg_write32_masked(
+ &ctrl_regs->dfimisc,
+ DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN,
+ DWC_DDR_UMCTL2_REGS_DFIMISC_DFI_INIT_COMPLETE_EN);
+
+ /* Resume controller refresh and ZQ calibration commands */
+ _al_reg_write32_masked(
+ &ctrl_regs->rfshctl3,
+ DWC_DDR_UMCTL2_REGS_RFSHCTL3_DIS_AUTO_REFRESH,
+ 0);
+
+ _al_reg_write32_masked(
+ &ctrl_regs->zqctl0,
+ DWC_DDR_UMCTL2_REGS_ZQCTL0_DIS_AUTO_ZQ,
+ 0);
+
+ al_data_memory_barrier();
+
+ al_ddr_ctrl_wait_for_normal_operating_mode(ctrl_regs);
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of DDR group */
+#endif
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_exports.c b/arch/arm/mach-alpine/al_hal/al_hal_exports.c
new file mode 100644
index 0000000..81e7de4
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_exports.c
@@ -0,0 +1,69 @@
+/*
+* Copyright (C) 2015 Annapurna Labs Ltd.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include "linux/export.h"
+#include "al_hal_udma.h"
+#include "al_hal_udma_config.h"
+#include "al_hal_iofic.h"
+#include "al_hal_udma_iofic.h"
+#include "al_hal_udma_debug.h"
+#include "al_hal_udma_fast.h"
+#include "al_hal_m2m_udma.h"
+#include "al_hal_serdes.h"
+#include "al_hal_ssm.h"
+
+EXPORT_SYMBOL(al_iofic_moder_res_config);
+EXPORT_SYMBOL(al_udma_q_handle_get);
+EXPORT_SYMBOL(al_udma_m2s_packet_size_cfg_set);
+EXPORT_SYMBOL(al_udma_q_init);
+EXPORT_SYMBOL(al_iofic_read_cause);
+EXPORT_SYMBOL(al_udma_cdesc_packet_get);
+EXPORT_SYMBOL(al_iofic_msix_moder_interval_config);
+EXPORT_SYMBOL(al_udma_iofic_config);
+EXPORT_SYMBOL(al_udma_init);
+EXPORT_SYMBOL(al_iofic_config);
+EXPORT_SYMBOL(al_udma_states_name);
+EXPORT_SYMBOL(al_udma_state_set);
+EXPORT_SYMBOL(al_udma_iofic_unmask_offset_get);
+EXPORT_SYMBOL(al_iofic_mask);
+EXPORT_SYMBOL(al_iofic_unmask);
+EXPORT_SYMBOL(al_iofic_clear_cause);
+EXPORT_SYMBOL(al_udma_state_get);
+EXPORT_SYMBOL(al_udma_q_struct_print);
+EXPORT_SYMBOL(al_udma_regs_print);
+EXPORT_SYMBOL(al_udma_ring_print);
+EXPORT_SYMBOL(al_m2m_udma_handle_get);
+EXPORT_SYMBOL(al_m2m_udma_state_set);
+EXPORT_SYMBOL(al_m2m_udma_q_init);
+EXPORT_SYMBOL(al_m2m_udma_init);
+EXPORT_SYMBOL(al_serdes_tx_deemph_inc);
+EXPORT_SYMBOL(al_serdes_signal_is_detected);
+EXPORT_SYMBOL(al_serdes_rx_advanced_params_set);
+EXPORT_SYMBOL(al_serdes_tx_advanced_params_set);
+EXPORT_SYMBOL(al_serdes_eye_measure_run);
+EXPORT_SYMBOL(al_udma_m2s_max_descs_set);
+EXPORT_SYMBOL(al_serdes_tx_deemph_dec);
+EXPORT_SYMBOL(al_serdes_handle_init);
+EXPORT_SYMBOL(al_serdes_tx_deemph_preset);
+EXPORT_SYMBOL(al_serdes_pma_hard_reset_lane);
+EXPORT_SYMBOL(al_ssm_dma_init);
+EXPORT_SYMBOL(al_ssm_dma_q_init);
+EXPORT_SYMBOL(al_ssm_dma_state_set);
+EXPORT_SYMBOL(al_ssm_dma_handle_get);
+EXPORT_SYMBOL(al_ssm_dma_rx_queue_handle_get);
+EXPORT_SYMBOL(al_ssm_dma_tx_queue_handle_get);
+EXPORT_SYMBOL(al_udma_fast_memcpy_q_prepare);
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_iofic.c b/arch/arm/mach-alpine/al_hal/al_hal_iofic.c
new file mode 100644
index 0000000..57903eb
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_iofic.c
@@ -0,0 +1,245 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_iofic.c
+ *
+ * @brief interrupt controller hal
+ *
+ */
+
+#include "al_hal_iofic.h"
+#include "al_hal_iofic_regs.h"
+
+/*
+ * Configure the interrupt control register of a given group.
+ * Interrupts are kept masked; only the control flags are programmed.
+ * (Fixed HTML-entity corruption: "&regs" had been mangled to a
+ * registered-trademark character.)
+ *
+ * @param regs_base base of the IOFIC register file
+ * @param group     interrupt group index, must be < AL_IOFIC_MAX_GROUPS
+ * @param flags     value written to int_control_grp
+ * @return 0 on success
+ */
+int al_iofic_config(void __iomem *regs_base, int group, uint32_t flags)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	al_reg_write32(&regs->ctrl[group].int_control_grp, flags);
+
+	return 0;
+}
+
+/*
+ * Configure the moderation timer resolution for a given group.
+ * Read-modify-write of int_control_grp so the other control bits are
+ * preserved. (Fixed "&regs" -> "(R)s" entity corruption; removed stray
+ * blank line between the signature and the body.)
+ */
+int al_iofic_moder_res_config(void __iomem *regs_base, int group,
+		uint8_t resolution)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+	uint32_t reg;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	reg = al_reg_read32(&regs->ctrl[group].int_control_grp);
+	AL_REG_FIELD_SET(reg,
+			 INT_CONTROL_GRP_MOD_RES_MASK,
+			 INT_CONTROL_GRP_MOD_RES_SHIFT,
+			 resolution);
+	al_reg_write32(&regs->ctrl[group].int_control_grp, reg);
+
+	return 0;
+}
+
+/*
+ * Configure the moderation timer interval for a given legacy interrupt
+ * group. Read-modify-write so the remaining control bits are preserved.
+ * (Fixed "&regs" entity corruption.)
+ */
+int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group,
+		uint8_t interval)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+	uint32_t reg;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	reg = al_reg_read32(&regs->ctrl[group].int_control_grp);
+	AL_REG_FIELD_SET(reg,
+			 INT_CONTROL_GRP_MOD_INTV_MASK,
+			 INT_CONTROL_GRP_MOD_INTV_SHIFT,
+			 interval);
+	al_reg_write32(&regs->ctrl[group].int_control_grp, reg);
+
+	return 0;
+}
+
+
+/*
+ * Configure the moderation timer interval for a given msix vector.
+ * (Fixed "&regs" entity corruption.)
+ *
+ * NOTE(review): 'vector' is not range-checked, unlike 'group' — confirm
+ * callers guarantee it is within the per-group vector count.
+ */
+int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
+		uint8_t vector, uint8_t interval)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+	uint32_t reg;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	reg = al_reg_read32(&regs->grp_int_mod[group][vector].grp_int_mod_reg);
+	AL_REG_FIELD_SET(reg,
+			 INT_MOD_INTV_MASK,
+			 INT_MOD_INTV_SHIFT,
+			 interval);
+	al_reg_write32(&regs->grp_int_mod[group][vector].grp_int_mod_reg, reg);
+
+	return 0;
+}
+
+/*
+ * Return the address of the mask-clear (unmask) register for a given
+ * group, so callers can unmask interrupts directly (e.g. from an ISR).
+ * (Fixed "&regs" entity corruption in the return expression.)
+ */
+uint32_t __iomem *al_iofic_unmask_offset_get(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	return &regs->ctrl[group].int_mask_clear_grp;
+}
+
+
+/*
+ * Unmask specific interrupts for a given group.
+ * (Fixed "&regs" entity corruption.)
+ */
+void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	/*
+	 * use the mask clear register, no need to read the mask register
+	 * itself. write 0 to unmask, 1 has no effect
+	 */
+	al_reg_write32_relaxed(&regs->ctrl[group].int_mask_clear_grp, ~mask);
+}
+
+/*
+ * Mask specific interrupts for a given group.
+ * (Fixed "&regs" entity corruption.)
+ *
+ * NOTE(review): this is a non-atomic read-modify-write of int_mask_grp;
+ * confirm callers serialize it against concurrent mask updates.
+ */
+void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+	uint32_t reg;
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	reg = al_reg_read32(&regs->ctrl[group].int_mask_grp);
+
+	al_reg_write32(&regs->ctrl[group].int_mask_grp, reg | mask);
+}
+
+/*
+ * Read the current interrupt mask for a given group.
+ * (Fixed "&regs" entity corruption.)
+ */
+uint32_t al_iofic_read_mask(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	return al_reg_read32(&regs->ctrl[group].int_mask_grp);
+}
+
+/*
+ * Read the interrupt cause register for a given group.
+ * (Fixed "&regs" entity corruption.)
+ */
+uint32_t al_iofic_read_cause(void __iomem *regs_base, int group)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	return al_reg_read32(&regs->ctrl[group].int_cause_grp);
+}
+
+/*
+ * Clear the bits set in 'mask' in the interrupt cause register of a
+ * given group. (Fixed "&regs" entity corruption.)
+ */
+void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	/* inverse mask, writing 1 has no effect */
+	al_reg_write32(&regs->ctrl[group].int_cause_grp, ~mask);
+}
+
+/*
+ * Set (software-trigger) cause bits for a given group.
+ * (Fixed "&regs" entity corruption.)
+ */
+void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask)
+{
+	struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+	al_assert(regs_base);
+	al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+	al_reg_write32(&regs->ctrl[group].int_cause_set_grp, mask);
+}
+
+
+/*
+ * unmask specific interrupts from aborting the udma a given group
+ */
+void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask)
+{
+ struct al_iofic_regs __iomem *regs = (struct al_iofic_regs __iomem *)(regs_base);
+
+ al_assert(regs_base);
+ al_assert(group < AL_IOFIC_MAX_GROUPS);
+
+ al_reg_write32(®s->ctrl[group].int_abort_msk_grp, mask);
+
+}
+
+/** @} end of interrupt controller group */
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_m2m_udma.c b/arch/arm/mach-alpine/al_hal/al_hal_m2m_udma.c
new file mode 100644
index 0000000..f5fb65f
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_m2m_udma.c
@@ -0,0 +1,165 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_m2m_udma.c
+ *
+ * @brief HAL driver for DMA that is a compound of M2S and S2M UDMAs.
+ *
+ */
+
+#include "al_hal_m2m_udma.h"
+#include "al_hal_udma_config.h"
+
+/*
+ * Initialize an M2M UDMA compound: one TX (M2S) UDMA and one RX (S2M)
+ * UDMA sharing the same number of queues.
+ *
+ * @return 0 on success, the failing sub-init's error code otherwise
+ */
+int al_m2m_udma_init(struct al_m2m_udma *m2m_udma,
+		struct al_m2m_udma_params *params)
+{
+	struct al_udma_params udma_params;
+	int err;
+
+	al_dbg("raid [%s]: Initialize unit\n", params->name);
+
+	m2m_udma->name = params->name;
+	m2m_udma->m2s_regs_base = params->m2s_regs_base;
+	m2m_udma->s2m_regs_base = params->s2m_regs_base;
+	m2m_udma->num_of_queues = params->num_of_queues;
+
+	/* Bring up the TX (M2S) side first */
+	udma_params.udma_reg =
+		(union udma_regs __iomem *)m2m_udma->m2s_regs_base;
+	udma_params.type = UDMA_TX;
+	udma_params.num_of_queues = m2m_udma->num_of_queues;
+	udma_params.name = "tx dma";
+
+	err = al_udma_init(&m2m_udma->tx_udma, &udma_params);
+	if (err) {
+		al_err("failed to initialize %s, error %d\n",
+			udma_params.name, err);
+		return err;
+	}
+	al_udma_m2s_max_descs_set(&m2m_udma->tx_udma,
+		params->max_m2s_descs_per_pkt);
+
+	/* Then the RX (S2M) side, reusing the same parameter struct */
+	udma_params.udma_reg =
+		(union udma_regs __iomem *)m2m_udma->s2m_regs_base;
+	udma_params.type = UDMA_RX;
+	udma_params.num_of_queues = m2m_udma->num_of_queues;
+	udma_params.name = "rx dma";
+
+	err = al_udma_init(&m2m_udma->rx_udma, &udma_params);
+	if (err) {
+		al_err("failed to initialize %s, error %d\n",
+			udma_params.name, err);
+		return err;
+	}
+	al_udma_s2m_max_descs_set(&m2m_udma->rx_udma,
+		params->max_s2m_descs_per_pkt);
+
+	return 0;
+}
+
+/*
+ * Initialize queue 'qid' on both sides of the compound: the m2s (tx)
+ * queue and the s2m (rx) queue.
+ */
+int al_m2m_udma_q_init(struct al_m2m_udma *m2m_udma, uint32_t qid,
+		struct al_udma_q_params *tx_params,
+		struct al_udma_q_params *rx_params)
+{
+	int err;
+
+	al_dbg("udma [%s]: Initialize queue %d\n", m2m_udma->name,
+		qid);
+
+	err = al_udma_q_init(&m2m_udma->tx_udma, qid, tx_params);
+	if (err) {
+		al_err("[%s]: failed to initialize tx q %d, error %d\n",
+			m2m_udma->name, qid, err);
+		return err;
+	}
+
+	err = al_udma_q_init(&m2m_udma->rx_udma, qid, rx_params);
+	if (err)
+		al_err("[%s]: failed to initialize rx q %d, error %d\n",
+			m2m_udma->name, qid, err);
+
+	return err;
+}
+
+/*
+ * Move both underlying UDMAs to the requested state, TX side first,
+ * then RX; stop at the first failure.
+ */
+int al_m2m_udma_state_set(struct al_m2m_udma *m2m_udma,
+		enum al_udma_state udma_state)
+{
+	struct al_udma *sides[2] = { &m2m_udma->tx_udma, &m2m_udma->rx_udma };
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		int err = al_udma_state_set(sides[i], udma_state);
+
+		if (err != 0) {
+			al_err("[%s]: failed to change state, error %d\n",
+				m2m_udma->name, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Get the udma handle of the tx or rx side. The returned handle can be
+ * passed to the misc configuration helpers declared in al_udma_config.h.
+ * Any type other than UDMA_TX selects the rx side.
+ */
+int al_m2m_udma_handle_get(struct al_m2m_udma *m2m_udma,
+		enum al_udma_type type,
+		struct al_udma **udma)
+{
+	*udma = (type == UDMA_TX) ? &m2m_udma->tx_udma : &m2m_udma->rx_udma;
+	return 0;
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_pcie.c b/arch/arm/mach-alpine/al_hal/al_hal_pcie.c
new file mode 100644
index 0000000..509a15b
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_pcie.c
@@ -0,0 +1,1415 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "al_hal_pcie.h"
+#include "al_hal_pbs_regs.h"
+#include "al_hal_pcie_regs.h"
+#include "al_hal_unit_adapter_regs.h"
+
+/* --->>> Parameters definitions <<<--- */
+#define AL_PCIE_REV_ID_0 0
+#define AL_PCIE_REV_ID_1 1
+
+#define AL_PCIE_AXI_REGS_OFFSET 0x0
+#define AL_PCIE_APP_REGS_OFFSET 0x1000
+#define AL_PCIE_CORE_CONF_BASE_OFFSET 0x2000
+
+#define AL_PCIE_LTSSM_STATE_L0 0x11
+#define AL_PCIE_LTSSM_STATE_L0S 0x12
+#define AL_PCIE_DEVCTL_PAYLOAD_128B 0x00
+#define AL_PCIE_DEVCTL_PAYLOAD_256B 0x20
+
+#define AL_PCIE_SECBUS_DEFAULT 0x1
+#define AL_PCIE_SUBBUS_DEFAULT 0x1
+#define AL_PCIE_LINKUP_WAIT_INTERVAL 50 /* measured in usec */
+#define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC 20
+
+#define AL_PCIE_LINKUP_RETRIES 8
+
+#define AL_PCIE_MAX_32_MEMORY_BAR_SIZE (0x100000000ULL)
+#define AL_PCIE_MIN_MEMORY_BAR_SIZE (1 << 12)
+#define AL_PCIE_MIN_IO_BAR_SIZE (1 << 8)
+
+
+/* --->>> MACROS <<<--- */
+#define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \
+ PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
+
+/* --->>> static functions <<<--- */
+
+static void
+al_pcie_port_enable_wr_to_rd_only(struct al_pcie_port *pcie_port)
+{
+	/* The unlock is sticky per port handle — only perform it once */
+	if (pcie_port->write_to_read_only_enabled != AL_TRUE) {
+		al_dbg("PCIe %d: Enable write to Read Only fields\n",
+			pcie_port->port_id);
+		al_reg_write32(&pcie_port->regs->core_space.port_regs.rd_only_wr_en, 1);
+		pcie_port->write_to_read_only_enabled = AL_TRUE;
+	}
+}
+
+/** Helper to write a core register through the dbi_cs2 shadow window,
+ * which is mapped 0x1000 bytes above the regular register window.
+ */
+void al_reg_write32_dbi_cs2(uint32_t *offset, uint32_t val)
+{
+	uint32_t *cs2_offset = offset + (0x1000 >> 2);
+
+	al_reg_write32(cs2_offset, val);
+}
+
+/*
+ * Set the number of active lanes of the port in the global conf
+ * register and cache it in the port handle.
+ * (Fixed "&regs" entity corruption.)
+ */
+int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	/* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */
+	uint32_t active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
+
+	al_reg_write32_masked(&regs->axi.pcie_global.conf,
+		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
+		active_lanes_val);
+
+	pcie_port->max_lanes = lanes;
+	return 0;
+}
+
+/*
+ * Enable/disable the port memory shutdown bit in the global conf
+ * register. (Fixed "&regs" entity corruption.)
+ */
+void al_pcie_port_memory_shutdown_set(
+	struct al_pcie_port *pcie_port,
+	al_bool enable)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32_masked(&regs->axi.pcie_global.conf,
+		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN,
+		enable == AL_TRUE ?
+		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN : 0);
+}
+
+/* Map a link-speed enum to its PCIe generation number (1/2/3). */
+static unsigned int al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
+{
+	switch (speed) {
+	case AL_PCIE_LINK_SPEED_GEN1:
+		return 1;
+	case AL_PCIE_LINK_SPEED_GEN2:
+		return 2;
+	case AL_PCIE_LINK_SPEED_GEN3:
+		return 3;
+	default:
+		/* must not be reached */
+		return 0;
+	}
+}
+
+/*
+ * Configure the port link parameters: cap the link speed in both the
+ * link capability register and link control 2. Reversal mode is not
+ * implemented. (Fixed "&regs" entity corruption.)
+ */
+static int
+al_pcie_port_link_config(struct al_pcie_port *pcie_port,
+			 struct al_pcie_link_params *link_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_dbg("PCIe %d: link config: max speed gen %d, max lanes %d, reversal %s\n",
+		pcie_port->port_id, link_params->max_speed,
+		pcie_port->max_lanes, link_params->enable_reversal ? "enable" : "disable");
+
+	al_pcie_port_enable_wr_to_rd_only(pcie_port);
+
+	if (link_params->max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
+		uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(link_params->max_speed);
+		/* max speed lives in bits [3:0] of both registers */
+		al_reg_write32_masked((uint32_t __iomem *)(&regs->core_space.pcie_link_cap_base),
+			0xF, max_speed_val);
+		al_reg_write32_masked((uint32_t __iomem *)(&regs->core_space.pcie_cap_base + (AL_PCI_EXP_LNKCTL2 >> 2)),
+			0xF, max_speed_val);
+	}
+
+	/* TODO: add support for reversal mode */
+	if (link_params->enable_reversal) {
+		al_err("PCIe %d: enabling reversal mode not implemented\n",
+			pcie_port->port_id);
+		return -ENOSYS;
+	}
+	return 0;
+}
+
+/*
+ * Enable/disable all core-RAM parity error interrupts.
+ * (Fixed "&regs" entity corruption.)
+ */
+static void al_pcie_port_ram_parity_int_config(
+	struct al_pcie_port *pcie_port,
+	al_bool enable)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32(&regs->app.parity.en_core,
+		(enable == AL_TRUE) ? 0xffffffff : 0x0);
+}
+
+/*
+ * Enable/disable all AXI parity error interrupts.
+ * (Fixed "&regs" entity corruption.)
+ */
+static void al_pcie_port_axi_parity_int_config(
+	struct al_pcie_port *pcie_port,
+	al_bool enable)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32(&regs->axi.parity.en_axi,
+		(enable == AL_TRUE) ? 0xffffffff : 0x0);
+}
+
+/*
+ * Program the ACK latency limit (bits [15:0]) and the replay timer
+ * limit (bits [31:16]) in one register write.
+ * (Fixed "&regs" entity corruption.)
+ */
+static int
+al_pcie_port_lat_rply_timers_config(struct al_pcie_port *pcie_port,
+	struct al_pcie_latency_replay_timers *lat_rply_timers)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg = 0;
+
+	AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
+	AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);
+
+	al_reg_write32(&regs->core_space.port_regs.ack_lat_rply_timer, reg);
+	return 0;
+}
+
+/*
+ * Configure AXI master snoop overrides for inbound reads and writes.
+ * The override bit is always set; the snoop bit itself is set or
+ * cleared depending on 'enable_axi_snoop'.
+ * (Fixed "&regs" entity corruption.)
+ */
+int
+al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	/* Set snoop mode */
+	al_info("PCIE_%d: snoop mode %s\n",
+		pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
+
+	if (enable_axi_snoop) {
+		al_reg_write32_masked(&regs->axi.ctrl.master_arctl,
+			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
+			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP);
+
+		al_reg_write32_masked(&regs->axi.ctrl.master_awctl,
+			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
+			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP);
+	} else {
+		al_reg_write32_masked(&regs->axi.ctrl.master_arctl,
+			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
+			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP);
+
+		al_reg_write32_masked(&regs->axi.ctrl.master_awctl,
+			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
+			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP);
+	}
+	return 0;
+}
+
+/*
+ * Configure Gen2 link parameters (tx swing, tx compliance receive,
+ * deemphasis) via a read-modify-write of the gen2 control register.
+ * (Fixed "&regs" entity corruption.)
+ */
+static int
+al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
+				struct al_pcie_gen2_params *gen2_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t gen2_ctrl;
+
+	al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n",
+		pcie_port->port_id,
+		gen2_params->tx_swing_low ? "Low" : "Full",
+		gen2_params->tx_compliance_receive_enable ? "enable" : "disable",
+		gen2_params->set_deemphasis ? "enable" : "disable");
+
+	gen2_ctrl = al_reg_read32(&regs->core_space.port_regs.gen2_ctrl);
+
+	if (gen2_params->tx_swing_low)
+		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
+	else
+		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
+
+	if (gen2_params->tx_compliance_receive_enable)
+		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
+	else
+		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
+
+	if (gen2_params->set_deemphasis)
+		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
+	else
+		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
+
+	al_reg_write32(&regs->core_space.port_regs.gen2_ctrl, gen2_ctrl);
+
+	return 0;
+}
+
+
+/* Pack one lane's equalization presets into the 16-bit layout of the
+ * lane equalization control register:
+ * [3:0] downstream tx preset, [6:4] downstream rx hint,
+ * [11:8] upstream tx preset, [14:12] upstream rx hint.
+ */
+static uint16_t
+gen3_lane_eq_param_to_val(struct al_pcie_gen3_lane_eq_params *eq_params)
+{
+	return (uint16_t)((eq_params->downstream_port_transmitter_preset & 0xF) |
+		((eq_params->downstream_port_receiver_preset_hint & 0x7) << 4) |
+		((eq_params->upstream_port_transmitter_preset & 0xF) << 8) |
+		((eq_params->upstream_port_receiver_preset_hint & 0x7) << 12));
+}
+
+/*
+ * Configure Gen3 link parameters: equalization control, per-lane
+ * equalization presets, FS/LF values (both in the port registers and
+ * the per-lane AXI conf "zero lane" registers) and the Gen3 EQ control
+ * register. (Fixed "&regs" entity corruption.)
+ */
+static int
+al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
+				struct al_pcie_gen3_params *gen3_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg = 0;
+	uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(&regs->core_space.pcie_sec_ext_cap_base + (0xC >> 2));
+	int i;
+
+	al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
+		pcie_port->port_id,
+		gen3_params->perform_eq ? "enable" : "disable",
+		gen3_params->interrupt_enable_on_link_eq_request ? "enable" : "disable");
+
+	if (gen3_params->perform_eq)
+		AL_REG_BIT_SET(reg, 0);
+	if (gen3_params->interrupt_enable_on_link_eq_request)
+		AL_REG_BIT_SET(reg, 1);
+
+	al_reg_write32(&regs->core_space.pcie_sec_ext_cap_base + (4 >> 2),
+		reg);
+
+	/*
+	 * Lane EQ registers are 16 bits wide; program two lanes per
+	 * 32-bit write.
+	 * NOTE(review): an odd eq_params_elements would read one entry
+	 * past the array — confirm callers always pass an even count.
+	 */
+	for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
+		uint32_t eq_control =
+			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
+			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;
+
+		al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
+		al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
+	}
+
+	reg = al_reg_read32(&regs->core_space.port_regs.gen3_ctrl);
+	if (gen3_params->eq_disable)
+		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
+	else
+		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
+
+	if (gen3_params->eq_phase2_3_disable)
+		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
+	else
+		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
+
+	al_reg_write32(&regs->core_space.port_regs.gen3_ctrl, reg);
+
+	reg = 0;
+	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
+		PCIE_PORT_GEN3_EQ_LF_SHIFT,
+		gen3_params->local_lf);
+	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
+		PCIE_PORT_GEN3_EQ_FS_SHIFT,
+		gen3_params->local_fs);
+
+	al_reg_write32(&regs->core_space.port_regs.gen3_eq_fs_lf, reg);
+
+	reg = 0;
+	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
+		PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
+		gen3_params->local_lf);
+	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
+		PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
+		gen3_params->local_fs);
+	al_reg_write32(&regs->axi.conf.zero_lane0, reg);
+	al_reg_write32(&regs->axi.conf.zero_lane1, reg);
+	al_reg_write32(&regs->axi.conf.zero_lane2, reg);
+	al_reg_write32(&regs->axi.conf.zero_lane3, reg);
+
+	/*
+	 * Gen3 EQ Control Register:
+	 * - Preset Request Vector - request 3-5
+	 * - Behavior After 24 ms Timeout (when optimal settings are not
+	 *   found): Recovery.Equalization.RcvrLock
+	 * - Phase2_3 2 ms Timeout Disable
+	 * - Feedback Mode - Figure Of Merit
+	 */
+	reg = 0x00001831;
+	al_reg_write32(&regs->core_space.port_regs.gen3_eq_ctrl, reg);
+
+	return 0;
+}
+
+/* Transport-layer credits configuration — not implemented yet. */
+static int
+al_pcie_port_tl_credits_config(struct al_pcie_port *pcie_port,
+	struct al_pcie_tl_credits_params *tl_credits __attribute__((__unused__)))
+{
+	al_err("PCIe %d: transport layer credits config not implemented\n",
+		pcie_port->port_id);
+	return -ENOSYS;
+}
+
+/*
+ * Configure the port for End-Point mode: optionally strip the D1/D3hot,
+ * FLR and ASPM capabilities, enable relaxed ordering, program the six
+ * BARs (via the dbi_cs2 shadow window) and open the CPU-generated
+ * MSI/legacy interrupt paths.
+ * (Fixed "&regs" entity corruption throughout.)
+ *
+ * @return 0 on success, -EINVAL on inconsistent BAR parameters,
+ *         -ENOSYS for unsupported features
+ */
+static int
+al_pcie_port_ep_params_config(struct al_pcie_port *pcie_port,
+			      struct al_pcie_ep_params *ep_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	int bar_idx;
+
+	al_pcie_port_enable_wr_to_rd_only(pcie_port);
+
+	/* Disable D1 and D3hot capabilities */
+	if (ep_params->cap_d1_d3hot_dis)
+		al_reg_write32_masked(
+			&regs->core_space.pcie_pm_cap_base,
+			AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);
+
+	/* Disable FLR capability */
+	if (ep_params->cap_flr_dis)
+		al_reg_write32_masked(
+			&regs->core_space.pcie_dev_cap_base,
+			AL_BIT(28), 0);
+
+	/* Disable ASPM capability */
+	if (ep_params->cap_aspm_dis) {
+		al_reg_write32_masked(
+			&regs->core_space.pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
+			AL_PCI_EXP_LNKCAP_ASPMS, 0);
+	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
+		al_warn("%s: ASPM support is enabled, please disable it\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Enable relaxed PCIe ordering: Disable read completion after write
+	 * ordering.
+	 */
+	if (ep_params->relaxed_pcie_ordering)
+		al_reg_write32_masked(
+			&regs->axi.ordering.pos_cntl,
+			AL_BIT(5) | AL_BIT(10),
+			AL_BIT(5) | AL_BIT(10));
+
+	if (!ep_params->bar_params_valid)
+		return 0;
+
+	for (bar_idx = 0; bar_idx < 6;) { /* bar_idx will be incremented depending on bar type */
+		struct al_pcie_ep_bar_params *params = ep_params->bar_params + bar_idx;
+		uint32_t mask = 0;
+		uint32_t ctrl = 0;
+		uint32_t __iomem *bar_addr = &regs->core_space.config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];
+
+		if (params->enable) {
+			uint64_t size = params->size;
+
+			if (params->memory_64_bit) {
+				struct al_pcie_ep_bar_params *next_params = params + 1;
+				/* 64 bars start at even index (BAR0, BAR 2 or BAR 4) */
+				if (bar_idx & 1)
+					return -EINVAL;
+
+				/* next BAR must be disabled */
+				if (next_params->enable)
+					return -EINVAL;
+
+				/* 64 bar must be memory bar */
+				if (!params->memory_space)
+					return -EINVAL;
+			} else {
+				if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE)
+					return -EINVAL;
+				/* 32 bit space can't be prefetchable */
+				if (params->memory_is_prefetchable)
+					return -EINVAL;
+			}
+
+			if (params->memory_space) {
+				if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
+					al_err("PCIe %d: memory BAR %d: size (0x%llx) less that minimal allowed value\n",
+						pcie_port->port_id, bar_idx, size);
+					return -EINVAL;
+				}
+			} else {
+				/* IO can't be prefetchable */
+				if (params->memory_is_prefetchable)
+					return -EINVAL;
+
+				if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
+					al_err("PCIe %d: IO BAR %d: size (0x%llx) less that minimal allowed value\n",
+						pcie_port->port_id, bar_idx, size);
+					return -EINVAL;
+				}
+			}
+
+			/* size must be power of 2 */
+			if (size & (size - 1)) {
+				al_err("PCIe %d: BAR %d:size (0x%llx) must be "
+					"power of 2\n",
+					pcie_port->port_id, bar_idx, size);
+				return -EINVAL;
+			}
+
+			/* If BAR is 64-bit, disable the next BAR before
+			 * configuring this one
+			 */
+			if (params->memory_64_bit)
+				al_reg_write32_dbi_cs2(bar_addr + 1, 0);
+
+			mask = 1; /* enable bit*/
+			mask |= (params->size - 1) & 0xFFFFFFFF;
+
+			al_reg_write32_dbi_cs2(bar_addr, mask);
+
+			if (params->memory_space == AL_FALSE)
+				ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
+			if (params->memory_64_bit)
+				ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
+			if (params->memory_is_prefetchable)
+				ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
+			al_reg_write32(bar_addr, ctrl);
+
+			if (params->memory_64_bit) {
+				mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
+				al_reg_write32_dbi_cs2(bar_addr + 1, mask);
+			}
+
+		} else {
+			/* disabled BAR: clear its mask (mask is still 0 here) */
+			al_reg_write32_dbi_cs2(bar_addr, mask);
+		}
+		if (params->enable && params->memory_64_bit)
+			bar_idx += 2;
+		else
+			bar_idx += 1;
+	}
+	if (ep_params->exp_bar_params.enable) {
+		al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
+		return -ENOSYS;
+	}
+
+	/* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
+	al_reg_write32(&regs->app.soc_int.mask_inta_leg_0, (1 << 21));
+
+	/**
+	 * Addressing RMN: 1547
+	 *
+	 * RMN description:
+	 * 1. Whenever writing to 0x2xx offset, the write also happens to
+	 *    0x3xx address, meaning two registers are written instead of one.
+	 * 2. Read and write from 0x3xx work ok.
+	 *
+	 * Software flow:
+	 * Backup the value of the app.int_grp_a.mask_a register, because
+	 * app.int_grp_a.mask_clear_a gets overwritten during the write to
+	 * app.soc.mask_msi_leg_0 register.
+	 * Restore the original value after the write to app.soc.mask_msi_leg_0
+	 * register.
+	 */
+	if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
+		uint32_t backup;
+
+		backup = al_reg_read32(&regs->app.int_grp_a_m0.mask_a);
+		al_reg_write32(&regs->app.soc_int.mask_msi_leg_0, (1 << 22));
+		al_reg_write32(&regs->app.int_grp_a_m0.mask_a, backup);
+	} else
+		al_reg_write32(&regs->app.soc_int.mask_msi_leg_0, (1 << 22));
+
+	return 0;
+}
+
+/*
+ * Program optional port features (currently only the SATA EP MSI fix).
+ * Only valid on rev 1 and later, hence the assert.
+ * (Fixed "&regs" entity corruption.)
+ */
+static void
+al_pcie_port_features_config(struct al_pcie_port *pcie_port,
+			     struct al_pcie_features *features)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_assert(pcie_port->rev_id > AL_PCIE_REV_ID_0);
+
+	al_reg_write32_masked(
+		&regs->app.ctrl_gen.features,
+		PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX,
+		features->sata_ep_msi_fix ?
+		PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX : 0);
+}
+
+/*
+ * Program the inbound header credits: posted and non-posted receive
+ * queue header credit counts (values are stored minus one in hardware).
+ * (Fixed "&regs" entity corruption.)
+ */
+static void
+al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32_masked(
+		&regs->core_space.port_regs.vc0_posted_rcv_q_ctrl,
+		RADM_PQ_HCRD_VC0_MASK,
+		(pcie_port->nof_p_hdr - 1) << RADM_PQ_HCRD_VC0_SHIFT);
+
+	al_reg_write32_masked(
+		&regs->core_space.port_regs.vc0_non_posted_rcv_q_ctrl,
+		RADM_NPQ_HCRD_VC0_MASK,
+		(pcie_port->nof_np_hdr - 1) << RADM_NPQ_HCRD_VC0_SHIFT);
+}
+
+/*
+ * Read the device-type field of the global conf register and translate
+ * it to EP/RC; unknown encodings are logged and reported as UNKNOWN.
+ * (Fixed "&regs" entity corruption.)
+ */
+enum al_pcie_function_mode al_pcie_function_mode_get(
+	struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg, device_type;
+
+	al_assert(pcie_port);
+
+	reg = al_reg_read32(&regs->axi.pcie_global.conf);
+
+	device_type = AL_REG_FIELD_GET(reg,
+		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
+		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
+
+	switch (device_type) {
+	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP:
+		return AL_PCIE_FUNCTION_MODE_EP;
+	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC:
+		return AL_PCIE_FUNCTION_MODE_RC;
+	default:
+		al_err("PCIe %d: unknown device type (%d) in global conf "
+			"register.\n",
+			pcie_port->port_id, device_type);
+	}
+	return AL_PCIE_FUNCTION_MODE_UNKNOWN;
+}
+
+/*
+ * Configure SR-IOV related settings; only valid when the port is in
+ * End-Point mode (asserted). (Fixed "&regs" entity corruption.)
+ */
+static void
+al_pcie_port_ep_iov_setup(
+	struct al_pcie_port *pcie_port,
+	struct al_pcie_ep_iov_params *ep_iov_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	enum al_pcie_function_mode func_mode =
+		al_pcie_function_mode_get(pcie_port);
+
+	al_assert(func_mode == AL_PCIE_FUNCTION_MODE_EP);
+
+	al_reg_write32_masked(
+		&regs->axi.pre_configuration.pcie_core_setup,
+		PCIE_AXI_CORE_SETUP_SRIOV_ENABLE,
+		((ep_iov_params->sriov_vfunc_en == AL_TRUE) ?
+		 PCIE_AXI_CORE_SETUP_SRIOV_ENABLE : 0));
+
+	al_reg_write32_masked(&regs->app.cfg_elbi.emulation,
+		PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN,
+		((ep_iov_params->support_32b_address_in_iov == AL_TRUE) ?
+		 PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN : 0));
+}
+
+
+/******************** link operations ***************************************/
+
+/** return AL_TRUE if link is up (LTSSM in L0 or L0S), AL_FALSE otherwise.
+ * If 'ltssm_ret' is non-NULL, the raw LTSSM state is stored there.
+ * (Fixed "&regs" entity corruption; dropped a redundant cast of
+ * pcie_port->regs, which already has the right type.)
+ */
+static al_bool al_pcie_check_link(struct al_pcie_port *pcie_port,
+				  uint8_t *ltssm_ret)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t info_0;
+	uint8_t ltssm_state;
+
+	info_0 = al_reg_read32(&regs->app.debug.info_0);
+
+	ltssm_state = AL_REG_FIELD_GET(info_0,
+		PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
+		PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);
+
+	al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state :0x%x\n",
+		pcie_port->port_id, info_0, ltssm_state);
+
+	if (ltssm_ret)
+		*ltssm_ret = ltssm_state;
+
+	if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) ||
+	    (ltssm_state == AL_PCIE_LTSSM_STATE_L0S))
+		return AL_TRUE;
+	return AL_FALSE;
+}
+
+/******************************* API functions ********************************/
+/** Enable PCIe port (deassert reset) */
+int al_pcie_port_enable(
+	struct al_pcie_port *pcie_port,
+	void __iomem *pbs_reg_base)
+{
+	struct al_pbs_regs *pbs_regs = (struct al_pbs_regs *)pbs_reg_base;
+	struct al_pcie_regs *regs = pcie_port->regs;
+	unsigned int port_id = pcie_port->port_id;
+
+	/*
+	 * Disable ATS capability
+	 * - must be done before core reset deasserted
+	 * - rev_id 0 - no effect, but no harm
+	 */
+	al_reg_write32_masked(
+		&regs->axi.ordering.pos_cntl,
+		PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
+		PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
+
+	/* Deassert core reset by setting the port's "exist" bit */
+	al_reg_write32_masked(
+		&pbs_regs->unit.pcie_conf_1,
+		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
+		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));
+
+	return 0;
+}
+
+/** Disable PCIe port (assert reset) */
+void al_pcie_port_disable(
+	struct al_pcie_port *pcie_port,
+	void __iomem *pbs_reg_base)
+{
+	struct al_pbs_regs *pbs_regs = (struct al_pbs_regs *)pbs_reg_base;
+	uint32_t port_exist_bit =
+		1 << (pcie_port->port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);
+
+	/* Assert core reset by clearing the port's "exist" bit */
+	al_reg_write32_masked(&pbs_regs->unit.pcie_conf_1, port_exist_bit, 0);
+}
+
+/** Initializes a PCIe handle structure; performs no hardware access */
+int al_pcie_handle_init(struct al_pcie_port *pcie_port,
+			 void __iomem *pcie_reg_base,
+			 unsigned int port_id)
+{
+	/* record identity first, then reset all cached state to defaults */
+	pcie_port->port_id = port_id;
+	pcie_port->regs = pcie_reg_base;
+	pcie_port->ib_hcrd_config_required = AL_FALSE;
+	pcie_port->write_to_read_only_enabled = AL_FALSE;
+	pcie_port->max_lanes = 0;
+
+	al_dbg("pcie port handle initialized. port id: %d. regs base %p\n",
+	       port_id, pcie_reg_base);
+	return 0;
+}
+
+/** configure function mode (root complex or endpoint) */
+int
+al_pcie_port_func_mode_config(struct al_pcie_port *pcie_port,
+			      enum al_pcie_function_mode mode)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg, device_type, new_device_type;
+
+	reg = al_reg_read32(&regs->axi.pcie_global.conf);
+
+	device_type = AL_REG_FIELD_GET(reg,
+			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
+			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
+	if (mode == AL_PCIE_FUNCTION_MODE_EP)
+		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
+	else
+		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;
+
+	/* avoid a redundant register write if the mode is unchanged */
+	if (new_device_type == device_type) {
+		al_dbg("PCIe %d: function mode already set to %s\n",
+			pcie_port->port_id, (mode == AL_PCIE_FUNCTION_MODE_EP) ?
+			"EndPoint" : "Root Complex");
+		return 0;
+	}
+	al_info("PCIe %d: set function mode to %s\n",
+		pcie_port->port_id, (mode == AL_PCIE_FUNCTION_MODE_EP) ?
+		"EndPoint" : "Root Complex");
+	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
+			 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
+			 new_device_type);
+
+	al_reg_write32(&regs->axi.pcie_global.conf, reg);
+
+	return 0;
+}
+
+/* Inbound header credits and outstanding outbound reads configuration */
+void al_pcie_port_ib_hcrd_os_ob_reads_config(
+	struct al_pcie_port *pcie_port,
+	struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	/* every credit pool must be non-empty */
+	al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
+
+	al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
+
+	al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);
+
+	/* the three pools together must consume the whole credit budget */
+	al_assert(
+		(ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
+		 ib_hcrd_os_ob_reads_config->nof_np_hdr +
+		 ib_hcrd_os_ob_reads_config->nof_p_hdr) == AL_PCIE_IB_HCRD_SUM);
+
+	/* completion credits must cover the requested outstanding reads */
+	al_assert(
+		(ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads *
+		 (unsigned int)AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO) <=
+		ib_hcrd_os_ob_reads_config->nof_cpl_hdr);
+
+	al_assert(
+		ib_hcrd_os_ob_reads_config->nof_p_hdr <=
+		AL_PCIE_NOF_P_NP_HDR_MAX);
+
+	al_assert(
+		ib_hcrd_os_ob_reads_config->nof_np_hdr <=
+		AL_PCIE_NOF_P_NP_HDR_MAX);
+
+	al_reg_write32_masked(
+		&regs->axi.init_fc.cfg,
+		PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_MASK |
+		PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_MASK |
+		PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_MASK,
+		(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
+		 PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
+		(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
+		 PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
+		(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
+		 PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
+
+	al_reg_write32_masked(
+		&regs->axi.pre_configuration.pcie_core_setup,
+		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
+		ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
+		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);
+
+	/* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
+	pcie_port->nof_np_hdr = ib_hcrd_os_ob_reads_config->nof_np_hdr;
+	pcie_port->nof_p_hdr = ib_hcrd_os_ob_reads_config->nof_p_hdr;
+	pcie_port->ib_hcrd_config_required = AL_TRUE;
+}
+
+/*TODO: move those defines */
+/** return current function mode (root complex or endpoint) */
+/* Thin alias kept for API compatibility; forwards to
+ * al_pcie_function_mode_get() */
+enum al_pcie_function_mode
+al_pcie_function_type_get(struct al_pcie_port *pcie_port)
+{
+	return al_pcie_function_mode_get(pcie_port);
+}
+
+/** configure pcie port (link params, etc..); returns 0 on success */
+int al_pcie_port_config(struct al_pcie_port *pcie_port,
+			struct al_pcie_config_params *params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	int status = 0;
+
+	al_assert(pcie_port);
+	al_assert(params);
+
+	al_dbg("PCIe %d: port config\n", pcie_port->port_id);
+
+	/* Read revision ID */
+	pcie_port->rev_id = al_reg_read32(
+		(uint32_t __iomem *)(&regs->core_space.config_header[0] +
+		(PCI_CLASS_REVISION >> 2))) & 0xff;
+
+	/* rev 0 uses the older ("m0") interrupt-group register layout */
+	if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
+		pcie_port->app_int_grp_a_base =
+			(uint32_t __iomem *)&regs->app.int_grp_a_m0;
+		pcie_port->app_int_grp_b_base =
+			(uint32_t __iomem *)&regs->app.int_grp_b_m0;
+	} else {
+		pcie_port->app_int_grp_a_base =
+			(uint32_t __iomem *)&regs->app.int_grp_a;
+		pcie_port->app_int_grp_b_base =
+			(uint32_t __iomem *)&regs->app.int_grp_b;
+	}
+
+	pcie_port->axi_int_grp_a_base =
+		(uint32_t __iomem *)&regs->axi.int_grp_a;
+
+	/* if max lanes not specified, read it from register */
+	if (pcie_port->max_lanes == 0) {
+		uint32_t global_conf = al_reg_read32(&regs->axi.pcie_global.conf);
+		/*
+		 * NOTE(review): mask is NOF_ACT_LANES but the shift is
+		 * DEV_TYPE_SHIFT - looks like a copy/paste mismatch; confirm
+		 * against the register layout before changing.
+		 */
+		pcie_port->max_lanes = AL_REG_FIELD_GET(global_conf,
+				PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
+				PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
+	}
+
+	if (params->link_params)
+		status = al_pcie_port_link_config(pcie_port, params->link_params);
+	if (status)
+		goto done;
+
+	status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
+	if (status)
+		goto done;
+
+	al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
+
+	al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
+
+	if (params->lat_rply_timers)
+		status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
+	if (status)
+		goto done;
+
+	if (params->gen2_params)
+		status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
+	if (status)
+		goto done;
+
+	if (params->gen3_params)
+		status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
+	if (status)
+		goto done;
+
+	if (params->tl_credits)
+		status = al_pcie_port_tl_credits_config(pcie_port, params->tl_credits);
+	if (status)
+		goto done;
+
+	if (params->ep_params)
+		status = al_pcie_port_ep_params_config(pcie_port, params->ep_params);
+	if (status)
+		goto done;
+
+	if (params->features)
+		al_pcie_port_features_config(pcie_port, params->features);
+
+	/* apply header credits stored by al_pcie_port_ib_hcrd_os_ob_reads_config */
+	if (pcie_port->ib_hcrd_config_required == AL_TRUE)
+		al_pcie_port_ib_hcrd_config(pcie_port);
+
+	if (params->ep_iov_params)
+		al_pcie_port_ep_iov_setup(pcie_port, params->ep_iov_params);
+
+	if (params->fast_link_mode) {
+		al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
+			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
+	}
+
+	if (params->enable_axi_slave_err_resp)
+		al_reg_write32_masked(&regs->core_space.port_regs.axi_slave_err_resp,
+				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
+				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
+
+	/* enable memory and I/O access from port when in RC mode*/
+	if (params->function_mode == AL_PCIE_FUNCTION_MODE_RC) {
+		al_reg_write16_masked((uint16_t __iomem *)(&regs->core_space.config_header[0] + (0x4 >> 2)),
+				      0x7, /* Mem, MSE, IO */
+				      0x7);
+		/* change the class code to match pci bridge */
+		al_reg_write32_masked((uint32_t __iomem *)(&regs->core_space.config_header[0] + (PCI_CLASS_REVISION >> 2)),
+				      0xFFFFFF00,
+				      0x06040000);
+	}
+done:
+	al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status? "failed": "done");
+
+	return status;
+}
+
+/* Enable/disable deferring incoming configuration requests */
+void al_pcie_app_req_retry_set(
+	struct al_pcie_port *pcie_port,
+	al_bool en)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32_masked(
+		&regs->app.global_ctrl.pm_control,
+		PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN,
+		(en == AL_TRUE) ?
+		PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN : 0);
+}
+
+/* start pcie link: enable LTSSM so link training begins */
+int al_pcie_link_start(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
+
+	al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
+
+	al_reg_write32_masked(
+		&regs->app.global_ctrl.port_init,
+		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN,
+		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN);
+
+	return 0;
+}
+
+/* stop pcie link: clear the LTSSM enable bit */
+int al_pcie_link_stop(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
+
+	al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
+
+	/* masked write: only the LTSSM_EN bit is affected, and cleared */
+	al_reg_write32_masked(
+		&regs->app.global_ctrl.port_init,
+		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN,
+		~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN);
+
+	return 0;
+}
+
+/* Poll for a link-up indication until it appears or the budget expires.
+ * NOTE(review): attempts = timeout_ms * INTERVALS_PER_SEC mixes ms with a
+ * per-second constant - verify the units of the two macros. */
+int al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
+{
+	int attempts = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
+
+	for (; attempts > 0; attempts--) {
+		if (al_pcie_check_link(pcie_port, NULL)) {
+			al_info("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
+			return 0;
+		}
+		al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
+			pcie_port->port_id, attempts - 1);
+		al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
+	}
+	al_info("PCIE_%d: link is not established in time\n",
+		pcie_port->port_id);
+
+	return -ETIME;
+}
+
+/** get link status: link up/down, LTSSM state, speed and lane count */
+int al_pcie_link_status(struct al_pcie_port *pcie_port,
+			struct al_pcie_link_status *status)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint16_t pcie_lnksta;
+
+	al_assert(status);
+
+	status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
+
+	/* speed/width are meaningless while the link is down */
+	if (!status->link_up) {
+		status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
+		status->lanes = 0;
+		return 0;
+	}
+
+	pcie_lnksta = al_reg_read16((uint16_t __iomem *)&regs->core_space.pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
+
+	switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
+		case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
+			status->speed = AL_PCIE_LINK_SPEED_GEN1;
+			break;
+		case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
+			status->speed = AL_PCIE_LINK_SPEED_GEN2;
+			break;
+		case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
+			status->speed = AL_PCIE_LINK_SPEED_GEN3;
+			break;
+		default:
+			status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
+			al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
+				pcie_port->port_id, pcie_lnksta);
+	}
+	status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
+	al_info("PCIe %d: Link up. speed gen%d negotiated width %d\n",
+		pcie_port->port_id, status->speed, status->lanes);
+
+	return 0;
+}
+
+/** trigger hot reset */
+/* Stub: hot reset is not implemented yet; always returns -ENOSYS */
+int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port)
+{
+	al_err("PCIe %d: link hot reset not implemented\n",
+		pcie_port->port_id);
+
+	return -ENOSYS;
+}
+
+/* TODO: check if this function needed */
+/* Stub: speed change is not implemented; always returns -ENOSYS */
+int al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
+			      enum al_pcie_link_speed new_speed __attribute__((__unused__)))
+{
+	al_err("PCIe %d: link change speed not implemented\n",
+		pcie_port->port_id);
+
+	return -ENOSYS;
+}
+
+/* TODO: check if this function needed */
+/* Stub: width change is not implemented; always returns -ENOSYS */
+int al_pcie_link_change_width(struct al_pcie_port *pcie_port,
+			      uint8_t width __attribute__((__unused__)))
+{
+	al_err("PCIe %d: link change width not implemented\n",
+		pcie_port->port_id);
+
+	return -ENOSYS;
+}
+
+/** set target_bus and mask_target_bus for outbound config accesses */
+int al_pcie_target_bus_set(struct al_pcie_port *pcie_port,
+			   uint8_t target_bus,
+			   uint8_t mask_target_bus)
+{
+	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
+	uint32_t reg;
+
+	/* read-modify-write: only the two bus fields are updated */
+	reg = al_reg_read32(&regs->axi.ob_ctrl.cfg_target_bus);
+	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
+			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
+			mask_target_bus);
+	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
+			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
+			target_bus);
+	al_reg_write32(&regs->axi.ob_ctrl.cfg_target_bus, reg);
+	return 0;
+}
+
+/** get target_bus and mask_target_bus */
+int al_pcie_target_bus_get(struct al_pcie_port *pcie_port,
+			   uint8_t *target_bus,
+			   uint8_t *mask_target_bus)
+{
+	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
+	uint32_t reg;
+
+	al_assert(target_bus);
+	al_assert(mask_target_bus);
+
+	reg = al_reg_read32(&regs->axi.ob_ctrl.cfg_target_bus);
+
+	*mask_target_bus = AL_REG_FIELD_GET(reg,
+				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
+				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
+	*target_bus = AL_REG_FIELD_GET(reg,
+				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
+				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
+	return 0;
+}
+
+/** Set secondary bus number */
+int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	uint32_t secbus_val = (secbus <<
+			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
+
+	al_reg_write32_masked(
+		&regs->axi.ob_ctrl.cfg_control,
+		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
+		secbus_val);
+	return 0;
+}
+
+/** Set sub-ordinary bus number */
+int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	uint32_t subbus_val = (subbus <<
+			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
+
+	al_reg_write32_masked(
+		&regs->axi.ob_ctrl.cfg_control,
+		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
+		subbus_val);
+	return 0;
+}
+
+/** get base address of pci configuration space header */
+int al_pcie_config_space_get(struct al_pcie_port *pcie_port,
+			     uint8_t __iomem **addr)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	*addr = (uint8_t __iomem *)&regs->core_space.config_header[0];
+	return 0;
+}
+
+/* Read one 32-bit word from the local configuration space.
+ * 'reg_offset' is a dword index into the config header. */
+uint32_t al_pcie_cfg_emul_local_cfg_space_read(
+	struct al_pcie_port	*pcie_port,
+	unsigned int	reg_offset)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t data;
+
+	data = al_reg_read32(&regs->core_space.config_header[reg_offset]);
+
+	return data;
+}
+
+/* Write one 32-bit word to the local configuration space.
+ * When 'ro' is set, the write goes through the write-to-read-only
+ * aperture (+0x1000 bytes). */
+void al_pcie_cfg_emul_local_cfg_space_write(
+	struct al_pcie_port	*pcie_port,
+	unsigned int	reg_offset,
+	uint32_t	data,
+	al_bool		ro)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t *offset = (ro == AL_FALSE) ?
+		(&regs->core_space.config_header[reg_offset]) :
+		(&regs->core_space.config_header[reg_offset] + (0x1000 >> 2));
+
+	al_reg_write32(offset, data);
+}
+
+/* Configure the AXI I/O window [start, end] and enable the I/O BAR */
+void al_pcie_axi_io_config(struct al_pcie_port *pcie_port,
+			   al_phys_addr_t start,
+			   al_phys_addr_t end)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_reg_write32(&regs->axi.ob_ctrl.io_start_h,
+			(uint32_t)((start >> 32) & 0xFFFFFFFF));
+
+	al_reg_write32(&regs->axi.ob_ctrl.io_start_l,
+			(uint32_t)(start & 0xFFFFFFFF));
+
+	al_reg_write32(&regs->axi.ob_ctrl.io_limit_h,
+			(uint32_t)((end >> 32) & 0xFFFFFFFF));
+
+	al_reg_write32(&regs->axi.ob_ctrl.io_limit_l,
+			(uint32_t)(end & 0xFFFFFFFF));
+
+	al_reg_write32_masked(&regs->axi.ctrl.slv_ctl,
+			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
+			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
+}
+
+/** program internal ATU region entry */
+int al_pcie_atu_region_set(struct al_pcie_port *pcie_port, struct al_pcie_atu_region *atu_region)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg = 0;
+	uint32_t limit_reg_val;
+
+	/*TODO : add sanity check */
+	/* select the region (index + direction) in the iATU index register */
+	AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index);
+	AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction);
+	al_reg_write32(&regs->core_space.port_regs.iatu.index, reg);
+
+	al_reg_write32(&regs->core_space.port_regs.iatu.lower_base_addr,
+			(uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
+	al_reg_write32(&regs->core_space.port_regs.iatu.upper_base_addr,
+			(uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
+	al_reg_write32(&regs->core_space.port_regs.iatu.lower_target_addr,
+			(uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
+	al_reg_write32(&regs->core_space.port_regs.iatu.upper_target_addr,
+			(uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
+
+	/* rev > 0 supports >4GB windows via the extended limit registers */
+	if (pcie_port->rev_id > AL_PCIE_REV_ID_0) {
+		uint32_t *limit_ext_reg =
+			(atu_region->direction == al_pcie_atu_dir_outbound) ?
+			&regs->app.atu.out_mask_pair[atu_region->index / 2] :
+			&regs->app.atu.in_mask_pair[atu_region->index / 2];
+		uint32_t limit_ext_reg_mask =
+			(atu_region->index % 2) ?
+			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
+			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
+		unsigned int limit_ext_reg_shift =
+			(atu_region->index % 2) ?
+			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
+			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
+		uint64_t limit_sz =
+			atu_region->limit - atu_region->base_addr;
+		uint64_t limit_sz_msk = limit_sz - 1;
+		uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
+					32) & 0xFFFFFFFF);
+
+		if (limit_ext_reg_val) {
+			/* window crosses 4GB: low limit must be all-ones */
+			limit_reg_val =	(uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
+			al_assert(limit_reg_val == 0xFFFFFFFF);
+		} else {
+			limit_reg_val = (uint32_t)(atu_region->limit &
+					0xFFFFFFFF);
+		}
+
+		al_reg_write32_masked(
+				limit_ext_reg,
+				limit_ext_reg_mask,
+				limit_ext_reg_val << limit_ext_reg_shift);
+	} else {
+		limit_reg_val = (uint32_t)(atu_region->limit & 0xFFFFFFFF);
+	}
+
+	al_reg_write32(&regs->core_space.port_regs.iatu.limit_addr,
+			limit_reg_val);
+
+	reg = 0;
+	AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
+	AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
+	al_reg_write32(&regs->core_space.port_regs.iatu.cr1, reg);
+
+	/* Enable/disable the region. */
+	reg = 0;
+	AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
+	AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
+	AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
+	AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
+	AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
+	AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
+	if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
+		AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
+	AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable);
+
+	al_reg_write32(&regs->core_space.port_regs.iatu.cr2, reg);
+
+	return 0;
+}
+
+/** generate INTx Assert/DeAssert Message */
+int al_pcie_legacy_int_gen(struct al_pcie_port *pcie_port, al_bool assert,
+			   enum al_pcie_legacy_int_type type)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg;
+
+	al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */
+	/* bit 3 of events_gen drives the INTA assert level */
+	reg = al_reg_read32(&regs->app.global_ctrl.events_gen);
+	AL_REG_BIT_VAL_SET(reg, 3, !!assert);
+	al_reg_write32(&regs->app.global_ctrl.events_gen, reg);
+
+	return 0;
+}
+
+/** generate MSI interrupt for the given vector */
+int al_pcie_msi_int_gen(struct al_pcie_port *pcie_port, uint8_t vector)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t reg;
+
+	/* set msi vector and clear MSI request (bit 4) */
+	reg = al_reg_read32(&regs->app.global_ctrl.events_gen);
+	AL_REG_BIT_CLEAR(reg, 4);
+	AL_REG_FIELD_SET(reg,
+			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
+			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
+			vector);
+	al_reg_write32(&regs->app.global_ctrl.events_gen, reg);
+	/* set MSI request - 0->1 transition triggers the interrupt */
+	AL_REG_BIT_SET(reg, 4);
+	al_reg_write32(&regs->app.global_ctrl.events_gen, reg);
+
+	return 0;
+}
+
+/** configure MSIX capability (table size, table/PBA offsets and BARs) */
+int al_pcie_msix_config(
+	struct al_pcie_port *pcie_port,
+	struct al_pcie_msix_params *msix_params)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t msix_reg0 = al_reg_read32(&regs->core_space.msix_cap_base);
+
+	/* table size field is encoded as (number of entries - 1) */
+	msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
+	msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
+			AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
+	al_reg_write32(&regs->core_space.msix_cap_base, msix_reg0);
+
+	/* Table offset & BAR */
+	al_reg_write32(&regs->core_space.msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
+		       (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
+		       (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));
+	/* PBA offset & BAR */
+	al_reg_write32(&regs->core_space.msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
+		       (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
+		       (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));
+
+	return 0;
+}
+
+/** check whether MSIX is enabled in the message control register */
+al_bool al_pcie_msix_enabled(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t msix_reg0 = al_reg_read32(&regs->core_space.msix_cap_base);
+
+	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN)
+		return AL_TRUE;
+	return AL_FALSE;
+}
+
+/** check whether MSIX is globally masked (function-mask bit) */
+al_bool al_pcie_msix_masked(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+	uint32_t msix_reg0 = al_reg_read32(&regs->core_space.msix_cap_base);
+
+	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK)
+		return AL_TRUE;
+	return AL_FALSE;
+}
+
+/********************** Loopback mode (RC and Endpoint modes) ************/
+
+/** enter local pipe loopback mode */
+int al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id);
+
+	/* enable PIPE loopback, then enable loopback at the link level */
+	al_reg_write32_masked(&regs->core_space.port_regs.pipe_loopback_ctrl,
+			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
+			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
+
+	al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
+			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
+
+	return 0;
+}
+
+/**
+ * @brief exit local pipe loopback mode
+ *
+ * @param pcie_port	pcie port handle
+ * @return	0 if no error found
+ */
+int al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id);
+
+	/* clear both loopback enables set by the enter path */
+	al_reg_write32_masked(&regs->core_space.port_regs.pipe_loopback_ctrl,
+			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
+			      0);
+
+	al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
+			      0);
+	return 0;
+}
+
+/** enter remote loopback mode */
+int al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id);
+
+	/*
+	 * NOTE(review): this writes port_link_ctrl using the PIPE-loopback
+	 * shift, while the exit path uses PCIE_PORT_LINK_CTRL_LB_EN_SHIFT;
+	 * verify which bit is intended before changing.
+	 */
+	al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
+			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
+
+	return 0;
+}
+
+
+/**
+ * @brief exit remote loopback mode
+ *
+ * @param pcie_port	pcie port handle
+ * @return	0 if no error found
+ */
+int al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
+{
+	struct al_pcie_regs *regs = pcie_port->regs;
+
+	al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id);
+
+	al_reg_write32_masked(&regs->core_space.port_regs.port_link_ctrl,
+			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
+			      0);
+	return 0;
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_pcie_axi_reg.h b/arch/arm/mach-alpine/al_hal/al_hal_pcie_axi_reg.h
new file mode 100644
index 0000000..240bc19
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_pcie_axi_reg.h
@@ -0,0 +1,538 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_PCIE_HAL_AXI_REG_H__
+#define __AL_PCIE_HAL_AXI_REG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+/* AXI bridge control registers (master/slave channel control) */
+struct al_pcie_axi_ctrl {
+	/* [0x0] */
+	uint32_t global;
+	uint32_t rsrvd_0;
+	/* [0x8] */
+	uint32_t master_bctl;
+	/* [0xc] */
+	uint32_t master_rctl;
+	/* [0x10] */
+	uint32_t master_ctl;
+	/* [0x14] */
+	uint32_t master_arctl;
+	/* [0x18] */
+	uint32_t master_awctl;
+	/* [0x1c] */
+	uint32_t slave_rctl;
+	/* [0x20] */
+	uint32_t slv_wctl;
+	/* [0x24] */
+	uint32_t slv_ctl;
+	/* [0x28] */
+	uint32_t dbi_ctl;
+	/* [0x2c] */
+	uint32_t vmid_mask;
+	uint32_t rsrvd[4];
+};
+/* Outbound (AXI-to-PCIe) control: target bus and I/O / message windows */
+struct al_pcie_axi_ob_ctrl {
+	/* [0x0] */
+	uint32_t cfg_target_bus;
+	/* [0x4] */
+	uint32_t cfg_control;
+	/* [0x8] */
+	uint32_t io_start_l;
+	/* [0xc] */
+	uint32_t io_start_h;
+	/* [0x10] */
+	uint32_t io_limit_l;
+	/* [0x14] */
+	uint32_t io_limit_h;
+	/* [0x18] */
+	uint32_t msg_start_l;
+	/* [0x1c] */
+	uint32_t msg_start_h;
+	/* [0x20] */
+	uint32_t msg_limit_l;
+	/* [0x24] */
+	uint32_t msg_limit_h;
+	uint32_t rsrvd[6];
+};
+/* Captured AXI message address/type */
+struct al_pcie_axi_msg {
+	/* [0x0] */
+	uint32_t addr_high;
+	/* [0x4] */
+	uint32_t addr_low;
+	/* [0x8] */
+	uint32_t type;
+};
+/* PCIe status debug register */
+struct al_pcie_axi_pcie_status {
+	/* [0x0] */
+	uint32_t debug;
+};
+/* Read parity error log (64-bit, split high/low) */
+struct al_pcie_axi_rd_parity {
+	/* [0x0] */
+	uint32_t log_high;
+	/* [0x4] */
+	uint32_t log_low;
+};
+/* Read completion error log */
+struct al_pcie_axi_rd_cmpl {
+	/* [0x0] */
+	uint32_t cmpl_log_high;
+	/* [0x4] */
+	uint32_t cmpl_log_low;
+};
+/* Read timeout log */
+struct al_pcie_axi_rd_to {
+	/* [0x0] */
+	uint32_t to_log_high;
+	/* [0x4] */
+	uint32_t to_log_low;
+};
+/* Write completion error log */
+struct al_pcie_axi_wr_cmpl {
+	/* [0x0] */
+	uint32_t wr_cmpl_log_high;
+	/* [0x4] */
+	uint32_t wr_cmpl_log_low;
+};
+/* Write timeout log */
+struct al_pcie_axi_wr_to {
+	/* [0x0] */
+	uint32_t wr_to_log_high;
+	/* [0x4] */
+	uint32_t wr_to_log_low;
+};
+/* Global PCIe configuration (device type, active lanes, ...) */
+struct al_pcie_axi_pcie_global {
+	/* [0x0] */
+	uint32_t conf;
+};
+/* Per-lane status */
+struct al_pcie_axi_status {
+	/* [0x0] */
+	uint32_t lane0;
+	/* [0x4] */
+	uint32_t lane1;
+	/* [0x8] */
+	uint32_t lane2;
+	/* [0xc] */
+	uint32_t lane3;
+};
+/* Per-lane configuration */
+struct al_pcie_axi_conf {
+	/* [0x0] */
+	uint32_t zero_lane0;
+	/* [0x4] */
+	uint32_t zero_lane1;
+	/* [0x8] */
+	uint32_t zero_lane2;
+	/* [0xc] */
+	uint32_t zero_lane3;
+	/* [0x10] */
+	uint32_t one_lane0;
+	/* [0x14] */
+	uint32_t one_lane1;
+	/* [0x18] */
+	uint32_t one_lane2;
+	/* [0x1c] */
+	uint32_t one_lane3;
+};
+/* AXI parity enable/status */
+struct al_pcie_axi_parity {
+	/* [0x0] */
+	uint32_t en_axi;
+	/* [0x4] */
+	uint32_t status_axi;
+};
+/* Logged posted-ordering error address */
+struct al_pcie_axi_pos_logged {
+	/* [0x0] */
+	uint32_t error_low;
+	/* [0x4] */
+	uint32_t error_high;
+};
+/* Posted-ordering control (includes ATS capability disable bit) */
+struct al_pcie_axi_ordering {
+	/* [0x0] */
+	uint32_t pos_cntl;
+};
+/* Link-down reset extension control */
+struct al_pcie_axi_link_down {
+	/* [0x0] */
+	uint32_t reset_extend;
+};
+/* Core setup performed before reset deassertion (SR-IOV, OS reads) */
+struct al_pcie_axi_pre_configuration {
+	/* [0x0] */
+	uint32_t pcie_core_setup;
+};
+/* Inbound flow-control header credit configuration */
+struct al_pcie_axi_init_fc {
+	/* [0x0] The sum of all the fields below must be 97 */
+	uint32_t cfg;
+};
+/* AXI interrupt group A: cause/mask/status register set */
+struct al_pcie_axi_int_grp_a_axi {
+	/* [0x0] Interrupt Cause RegisterSet by hardware */
+	uint32_t cause;
+	uint32_t rsrvd_0;
+	/* [0x8] Interrupt Cause Set RegisterWriting 1 to a bit in t ... */
+	uint32_t cause_set;
+	uint32_t rsrvd_1;
+	/* [0x10] Interrupt Mask RegisterIf Auto-mask control bit =TR ... */
+	uint32_t mask;
+	uint32_t rsrvd_2;
+	/* [0x18] Interrupt Mask Clear RegisterUsed when auto-mask co ... */
+	uint32_t mask_clear;
+	uint32_t rsrvd_3;
+	/* [0x20] Interrupt Status RegisterThis register latches the ... */
+	uint32_t status;
+	uint32_t rsrvd_4;
+	/* [0x28] Interrupt Control Register */
+	uint32_t control;
+	uint32_t rsrvd_5;
+	/* [0x30] Interrupt Mask RegisterEach bit in this register ma ... */
+	uint32_t abort_mask;
+	uint32_t rsrvd_6;
+	/* [0x38] Interrupt Log RegisterEach bit in this register mas ... */
+	uint32_t log_mask;
+	uint32_t rsrvd;
+};
+
+/* Top-level AXI register file; offsets in brackets are byte offsets
+ * from the AXI block base */
+struct al_pcie_axi_regs {
+	struct al_pcie_axi_ctrl ctrl;     /* [0x0] */
+	struct al_pcie_axi_ob_ctrl ob_ctrl; /* [0x40] */
+	uint32_t rsrvd_0[4];
+	struct al_pcie_axi_msg msg;       /* [0x90] */
+	struct al_pcie_axi_pcie_status pcie_status; /* [0x9c] */
+	struct al_pcie_axi_rd_parity rd_parity; /* [0xa0] */
+	struct al_pcie_axi_rd_cmpl rd_cmpl; /* [0xa8] */
+	struct al_pcie_axi_rd_to rd_to; /* [0xb0] */
+	struct al_pcie_axi_wr_cmpl wr_cmpl; /* [0xb8] */
+	struct al_pcie_axi_wr_to wr_to; /* [0xc0] */
+	struct al_pcie_axi_pcie_global pcie_global; /* [0xc8] */
+	struct al_pcie_axi_status status; /* [0xcc] */
+	struct al_pcie_axi_conf conf; /* [0xdc] */
+	struct al_pcie_axi_parity parity; /* [0xfc] */
+	struct al_pcie_axi_pos_logged pos_logged; /* [0x104] */
+	struct al_pcie_axi_ordering ordering; /* [0x10c] */
+	struct al_pcie_axi_link_down link_down; /* [0x110] */
+	struct al_pcie_axi_pre_configuration pre_configuration; /* [0x114] */
+	struct al_pcie_axi_init_fc init_fc; /* [0x118] */
+	uint32_t rsrvd_1[57];
+	struct al_pcie_axi_int_grp_a_axi int_grp_a; /* [0x200] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** Global register ****/
+/* Not in use */
+#define PCIE_AXI_CTRL_GLOBAL_CPL_AFTER_P_ORDER_DIS (1 << 0)
+/* Not in use */
+#define PCIE_AXI_CTRL_GLOBAL_CPU_CPL_ONLY_EN (1 << 1)
+/* When linked down, map all transactions to PCIe to DEC ERR. */
+#define PCIE_AXI_CTRL_GLOBAL_BLOCK_PCIE_SLAVE_EN (1 << 2)
+/* Wait for the NIC to flush before enabling reset to the PCIe c ... */
+#define PCIE_AXI_CTRL_GLOBAL_WAIT_SLV_FLUSH_EN (1 << 3)
+/* When the BME is cleared and this bit is set, it causes all tr ... */
+#define PCIE_AXI_CTRL_GLOBAL_MEM_BAR_MAP_TO_ERR (1 << 4)
+/* Wait for the DBI port (the port that enables access to the in ... */
+#define PCIE_AXI_CTRL_GLOBAL_WAIT_DBI_FLUSH_EN (1 << 5)
+/* When set, adds parity on the write and read address channels, ... */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR (1 << 16)
+/* When set, enables parity check on the read data. */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD (1 << 17)
+/* When set, adds parity on the RD data channel. */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV (1 << 18)
+/* When set, enables parity check on the write data. */
+#define PCIE_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR (1 << 19)
+/* When set, error track for timeout and parity is disabled, i */
+#define PCIE_AXI_CTRL_GLOBAL_ERROR_TRACK_DIS (1 << 20)
+
+/**** Master_Arctl register ****/
+/* override arcache */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_OVR_ARCACHE (1 << 0)
+/* arcache value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARACHE_VA_MASK 0x0000001E
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARACHE_VA_SHIFT 1
+/* arprot override */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_OVR (1 << 5)
+/* arprot value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_MASK 0x000001C0
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARPROT_VALUE_SHIFT 6
+/* vmid val */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_MASK 0x01FFFE00
+#define PCIE_AXI_CTRL_MASTER_ARCTL_VMID_VAL_SHIFT 9
+/* IPA value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_IPA_VAL (1 << 25)
+/* override snoop indication, if not set take it from mstr_armisc ... */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP (1 << 26)
+/*
+snoop indication value when override */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP (1 << 27)
+/*
+arqos value */
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK 0xF0000000
+#define PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT 28
+
+/**** Master_Awctl register ****/
+/* override awcache */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_OVR_ARCACHE (1 << 0)
+/* awcache value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWACHE_VA_MASK 0x0000001E
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWACHE_VA_SHIFT 1
+/* awprot override */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_OVR (1 << 5)
+/* awprot value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_MASK 0x000001C0
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWPROT_VALUE_SHIFT 6
+/* vmid val */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_MASK 0x01FFFE00
+#define PCIE_AXI_CTRL_MASTER_AWCTL_VMID_VAL_SHIFT 9
+/* IPA value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_IPA_VAL (1 << 25)
+/* override snoop indication, if not set take it from mstr_armisc ... */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP (1 << 26)
+/*
+snoop indication value when override */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP (1 << 27)
+/*
+awqos value */
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK 0xF0000000
+#define PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT 28
+
+/**** slv_ctl register ****/
+#define PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN (1 << 6)
+
+/**** Cfg_Target_Bus register ****/
+/* Defines which MSBs to complete the number of the bus that ar ... */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK 0x000000FF
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT 0
+/* Target bus number for outbound configuration type0 and type1 ... */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK 0x0000FF00
+#define PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT 8
+
+/**** Cfg_Control register ****/
+/* Primary bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_PBUS_MASK 0x000000FF
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_PBUS_SHIFT 0
+/*
+Subordinate bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK 0x0000FF00
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT 8
+/* Secondary bus number */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK 0x00FF0000
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT 16
+/* Enable outbound configuration access through iATU. */
+#define PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN (1 << 31)
+
+/**** IO_Start_H register ****/
+/*
+Outbound iATU I/O start address high */
+#define PCIE_AXI_MISC_OB_CTRL_IO_START_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_IO_START_H_ADDR_SHIFT 0
+
+/**** IO_Limit_H register ****/
+/*
+Outbound iATU I/O limit address high */
+#define PCIE_AXI_MISC_OB_CTRL_IO_LIMIT_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_IO_LIMIT_H_ADDR_SHIFT 0
+
+/**** Msg_Start_H register ****/
+/*
+Outbound iATU msg-no-data start address high */
+#define PCIE_AXI_MISC_OB_CTRL_MSG_START_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_MSG_START_H_ADDR_SHIFT 0
+
+/**** Msg_Limit_H register ****/
+/*
+Outbound iATU msg-no-data limit address high */
+#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_MASK 0x000003FF
+#define PCIE_AXI_MISC_OB_CTRL_MSG_LIMIT_H_ADDR_SHIFT 0
+
+/**** type register ****/
+/* Type of message */
+#define PCIE_AXI_MISC_MSG_TYPE_TYPE_MASK 0x00FFFFFF
+#define PCIE_AXI_MISC_MSG_TYPE_TYPE_SHIFT 0
+/* Reserved */
+#define PCIE_AXI_MISC_MSG_TYPE_RSRVD_MASK 0xFF000000
+#define PCIE_AXI_MISC_MSG_TYPE_RSRVD_SHIFT 24
+
+/**** debug register ****/
+/* Causes AXI PCIe reset, including master/slave/DBI (registers ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_AXI_BRIDGE_RESET (1 << 0)
+/* Causes reset of the entire PCIe core (including the AXI bridg ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_CORE_RESET (1 << 1)
+/* Indicates that the SB is empty from the request to the PCIe ( ... */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_SB_FLUSH_OB_STATUS (1 << 2)
+/* MAP and transaction to the PCIe core to ERROR. */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_SB_MAP_TO_ERR (1 << 3)
+/* Indicates that the pcie_core clock is gated off */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_CORE_CLK_GATE_OFF (1 << 4)
+/* Reserved */
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_RSRVD_MASK 0xFFFFFFE0
+#define PCIE_AXI_MISC_PCIE_STATUS_DEBUG_RSRVD_SHIFT 5
+
+/**** conf register ****/
+/* Device TypeIndicates the specific type of this PCI Express Fu ... */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK 0x0000000F
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT 0
+/* [4] – Lane 0 active[5] – Lane 1 active[6] – Lane 2 active[7] ... */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK 0x000000F0
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT 4
+/* [8] SD to the memories */
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN 0x100
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_RESERVED_MASK 0xFFFFFE00
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_RESERVED_SHIFT 9
+
+/**** zero_laneX register ****/
+/* phy_mac_local_fs */
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK 0x0000003f
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT 0
+/* phy_mac_local_lf */
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK 0x00000fc0
+#define PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT 6
+
+/**** pos_cntl register ****/
+/* Disables POS. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_BYPASS (1 << 0)
+/* Clear the POS data structure. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_CLEAR (1 << 1)
+/* Read push all write. */
+#define PCIE_AXI_POS_ORDER_AXI_POS_RSO_ENABLE (1 << 2)
+/* Causes the PCIe core to wait for all the BRESPs before issuin ... */
+#define PCIE_AXI_POS_ORDER_AXI_DW_RD_FLUSH_WR (1 << 3)
+/* When set, to 1'b1 supports interleaving data return from the ... */
+#define PCIE_AXI_POS_ORDER_RD_CMPL_AFTER_WR_SUPPORT_RD_INTERLV (1 << 4)
+/* When set, to 1'b1 disables read completion after write orderi ... */
+#define PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX (1 << 5)
+/* When set, disables EP mode read cmpl on the master port push ... */
+#define PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS (1 << 6)
+/* When set, disables EP mode read cmpl on the master port push ... */
+#define PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS (1 << 7)
+/* When set disable the ATS CAP. */
+#define PCIE_AXI_CORE_SETUP_ATS_CAP_DIS AL_BIT(13)
+
+/**** pcie_core_setup register ****/
+/* This Value delay the rate change to the serdes, until the EIO ... */
+#define PCIE_AXI_CORE_SETUP_DELAY_MAC_PHY_RATE_MASK 0x000000FF
+#define PCIE_AXI_CORE_SETUP_DELAY_MAC_PHY_RATE_SHIFT 0
+/* Limit the number of outstanding AXI reads that the PCIe core ... */
+#define PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK 0x0000FF00
+#define PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT 8
+/* Enable the sriov feature */
+#define PCIE_AXI_CORE_SETUP_SRIOV_ENABLE AL_BIT(16)
+
+/**** cfg register ****/
+/* This value set the possible out standing headers writes (post ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_MASK 0x0000007F
+#define PCIE_AXI_INIT_FC_CFG_NOF_P_HDR_SHIFT 0
+/* This value set the possible out standing headers reads (non-p ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_MASK 0x00003F80
+#define PCIE_AXI_INIT_FC_CFG_NOF_NP_HDR_SHIFT 7
+/* This value set the possible out standing headers CMPLs , the ... */
+#define PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_MASK 0x001FC000
+#define PCIE_AXI_INIT_FC_CFG_NOF_CPL_HDR_SHIFT 14
+
+#define PCIE_AXI_INIT_FC_CFG_RSRVD_MASK 0xFFE00000
+#define PCIE_AXI_INIT_FC_CFG_RSRVD_SHIFT 21
+
+/**** int_cause_grp_A_axi register ****/
+/* Master Response Composer Lookup ErrorOverflow that occurred i ... */
+#define PCIE_AXI_INT_GRP_A_CAUSE_GM_COMPOSER_LOOKUP_ERR (1 << 0)
+/* Indicates a PARITY ERROR on the master data read channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD (1 << 2)
+/* Indicates a PARITY ERROR on the slave addr read channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD (1 << 3)
+/* Indicates a PARITY ERROR on the slave addr write channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR (1 << 4)
+/* Indicates a PARITY ERROR on the slave data write channel */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR (1 << 5)
+/* Reserved */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RESERVED_6 (1 << 6)
+/* Software error: ECAM read request with invalid bus number */
+#define PCIE_AXI_INT_GRP_A_CAUSE_SW_ECAM_ERR_RD (1 << 7)
+/* Software error: ECAM write request with invalid bus number */
+#define PCIE_AXI_INT_GRP_A_CAUSE_SW_ECAM_ERR_WR (1 << 8)
+/* Indicates an ERROR in the PCIe application cause register. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PCIE_CORE_INT (1 << 9)
+/* Whenever the Master AXI finishes writing a message, it sets t ... */
+#define PCIE_AXI_INT_GRP_A_CAUSE_MSTR_AXI_GETOUT_MSG (1 << 10)
+/* Read AXI completion has ERROR. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RD_CMPL_ERR (1 << 11)
+/* Write AXI completion has ERROR. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_WR_CMPL_ERR (1 << 12)
+/* Read AXI completion has timed out. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_RD_CMPL_TO (1 << 13)
+/* Write AXI completion has timed out. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_WR_CMPL_TO (1 << 14)
+/* Parity error AXI domain */
+#define PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI (1 << 15)
+/* POS error interrupt */
+#define PCIE_AXI_INT_GRP_A_CAUSE_POS_AXI_BRESP (1 << 16)
+/* The outstanding write counter becomes full; should never happe ... */
+#define PCIE_AXI_INT_GRP_A_CAUSE_WRITE_CNT_FULL_ERR (1 << 17)
+/* BRESP received before the write counter increment. */
+#define PCIE_AXI_INT_GRP_A_CAUSE_BRESP_BEFORE_WR_CNT_INC_ERR (1 << 18)
+
+/**** int_control_grp_A_axi register ****/
+/* When Clear_on_Read =1, all bits of the Cause register are cle ... */
+#define PCIE_AXI_INT_GRP_A_CTRL_CLEAR_ON_READ (1 << 0)
+/* (Must be set only when MSIX is enabled */
+#define PCIE_AXI_INT_GRP_A_CTRL_AUTO_MASK (1 << 1)
+/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */
+#define PCIE_AXI_INT_GRP_A_CTRL_AUTO_CLEAR (1 << 2)
+/* When set,_on_Posedge =1, the bits in the Interrupt Cause regi ... */
+#define PCIE_AXI_INT_GRP_A_CTRL_SET_ON_POS (1 << 3)
+/* When Moderation_Reset =1, all Moderation timers associated wi ... */
+#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RST (1 << 4)
+/* When mask_msi_x =1, no MSI-X from this group is sent */
+#define PCIE_AXI_INT_GRP_A_CTRL_MASK_MSI_X (1 << 5)
+/* MSI-X AWID value. Same ID for all cause bits. */
+#define PCIE_AXI_INT_GRP_A_CTRL_AWID_MASK 0x00000F00
+#define PCIE_AXI_INT_GRP_A_CTRL_AWID_SHIFT 8
+/* This value determines the interval between interrupts */
+#define PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_MASK 0x00FF0000
+#define PCIE_AXI_INT_GRP_A_CTRL_MOD_INTV_SHIFT 16
+/* This value determines the Moderation_Timer_Clock speed */
+#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_MASK 0x0F000000
+#define PCIE_AXI_INT_GRP_A_CTRL_MOD_RES_SHIFT 24
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_pcie_axi_REG_H */
+
+/** @} end of ... group */
+
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_pcie_interrupts.c b/arch/arm/mach-alpine/al_hal/al_hal_pcie_interrupts.c
new file mode 100644
index 0000000..68de3f9
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_pcie_interrupts.c
@@ -0,0 +1,73 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "al_hal_pcie_interrupts.h"
+#include "al_hal_pcie_regs.h"
+#include "al_hal_iofic_regs.h"
+
+/* Enable PCIe controller interrupts */
+int al_pcie_ints_config(struct al_pcie_port *pcie_port)
+{
+ al_iofic_config(pcie_port->app_int_grp_a_base, 0,
+ INT_CONTROL_GRP_SET_ON_POSEDGE);
+ al_iofic_config(pcie_port->app_int_grp_b_base, 0, 0);
+ al_iofic_config(pcie_port->axi_int_grp_a_base, 0, 0);
+
+ return 0;
+}
+
+void al_pcie_app_int_grp_a_unmask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask)
+{
+ al_iofic_unmask(pcie_port->app_int_grp_a_base, 0, int_mask);
+}
+
+void al_pcie_app_int_grp_a_mask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask)
+{
+ al_iofic_mask(pcie_port->app_int_grp_a_base, 0, int_mask);
+}
+
+void al_pcie_app_int_grp_b_unmask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask)
+{
+ al_iofic_unmask(pcie_port->app_int_grp_b_base, 0, int_mask);
+}
+
+void al_pcie_app_int_grp_b_mask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask)
+{
+ al_iofic_mask(pcie_port->app_int_grp_b_base, 0, int_mask);
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_pcie_regs.h b/arch/arm/mach-alpine/al_hal/al_hal_pcie_regs.h
new file mode 100644
index 0000000..7ac1c3e
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_pcie_regs.h
@@ -0,0 +1,182 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __AL_HAL_PCIE_REGS_H__
+#define __AL_HAL_PCIE_REGS_H__
+
+#include "al_hal_pcie_axi_reg.h"
+#include "al_hal_pcie_w_reg.h"
+
+#define AL_PCIE_AXI_REGS_OFFSET 0x0
+#define AL_PCIE_APP_REGS_OFFSET 0x1000
+#define AL_PCIE_CORE_CONF_BASE_OFFSET 0x2000
+
+struct al_pcie_core_iatu_regs {
+ uint32_t index;
+ uint32_t cr1;
+ uint32_t cr2;
+ uint32_t lower_base_addr;
+ uint32_t upper_base_addr;
+ uint32_t limit_addr;
+ uint32_t lower_target_addr;
+ uint32_t upper_target_addr;
+ uint32_t cr3;
+ uint32_t rsrvd[(0x270 - 0x224) >> 2];
+};
+
+struct al_pcie_core_port_regs {
+ uint32_t ack_lat_rply_timer;
+ uint32_t reserved1[(0x10 - 0x4) >> 2];
+ uint32_t port_link_ctrl;
+ uint32_t reserved2[(0x1c - 0x14) >> 2];
+ uint32_t filter_mask_reg_1;
+ uint32_t reserved3[(0x48 - 0x20) >> 2];
+ uint32_t vc0_posted_rcv_q_ctrl;
+ uint32_t vc0_non_posted_rcv_q_ctrl;
+ uint32_t vc0_comp_rcv_q_ctrl;
+ uint32_t reserved4[(0x10C - 0x54) >> 2];
+ uint32_t gen2_ctrl;
+ uint32_t reserved5[(0x190 - 0x110) >> 2];
+ uint32_t gen3_ctrl;
+ uint32_t gen3_eq_fs_lf;
+ uint32_t gen3_eq_preset_to_coef_map;
+ uint32_t gen3_eq_preset_idx;
+ uint32_t reserved6;
+ uint32_t gen3_eq_status;
+ uint32_t gen3_eq_ctrl;
+ uint32_t reserved7[(0x1B8 - 0x1AC) >> 2];
+ uint32_t pipe_loopback_ctrl;
+ uint32_t rd_only_wr_en;
+ uint32_t reserved8[(0x1D0 - 0x1C0) >> 2];
+ uint32_t axi_slave_err_resp;
+ uint32_t reserved9[(0x200 - 0x1D4) >> 2];
+ struct al_pcie_core_iatu_regs iatu;
+ uint32_t reserved10[(0x448 - 0x270) >> 2];
+};
+
+struct al_pcie_core_reg_space {
+ uint32_t config_header[0x40 >> 2];
+ uint32_t pcie_pm_cap_base;
+ uint32_t reserved1[(0x70 - 0x44) >> 2];
+ uint32_t pcie_cap_base;
+ uint32_t pcie_dev_cap_base;
+ uint32_t reserved2;
+ uint32_t pcie_link_cap_base;
+ uint32_t reserved3[(0xB0 - 0x80) >> 2];
+ uint32_t msix_cap_base;
+ uint32_t reserved4[(0x100 - 0xB4) >> 2];
+ uint32_t pcie_aer_cap_base;
+ uint32_t reserved5[(0x150 - 0x104) >> 2];
+ uint32_t pcie_sec_ext_cap_base;
+ uint32_t reserved6[(0x700 - 0x154) >> 2];
+ struct al_pcie_core_port_regs port_regs;
+};
+
+struct al_pcie_regs {
+ struct al_pcie_axi_regs __iomem axi;
+ uint32_t reserved1[(AL_PCIE_APP_REGS_OFFSET -
+ (AL_PCIE_AXI_REGS_OFFSET +
+ sizeof(struct al_pcie_axi_regs))) >> 2];
+ struct al_pcie_w_regs __iomem app;
+ uint32_t reserved2[(AL_PCIE_CORE_CONF_BASE_OFFSET -
+ (AL_PCIE_APP_REGS_OFFSET +
+ sizeof(struct al_pcie_w_regs))) >> 2];
+ struct al_pcie_core_reg_space core_space;
+};
+
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP 0
+#define PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC 4
+
+#define PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT 18
+#define PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT 19
+#define PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT 20
+
+#define PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT 9
+#define PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT 16
+
+#define PCIE_PORT_GEN3_EQ_LF_SHIFT 0
+#define PCIE_PORT_GEN3_EQ_LF_MASK 0x3f
+#define PCIE_PORT_GEN3_EQ_FS_SHIFT 6
+#define PCIE_PORT_GEN3_EQ_FS_MASK (0x3f << PCIE_PORT_GEN3_EQ_FS_SHIFT)
+
+#define PCIE_PORT_LINK_CTRL_LB_EN_SHIFT 2
+#define PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT 7
+#define PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT 31
+
+#define PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT 0
+
+/* filter_mask_reg_1 register */
+/*
+ * 0: Treat Function MisMatched TLPs as UR
+ * 1: Treat Function MisMatched TLPs as Supported
+ */
+#define CX_FLT_MASK_UR_FUNC_MISMATCH AL_BIT(16)
+
+/*
+ * 0: Treat CFG type1 TLPs as UR for EP; Supported for RC
+ * 1: Treat CFG type1 TLPs as Supported for EP; UR for RC
+ */
+#define CX_FLT_MASK_CFG_TYPE1_RE_AS_UR AL_BIT(19)
+
+/*
+ * 0: Enforce requester id match for received CPL TLPs.
+ * A violation results in cpl_abort, and possibly AER of unexp_cpl_err,
+ * cpl_rcvd_ur, cpl_rcvd_ca
+ * 1: Mask requester id match for received CPL TLPs
+ */
+#define CX_FLT_MASK_CPL_REQID_MATCH AL_BIT(22)
+
+/*
+ * 0: Enforce function match for received CPL TLPs.
+ * A violation results in cpl_abort, and possibly AER of unexp_cpl_err,
+ * cpl_rcvd_ur, cpl_rcvd_ca
+ * 1: Mask function match for received CPL TLPs
+ */
+#define CX_FLT_MASK_CPL_FUNC_MATCH AL_BIT(23)
+
+/* vc0_posted_rcv_q_ctrl register */
+#define RADM_PQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12)
+#define RADM_PQ_HCRD_VC0_SHIFT 12
+
+/* vc0_non_posted_rcv_q_ctrl register */
+#define RADM_NPQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12)
+#define RADM_NPQ_HCRD_VC0_SHIFT 12
+
+/* vc0_comp_rcv_q_ctrl register */
+#define RADM_CPLQ_HCRD_VC0_MASK AL_FIELD_MASK(19, 12)
+#define RADM_CPLQ_HCRD_VC0_SHIFT 12
+
+#endif
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_pcie_w_reg.h b/arch/arm/mach-alpine/al_hal/al_hal_pcie_w_reg.h
new file mode 100644
index 0000000..c75fc8e
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_pcie_w_reg.h
@@ -0,0 +1,658 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_PCIE_W_REG_H__
+#define __AL_HAL_PCIE_W_REG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct al_pcie_w_global_ctrl {
+ /* [0x0] */
+ uint32_t port_init;
+ /* [0x4] */
+ uint32_t port_status;
+ /* [0x8] */
+ uint32_t pm_control;
+ uint32_t rsrvd_0;
+ /* [0x10] */
+ uint32_t events_gen;
+ uint32_t rsrvd[3];
+};
+struct al_pcie_w_lcl_log {
+ uint32_t rsrvd_0[4];
+ /* [0x10] */
+ uint32_t cpl_to_info;
+ uint32_t rsrvd_1[3];
+ /* [0x20] */
+ uint32_t rcv_msg0_0;
+ /* [0x24] */
+ uint32_t rcv_msg0_1;
+ /* [0x28] */
+ uint32_t rcv_msg0_2;
+ uint32_t rsrvd_2;
+ /* [0x30] */
+ uint32_t rcv_msg1_0;
+ /* [0x34] */
+ uint32_t rcv_msg1_1;
+ /* [0x38] */
+ uint32_t rcv_msg1_2;
+ uint32_t rsrvd_3;
+ /* [0x40] */
+ uint32_t core_q_status;
+ uint32_t rsrvd[7];
+};
+struct al_pcie_w_debug {
+ /* [0x0] */
+ uint32_t info_0;
+ /* [0x4] */
+ uint32_t info_1;
+ /* [0x8] */
+ uint32_t info_2;
+ uint32_t rsrvd;
+};
+struct al_pcie_w_ob_ven_msg {
+ /* [0x0] */
+ uint32_t control;
+ /* [0x4] */
+ uint32_t param_1;
+ /* [0x8] */
+ uint32_t param_2;
+ /* [0xc] */
+ uint32_t data_high;
+ uint32_t rsrvd_0;
+ /* [0x14] */
+ uint32_t data_low;
+ uint32_t rsrvd[2];
+};
+struct al_pcie_w_soc_int {
+ /* [0x0] */
+ uint32_t status_0;
+ /* [0x4] */
+ uint32_t status_1;
+ /* [0x8] */
+ uint32_t status_2;
+ /* [0xc] */
+ uint32_t mask_inta_leg_0;
+ /* [0x10] */
+ uint32_t mask_inta_leg_1;
+ /* [0x14] */
+ uint32_t mask_inta_leg_2;
+ /* [0x18] */
+ uint32_t mask_msi_leg_0;
+ /* [0x1c] */
+ uint32_t mask_msi_leg_1;
+ /* [0x20] */
+ uint32_t mask_msi_leg_2;
+ /* [0x24] */
+ uint32_t msi_leg_cntl;
+};
+struct al_pcie_w_link_down {
+ /* [0x0] */
+ uint32_t reset_delay;
+ /* [0x4] */
+ uint32_t reset_extend_rsrvd;
+};
+struct al_pcie_w_cntl_gen {
+ /* [0x0] */
+ uint32_t features;
+};
+struct al_pcie_w_parity {
+ /* [0x0] */
+ uint32_t en_core;
+ /* [0x4] */
+ uint32_t status_core;
+};
+struct al_pcie_w_last_wr {
+ /* [0x0] */
+ uint32_t cfg_addr;
+};
+struct al_pcie_w_atu {
+ /* [0x0] */
+ uint32_t in_mask_pair[6];
+ /* [0x18] */
+ uint32_t out_mask_pair[6];
+};
+struct al_pcie_w_cfg_elbi {
+ /* [0x0] */
+ uint32_t emulation;
+};
+struct al_pcie_w_emulatecfg {
+ /* [0x0] */
+ uint32_t data;
+ /* [0x4] */
+ uint32_t addr;
+ /* [0x8] */
+ uint32_t cmpl;
+};
+struct al_pcie_w_int_grp_a {
+ /* [0x0] Interrupt Cause RegisterSet by hardware - If MSI-X ... */
+ uint32_t cause_a;
+ uint32_t rsrvd_0;
+ /* [0x8] Interrupt Cause Set RegisterWriting 1 to a bit in t ... */
+ uint32_t cause_set_a;
+ uint32_t rsrvd_1;
+ /* [0x10] Interrupt Mask RegisterIf Auto-mask control bit =TR ... */
+ uint32_t mask_a;
+ uint32_t rsrvd_2;
+ /* [0x18] Interrupt Mask Clear RegisterUsed when auto-mask co ... */
+ uint32_t mask_clear_a;
+ uint32_t rsrvd_3;
+ /* [0x20] Interrupt Status RegisterThis register latches the ... */
+ uint32_t status_a;
+ uint32_t rsrvd_4;
+ /* [0x28] Interrupt Control Register */
+ uint32_t control_a;
+ uint32_t rsrvd_5;
+ /* [0x30] Interrupt Mask RegisterEach bit in this register ma ... */
+ uint32_t abort_mask_a;
+ uint32_t rsrvd_6;
+ /* [0x38] Interrupt Log RegisterEach bit in this register mas ... */
+ uint32_t log_mask_a;
+ uint32_t rsrvd;
+};
+struct al_pcie_w_int_grp_b {
+ /* [0x0] Interrupt Cause RegisterSet by hardware- If MSI-X i ... */
+ uint32_t cause_b;
+ uint32_t rsrvd_0;
+ /* [0x8] Interrupt Cause Set RegisterWriting 1 to a bit in t ... */
+ uint32_t cause_set_b;
+ uint32_t rsrvd_1;
+ /* [0x10] Interrupt Mask RegisterIf Auto-mask control bit =TR ... */
+ uint32_t mask_b;
+ uint32_t rsrvd_2;
+ /* [0x18] Interrupt Mask Clear RegisterUsed when auto-mask co ... */
+ uint32_t mask_clear_b;
+ uint32_t rsrvd_3;
+ /* [0x20] Interrupt Status RegisterThis register latches the ... */
+ uint32_t status_b;
+ uint32_t rsrvd_4;
+ /* [0x28] Interrupt Control Register */
+ uint32_t control_b;
+ uint32_t rsrvd_5;
+ /* [0x30] Interrupt Mask RegisterEach bit in this register ma ... */
+ uint32_t abort_mask_b;
+ uint32_t rsrvd_6;
+ /* [0x38] Interrupt Log RegisterEach bit in this register mas ... */
+ uint32_t log_mask_b;
+ uint32_t rsrvd;
+};
+
+struct al_pcie_w_regs {
+ struct al_pcie_w_global_ctrl global_ctrl; /* [0x0] */
+ struct al_pcie_w_lcl_log lcl_log; /* [0x20] */
+ struct al_pcie_w_debug debug; /* [0x80] */
+ struct al_pcie_w_ob_ven_msg ob_ven_msg; /* [0x90] */
+ uint32_t rsrvd_0[84];
+ struct al_pcie_w_soc_int soc_int; /* [0x200] */
+ struct al_pcie_w_link_down link_down; /* [0x228] */
+ struct al_pcie_w_cntl_gen ctrl_gen; /* [0x230] */
+ struct al_pcie_w_parity parity; /* [0x234] */
+ struct al_pcie_w_last_wr last_wr; /* [0x23c] */
+ struct al_pcie_w_atu atu; /* [0x240] */
+ struct al_pcie_w_cfg_elbi cfg_elbi; /* [0x270] */
+ struct al_pcie_w_emulatecfg emulatecfg; /* [0x274] */
+ uint32_t rsrvd_1[32];
+ struct al_pcie_w_int_grp_a int_grp_a_m0; /* [0x300] */
+ struct al_pcie_w_int_grp_b int_grp_b_m0; /* [0x340] */
+ uint32_t rsrvd_2[32];
+ struct al_pcie_w_int_grp_a int_grp_a; /* [0x400] */
+ struct al_pcie_w_int_grp_b int_grp_b; /* [0x440] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** Port_Init register ****/
+/* Enable port to start LTSSM Link Training */
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN (1 << 0)
+/* Device TypeIndicates the specific type of this PCIe Function */
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_MASK 0x000000F0
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_DEVICE_TYPE_SHIFT 4
+/* Performs Manual Lane reversal for transmit Lanes */
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_TX_LANE_FLIP_EN (1 << 8)
+/* Performs Manual Lane reversal for receive Lanes */
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_RX_LANE_FLIP_EN (1 << 9)
+/* Auxiliary Power DetectedIndicates that auxiliary power (Vaux) ... */
+#define PCIE_W_GLOBAL_CTRL_PORT_INIT_SYS_AUX_PWR_DET_NOT_USE (1 << 10)
+
+/**** Port_Status register ****/
+/* PHY Link up/down indicator */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PHY_LINK_UP (1 << 0)
+/* Data Link Layer up/down indicatorThis status from the Flow Co ... */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_DL_LINK_UP (1 << 1)
+/* Reset request due to link down status. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_LINK_REQ_RST (1 << 2)
+/* Power management is in L0s state.. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L0S (1 << 3)
+/* Power management is in L1 state. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L1 (1 << 4)
+/* Power management is in L2 state. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_IN_L2 (1 << 5)
+/* Power management is exiting L2 state. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_LINKST_L2_EXIT (1 << 6)
+/* Power state of the device. */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_DSTATE_MASK 0x00000380
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_PM_DSTATE_SHIFT 7
+/* L0s state */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_XMLH_IN_RL0S (1 << 10)
+/* Timeout count before flush */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_LINK_TOUT_FLUSH_NOT (1 << 11)
+/* Clock Turnoff RequestAllows clock generation module to turn o ... */
+#define PCIE_W_GLOBAL_CTRL_PORT_STS_CORE_CLK_REQ_N (1 << 31)
+
+/**** PM_Control register ****/
+/* Wake Up */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_PM_XMT_PME (1 << 0)
+/* Request to Enter ASPM L1 */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_REQ_ENTR_L1 (1 << 3)
+/* Request to exit ASPM L1.
+Only effective if L1 is enabled. */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_REQ_EXIT_L1 (1 << 4)
+/* Indication that component is ready to enter the L23 state */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_READY_ENTR_L23 (1 << 5)
+/* Request to generate a PM_Turn_Off Message to communicate tra ... */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_PM_XMT_TURNOFF (1 << 6)
+/* Provides a capability to defer incoming Configuration Request ... */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN (1 << 7)
+/* Core clock gate enable. If set, core_clk is gated off whenever a ... */
+#define PCIE_W_GLOBAL_CTRL_PM_CONTROL_CORE_CLK_GATE (1 << 31)
+
+/**** Events_Gen register ****/
+/* INT_D. Not supported */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTD (1 << 0)
+/* INT_C. Not supported */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTC (1 << 1)
+/* INT_B. Not supported */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTB (1 << 2)
+/* Transmit INT_A Interrupt ControlEvery transition from 0 to 1 ... */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_ASSERT_INTA (1 << 3)
+/* A request to generate an outbound MSI interrupt when MSI is e ... */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_TRNS_REQ (1 << 4)
+/* Set the MSI vector before issuing msi_trans_req. */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK 0x000003E0
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT 5
+/* The application requests hot reset to a downstream device */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT (1 << 10)
+/* The application request unlock message to be sent */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_UNLOCK_GEN (1 << 30)
+/* Indicates that FLR on a Physical Function has been completed */
+#define PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE (1 << 31)
+
+/**** Cpl_TO_Info register ****/
+/* The Traffic Class of the timed out CPL */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_TC_MASK 0x00000003
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_TC_SHIFT 0
+/* Indicates which Virtual Function (VF) had a CPL timeout */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_MASK 0x000000FC
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_FUN_NUM_SHIFT 2
+/* The Tag field of the timed out CPL */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_MASK 0x0000FF00
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_TAG_SHIFT 8
+/* The Attributes field of the timed out CPL */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_ATTR_MASK 0x00030000
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_ATTR_SHIFT 16
+/* The Len field of the timed out CPL */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_LEN_MASK 0x3FFC0000
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_LEN_SHIFT 18
+/* Write 1 to this field to clear the information logged in the ... */
+#define PCIE_W_LCL_LOG_CPL_TO_INFO_VALID (1 << 31)
+
+/**** Rcv_Msg0_0 register ****/
+/* The Requester ID of the received message */
+#define PCIE_W_LCL_LOG_RCV_MSG0_0_REQ_ID_MASK 0x0000FFFF
+#define PCIE_W_LCL_LOG_RCV_MSG0_0_REQ_ID_SHIFT 0
+/* Valid logged message. Writing 1 to this bit enables new message ... */
+#define PCIE_W_LCL_LOG_RCV_MSG0_0_VALID (1 << 31)
+
+/**** Rcv_Msg1_0 register ****/
+/* The Requester ID of the received message */
+#define PCIE_W_LCL_LOG_RCV_MSG1_0_REQ_ID_MASK 0x0000FFFF
+#define PCIE_W_LCL_LOG_RCV_MSG1_0_REQ_ID_SHIFT 0
+/* Valid logged message. Writing 1 to this bit enables new message ... */
+#define PCIE_W_LCL_LOG_RCV_MSG1_0_VALID (1 << 31)
+
+/**** Core_Queues_Status register ****/
+/* Indicates which entries in the CPL lookup table have valid ent ... */
+#define PCIE_W_LCL_LOG_CORE_Q_STATUS_CPL_LUT_VALID_MASK 0x0000FFFF
+#define PCIE_W_LCL_LOG_CORE_Q_STATUS_CPL_LUT_VALID_SHIFT 0
+
+/**** Debug_Info_0 register ****/
+/* Indicates the current power state */
+#define PCIE_W_DEBUG_INFO_0_PM_CURRENT_STATE_MASK 0x00000007
+#define PCIE_W_DEBUG_INFO_0_PM_CURRENT_STATE_SHIFT 0
+/* Current state of the LTSSM */
+#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK 0x000001F8
+#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT 3
+/* Decode of the Recovery. Equalization LTSSM state */
+#define PCIE_W_DEBUG_INFO_0_LTSSM_STATE_RCVRY_EQ (1 << 9)
+
+/**** control register ****/
+/* Indication to send vendor message; when clear the message was ... */
+#define PCIE_W_OB_VEN_MSG_CONTROL_REQ (1 << 0)
+
+/**** param_1 register ****/
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_FMT_MASK 0x00000003
+#define PCIE_W_OB_VEN_MSG_PARAM_1_FMT_SHIFT 0
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TYPE_MASK 0x0000007C
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TYPE_SHIFT 2
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TC_MASK 0x00000380
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TC_SHIFT 7
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TD (1 << 10)
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_EP (1 << 11)
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_ATTR_MASK 0x00003000
+#define PCIE_W_OB_VEN_MSG_PARAM_1_ATTR_SHIFT 12
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_LEN_MASK 0x00FFC000
+#define PCIE_W_OB_VEN_MSG_PARAM_1_LEN_SHIFT 14
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TAG_MASK 0xFF000000
+#define PCIE_W_OB_VEN_MSG_PARAM_1_TAG_SHIFT 24
+
+/**** param_2 register ****/
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_2_REQ_ID_MASK 0x0000FFFF
+#define PCIE_W_OB_VEN_MSG_PARAM_2_REQ_ID_SHIFT 0
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_2_CODE_MASK 0x00FF0000
+#define PCIE_W_OB_VEN_MSG_PARAM_2_CODE_SHIFT 16
+/* Vendor message parameters */
+#define PCIE_W_OB_VEN_MSG_PARAM_2_RSVD_31_24_MASK 0xFF000000
+#define PCIE_W_OB_VEN_MSG_PARAM_2_RSVD_31_24_SHIFT 24
+
+/**** features register ****/
+/* Enable MSI fix from the SATA to the PCIe EP - Only valid for port zero */
+#define PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX AL_BIT(16)
+
+/**** in/out_mask_x_y register ****/
+/* When bit [i] is set to 1, it masks the compare in the atu_in/out wind ... */
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK 0x0000FFFF
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT 0
+/* When bit [i] is set to 1, it masks the compare in the atu_in/out wind ... */
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK 0xFFFF0000
+#define PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT 16
+
+/* emulation register */
+
+/*
+ * Force all inbound PF0 configuration read to ELBI (emulation interface)
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FORCE_FUN_0_CFG_ELBI AL_BIT(0)
+/*
+ * Force all non-PF0 inbound configuration read to ELBI
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FORCE_FUN_NO0_CFG_ELBI AL_BIT(1)
+/*
+ * Enable direct connection between DBI and CDM.
+ * By default, local CPU can not access the PCIe Core Configuration Space (CDM)
+ * through DBI interface if there is pended inbound configuration read or
+ * write.
+ * In emulation mode, since the inbound configuration is stalled and the CPU
+ * must access the core configuration space before releasing the inbound
+ * configuration transaction, it's required to enable a direct path to the CPU.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMUL_DBI_FORCE_CDM_EN AL_BIT(2)
+/*
+ * Disable config direction to trgt1 if above CONFIG_LIMIT.
+ * i.e. direct all inbound configuration access to emulation interface
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_ABOVE_LIMIT_DIS AL_BIT(3)
+/*
+ * ARI emulation enable, this emulates 8 bits function number, instead of 3.
+ * Since our PCIe core does not have the ARI capability and it's single
+ * function, requester and completer ID are in the form of {bus, dev, fun},
+ * when bus and dev numbers are latched from the received configuration write.
+ * When this bit is set, dev number is overridden by the function number when
+ * function >= 8.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_AP_ARI_EMUL_EN AL_BIT(8)
+/*
+ * Disable all FLR functionality within the core for both PF and VF. By
+ * default the core resets internal data structures and terminates pending
+ * requests. Since now all the resources are being used for all functions, it's
+ * not correct to apply FLR on the core. When setting this bit, FLR is
+ * propagated as a configuration write to emulation and the emulation driver should
+ * handle it by SW.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_PFVF_FLR_DIS AL_BIT(9)
+/*
+ * Disable FLR for func !=0 functionality within the core
+ */
+#define PCIE_W_CFG_EMUL_CTRL_EMULCFG_VF_FLR_DIS AL_BIT(10)
+/*
+ * Enable multi-function (VMID) propagation for outbound requests.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_SRVIO_VFUNC_EN AL_BIT(16)
+/*
+ * Fix client1 FMT bits after cutting address 63:56, fix address format to
+ * 32-bits if original request is 32-bit address.
+ */
+#define PCIE_W_CFG_EMUL_CTRL_FIX_CLIENT1_FMT_EN AL_BIT(17)
+
+/* address register */
+
+/* Valid address - Cleared on read */
+#define PCIE_W_CFG_EMUL_ADDR_VALID AL_BIT(0)
+
+/* Received Configuration Type: CfgType0 (=0) or CfgType1 (=1) */
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE AL_BIT(1)
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE_0 0
+#define PCIE_W_CFG_EMUL_ADDR_CFG_TYPE_1 AL_BIT(1)
+
+/* Target register offset (including extended register) */
+#define PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_MASK AL_FIELD_MASK(11, 2)
+#define PCIE_W_CFG_EMUL_ADDR_REG_OFFSET_SHIFT 2
+
+/*
+ * Received Byte Enable.
+ * If 4'b0000, the received packet is Configuration Read transaction, otherwise
+ * Configuration Write with corresponding 4-bits Byte Enable.
+ */
+#define PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_MASK AL_FIELD_MASK(15, 12)
+#define PCIE_W_CFG_EMUL_ADDR_BYTE_ENABLE_SHIFT 12
+
+/*
+ * Dev_Fun
+ * - Non-ARI: [19:16] target function num, [23:19] target device number
+ * - ARI: [23:16] target function number
+ */
+#define PCIE_W_CFG_EMUL_ADDR_DEV_FUN_MASK AL_FIELD_MASK(23, 16)
+#define PCIE_W_CFG_EMUL_ADDR_DEV_FUN_SHIFT 16
+
+/* Target Bus Number */
+#define PCIE_W_CFG_EMUL_ADDR_BUS_NUM_MASK AL_FIELD_MASK(31, 24)
+#define PCIE_W_CFG_EMUL_ADDR_BUS_NUM_SHIFT 24
+
+/**** cause_A register ****/
+/* Deassert_INTD received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTD (1 << 0)
+/* Deassert_INTC received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTC (1 << 1)
+/* Deassert_INTB received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTB (1 << 2)
+/* Deassert_INTA received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_DEASSERT_INTA (1 << 3)
+/* Assert_INTD received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTD (1 << 4)
+/* Assert_INTC received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTC (1 << 5)
+/* Assert_INTB received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTB (1 << 6)
+/* Assert_INTA received. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_ASSERT_INTA (1 << 7)
+/* MSI Controller Interrupt. MSI interrupt is being received */
+#define PCIE_W_INT_GRP_A_CAUSE_A_MSI_CNTR_RCV_INT (1 << 8)
+/* MSI sent grant. Write zero to clear this bit. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_MSI_TRNS_GNT (1 << 9)
+/* System error detected Indicates if any device in the hierarch ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_SYS_ERR_RC (1 << 10)
+/* Set when software initiates FLR on a Physical Function by wri ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_FLR_PF_ACTIVE (1 << 11)
+/* Reported error condition causes a bit to be set in the Root E ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_AER_RC_ERR (1 << 12)
+/* The core asserts aer_rc_err_msi when all of the following con ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_AER_RC_ERR_MSI (1 << 13)
+/* Wake Up */
+#define PCIE_W_INT_GRP_A_CAUSE_A_WAKE (1 << 14)
+/* The core asserts cfg_pme_int when all of the following condit ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_PME_INT (1 << 15)
+/* The core asserts cfg_pme_msi when all of the following condit ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_PME_MSI (1 << 16)
+/* The core asserts hp_pme when all of the following conditions ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_HP_PME (1 << 17)
+/* The core asserts hp_int when all of the following conditions ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_HP_INT (1 << 18)
+/* The core asserts hp_msi when the logical AND of the followin ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_HP_MSI (1 << 19)
+/* Read VPD registers notification */
+#define PCIE_W_INT_GRP_A_CAUSE_A_VPD_INT (1 << 20)
+/* The core assert link down event, whenever the link is going d ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_LINK_DOWN_EVENT (1 << 21)
+/* When the EP gets a command to shut down, signal the software ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_PM_XTLH_BLOCK_TLP (1 << 22)
+/* PHY/MAC link up */
+#define PCIE_W_INT_GRP_A_CAUSE_A_XMLH_LINK_UP (1 << 23)
+/* Data link up */
+#define PCIE_W_INT_GRP_A_CAUSE_A_RDLH_LINK_UP (1 << 24)
+/* The ltssm is in RCVRY_LOCK state. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_LTSSM_RCVRY_STATE (1 << 25)
+/* Config write transaction to the config space by the RC peer, ... */
+#define PCIE_W_INT_GRP_A_CAUSE_A_CFG_WR_EVENT (1 << 26)
+/* When emulation mode is active, every cfg access in EP mode will cause INT. */
+#define PCIE_W_INT_GRP_A_CAUSE_A_CFG_EMUL_EVENT (1 << 31)
+
+/**** control_A register ****/
+/* When Clear_on_Read =1, all bits of Cause register are cleare ... */
+#define PCIE_W_INT_GRP_A_CONTROL_A_CLEAR_ON_READ (1 << 0)
+/* (Must be set only when MSIX is enabled */
+#define PCIE_W_INT_GRP_A_CONTROL_A_AUTO_MASK (1 << 1)
+/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */
+#define PCIE_W_INT_GRP_A_CONTROL_A_AUTO_CLEAR (1 << 2)
+/* When Set_on_Posedge =1, the bits in the Interrupt Cause regis ... */
+#define PCIE_W_INT_GRP_A_CONTROL_A_SET_ON_POSEDGE (1 << 3)
+/* When Moderation_Reset =1, all Moderation timers associated wi ... */
+#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RST (1 << 4)
+/* When mask_msi_x =1, no MSI-X from this group is sent */
+#define PCIE_W_INT_GRP_A_CONTROL_A_MASK_MSI_X (1 << 5)
+/* MSI-X AWID value. Same ID for all cause bits. */
+#define PCIE_W_INT_GRP_A_CONTROL_A_AWID_MASK 0x00000F00
+#define PCIE_W_INT_GRP_A_CONTROL_A_AWID_SHIFT 8
+/* This value determines the interval between interrupts; writin ... */
+#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_INTV_MASK 0x00FF0000
+#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_INTV_SHIFT 16
+/* This value determines the Moderation_Timer_Clock speed */
+#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RES_MASK 0x0F000000
+#define PCIE_W_INT_GRP_A_CONTROL_A_MOD_RES_SHIFT 24
+
+/**** cause_B register ****/
+/* Indicates that the core received a PM_PME Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_PME (1 << 0)
+/* Indicates that the core received a PME_TO_Ack Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_TO_ACK (1 << 1)
+/* Indicates that the core received an PME_Turn_Off Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_PM_TURNOFF (1 << 2)
+/* Indicates that the core received an ERR_CORR Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_CORRECTABLE_ERR (1 << 3)
+/* Indicates that the core received an ERR_NONFATAL Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_NONFATAL_ERR (1 << 4)
+/* Indicates that the core received an ERR_FATAL Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_FATAL_ERR (1 << 5)
+/* Indicates that the core received a Vendor Defined Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_VENDOR_0 (1 << 6)
+/* Indicates that the core received a Vendor Defined Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_VENDOR_1 (1 << 7)
+/* Indicates that the core received an Unlock Message */
+#define PCIE_W_INT_GRP_B_CAUSE_B_MSG_UNLOCK (1 << 8)
+/* Notification when the Link Autonomous Bandwidth Status regist ... */
+#define PCIE_W_INT_GRP_B_CAUSE_B_LINK_AUTO_BW_INT (1 << 12)
+/* Notification that the Link Equalization Request bit in the Li ... */
+#define PCIE_W_INT_GRP_B_CAUSE_B_LINK_EQ_REQ_INT (1 << 13)
+/* OB Vendor message request is granted by the PCIe core Write ... */
+#define PCIE_W_INT_GRP_B_CAUSE_B_VENDOR_MSG_GRANT (1 << 14)
+/* CPL timeout from the PCIe core indication */
+#define PCIE_W_INT_GRP_B_CAUSE_B_CMP_TIME_OUT (1 << 15)
+/* Slave Response Composer Lookup ErrorIndicates that an overflo ... */
+#define PCIE_W_INT_GRP_B_CAUSE_B_RADMX_CMPOSER_LOOKUP_ERR (1 << 16)
+/* Parity Error */
+#define PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE (1 << 17)
+
+/**** control_B register ****/
+/* When Clear_on_Read =1, all bits of the Cause register are cle ... */
+#define PCIE_W_INT_GRP_B_CONTROL_B_CLEAR_ON_READ (1 << 0)
+/* (Must be set only when MSIX is enabled */
+#define PCIE_W_INT_GRP_B_CONTROL_B_AUTO_MASK (1 << 1)
+/* Auto_Clear (RW)When Auto-Clear =1, the bits in the Interrupt ... */
+#define PCIE_W_INT_GRP_B_CONTROL_B_AUTO_CLEAR (1 << 2)
+/* When Set_on_Posedge =1, the bits in the interrupt Cause regis ... */
+#define PCIE_W_INT_GRP_B_CONTROL_B_SET_ON_POSEDGE (1 << 3)
+/* When Moderation_Reset =1, all Moderation timers associated wi ... */
+#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RST (1 << 4)
+/* When mask_msi_x =1, no MSI-X from this group is sent */
+#define PCIE_W_INT_GRP_B_CONTROL_B_MASK_MSI_X (1 << 5)
+/* MSI-X AWID value. Same ID for all cause bits. */
+#define PCIE_W_INT_GRP_B_CONTROL_B_AWID_MASK 0x00000F00
+#define PCIE_W_INT_GRP_B_CONTROL_B_AWID_SHIFT 8
+/* This value determines the interval between interrupts */
+#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_INTV_MASK 0x00FF0000
+#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_INTV_SHIFT 16
+/* This value determines the Moderation_Timer_Clock speed */
+#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RES_MASK 0x0F000000
+#define PCIE_W_INT_GRP_B_CONTROL_B_MOD_RES_SHIFT 24
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_pcie_w_REG_H */
+
+/** @} end of ... group */
+
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_serdes.c b/arch/arm/mach-alpine/al_hal/al_hal_serdes.c
new file mode 100644
index 0000000..0e24f0b
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_serdes.c
@@ -0,0 +1,2702 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "al_hal_serdes.h"
+#include "al_hal_serdes_regs.h"
+#include "al_hal_serdes_internal_regs.h"
+
+#define SRDS_CORE_REG_ADDR(page, type, offset)\
+ (((page) << 13) | ((type) << 12) | (offset))
+
+/* Link Training configuration */
+#define AL_SERDES_TX_DEEMPH_SUM_MAX 0x1b
+
+/* c configurations */
+#define AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL 0x1b
+#define AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_ZERO_PRESET AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL
+
+/* c(+1) configurations */
+#define AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL 0x9
+#define AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_PLUS_PRESET AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL
+
+/* c(-1) configurations */
+#define AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL 0x6
+#define AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL 0
+#define AL_SERDES_TX_DEEMPH_C_MINUS_PRESET AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL
+
+/* Rx equal total delay = MDELAY * TRIES */
+#define AL_SERDES_RX_EQUAL_MDELAY 10
+#define AL_SERDES_RX_EQUAL_TRIES 50
+
+/* Rx eye calculation delay = MDELAY * TRIES */
+#define AL_SERDES_RX_EYE_CAL_MDELAY 50
+#define AL_SERDES_RX_EYE_CAL_TRIES 70
+
+
+/**
+ * SERDES core reg read
+ */
+static inline uint8_t al_serdes_grp_reg_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset);
+
+/**
+ * SERDES core reg write
+ */
+static inline void al_serdes_grp_reg_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+/**
+ * SERDES core masked reg write
+ */
+static inline void al_serdes_grp_reg_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data);
+
+/******************************************************************************/
+/******************************************************************************/
+int al_serdes_handle_init(
+ void __iomem *serdes_regs_base,
+ struct al_serdes_obj *obj)
+{
+ int i;
+
+ al_dbg(
+ "%s(%p, %p)\n",
+ __func__,
+ serdes_regs_base,
+ obj);
+
+ al_assert(serdes_regs_base);
+
+ for (i = 0; i < AL_SRDS_NUM_GROUPS; i++) {
+ obj->grp_info[i].pobj = obj;
+
+ obj->grp_info[i].regs_base =
+ &((struct al_serdes_regs *)serdes_regs_base)[i];
+ }
+
+ return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+int al_serdes_reg_read(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t *data)
+{
+ int status = 0;
+
+ al_dbg(
+ "%s(%p, %d, %d, %d, %u)\n",
+ __func__,
+ obj,
+ grp,
+ page,
+ type,
+ offset);
+
+ al_assert(obj);
+ al_assert(data);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+ al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
+ al_assert(((int)page) <= AL_SRDS_REG_PAGE_4_COMMON);
+ al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA);
+ al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS);
+
+ *data = al_serdes_grp_reg_read(
+ &obj->grp_info[grp],
+ page,
+ type,
+ offset);
+
+ al_dbg(
+ "%s: return(%u)\n",
+ __func__,
+ *data);
+
+ return status;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+int al_serdes_reg_write(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data)
+{
+ int status = 0;
+
+ al_dbg(
+ "%s(%p, %d, %d, %d, %u, %u)\n",
+ __func__,
+ obj,
+ grp,
+ page,
+ type,
+ offset,
+ data);
+
+ al_assert(obj);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+ al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
+ al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123);
+ al_assert(((int)type) >= AL_SRDS_REG_TYPE_PMA);
+ al_assert(((int)type) <= AL_SRDS_REG_TYPE_PCS);
+
+ al_serdes_grp_reg_write(
+ &obj->grp_info[grp],
+ page,
+ type,
+ offset,
+ data);
+
+ return status;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM != SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM != SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM != SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_LB_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+#if (SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM != SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM)
+#error "Wrong assumption!"
+#endif
+void al_serdes_bist_overrides_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_rate rate)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ int i;
+
+ uint8_t rx_rate_val;
+ uint8_t tx_rate_val;
+
+ switch (rate) {
+ case AL_SRDS_RATE_1_8:
+ rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8;
+ tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8;
+ break;
+ case AL_SRDS_RATE_1_4:
+ rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4;
+ tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4;
+ break;
+ case AL_SRDS_RATE_1_2:
+ rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2;
+ tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2;
+ break;
+ case AL_SRDS_RATE_FULL:
+ rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+ tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1;
+ break;
+ default:
+ al_err("%s: invalid rate (%d)\n", __func__, rate);
+ al_assert(0);
+ rx_rate_val = SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1;
+ tx_rate_val = SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1;
+ }
+
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM,
+ SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK |
+ SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK,
+ SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 |
+ SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM,
+ SERDES_IREG_FLD_PCSRX_DIVRATE_MASK |
+ SERDES_IREG_FLD_PCSTX_DIVRATE_MASK,
+ rx_rate_val | tx_rate_val);
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM,
+ SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN |
+ SERDES_IREG_FLD_CMNPCS_LOCWREN |
+ SERDES_IREG_FLD_CMNPCSBIST_LOCWREN |
+ SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM,
+ SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN |
+ SERDES_IREG_FLD_CMNPCS_LOCWREN |
+ SERDES_IREG_FLD_CMNPCSBIST_LOCWREN |
+ SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM,
+ SERDES_IREG_FLD_PCS_LOCWREN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM,
+ SERDES_IREG_FLD_CMNPCS_TXENABLE,
+ SERDES_IREG_FLD_CMNPCS_TXENABLE);
+
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM,
+ SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN |
+ SERDES_IREG_FLD_LB_LOCWREN |
+ SERDES_IREG_FLD_PCSRX_LOCWREN |
+ SERDES_IREG_FLD_PCSRXBIST_LOCWREN |
+ SERDES_IREG_FLD_PCSRXEQ_LOCWREN |
+ SERDES_IREG_FLD_PCSTX_LOCWREN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM,
+ SERDES_IREG_FLD_PCSTXBIST_LOCWREN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+ 0);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+ SERDES_IREG_FLD_RXLOCK2REF_OVREN,
+ SERDES_IREG_FLD_RXLOCK2REF_OVREN);
+ }
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_group_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_pm pm)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ uint8_t pm_val;
+
+ switch (pm) {
+ case AL_SRDS_PM_PD:
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD;
+ break;
+ case AL_SRDS_PM_P2:
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2;
+ break;
+ case AL_SRDS_PM_P1:
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1;
+ break;
+ case AL_SRDS_PM_P0S:
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S;
+ break;
+ case AL_SRDS_PM_P0:
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0;
+ break;
+ default:
+ al_err("%s: invalid power mode (%d)\n", __func__, pm);
+ al_assert(0);
+ pm_val = SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0;
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM,
+ SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK,
+ pm_val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_lane_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_pm rx_pm,
+ enum al_serdes_pm tx_pm)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ uint8_t rx_pm_val;
+ uint8_t tx_pm_val;
+
+ switch (rx_pm) {
+ case AL_SRDS_PM_PD:
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD;
+ break;
+ case AL_SRDS_PM_P2:
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2;
+ break;
+ case AL_SRDS_PM_P1:
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1;
+ break;
+ case AL_SRDS_PM_P0S:
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S;
+ break;
+ case AL_SRDS_PM_P0:
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0;
+ break;
+ default:
+ al_err("%s: invalid rx power mode (%d)\n", __func__, rx_pm);
+ al_assert(0);
+ rx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0;
+ }
+
+ switch (tx_pm) {
+ case AL_SRDS_PM_PD:
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD;
+ break;
+ case AL_SRDS_PM_P2:
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2;
+ break;
+ case AL_SRDS_PM_P1:
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1;
+ break;
+ case AL_SRDS_PM_P0S:
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S;
+ break;
+ case AL_SRDS_PM_P0:
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0;
+ break;
+ default:
+ al_err("%s: invalid tx power mode (%d)\n", __func__, tx_pm);
+ al_assert(0);
+ tx_pm_val = SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0;
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM,
+ SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK,
+ rx_pm_val);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM,
+ SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK,
+ tx_pm_val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_pma_hard_reset_group(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ /* Enable Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS);
+
+ /* Assert/Deassert Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK,
+ enable ?
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT :
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_pma_hard_reset_lane(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ /* Enable Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS);
+
+ /* Assert/Deassert Hard Reset Override */
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM,
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK,
+ enable ?
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT :
+ SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM) ||\
+ (SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM !=\
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM)
+#error Wrong assumption
+#endif
+
+void al_serdes_loopback_control(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_lb_mode mode)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t val = 0;
+
+ switch (mode) {
+ case AL_SRDS_LB_MODE_OFF:
+ break;
+ case AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX:
+ val = SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX:
+ val = SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO:
+ val = SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN;
+ break;
+ case AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX:
+ val = SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN |
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN;
+ break;
+ default:
+ al_err("%s: invalid mode (%d)\n", __func__, mode);
+ al_assert(0);
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM,
+ SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN |
+ SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN |
+ SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN |
+ SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN |
+ SERDES_IREG_FLD_LB_CDRCLK2TXEN,
+ val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_pattern_select(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_bist_pattern pattern,
+ uint8_t *user_data)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t val = 0;
+
+ switch (pattern) {
+ case AL_SRDS_BIST_PATTERN_USER:
+ al_assert(user_data);
+ al_err("%s: user pattern currently not supported!\n", __func__);
+ al_assert(0);
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS7:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7;
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS23:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23;
+ break;
+ case AL_SRDS_BIST_PATTERN_PRBS31:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31;
+ break;
+ case AL_SRDS_BIST_PATTERN_CLK1010:
+ val = SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010;
+ break;
+ default:
+ al_err("%s: invalid pattern (%d)\n", __func__, pattern);
+ al_assert(0);
+ }
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM,
+ SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK,
+ val);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_tx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM,
+ SERDES_IREG_FLD_PCSTXBIST_EN,
+ enable ? SERDES_IREG_FLD_PCSTXBIST_EN : 0);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_tx_err_inject(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM,
+ SERDES_IREG_FLD_TXBIST_BITERROR_EN,
+ SERDES_IREG_FLD_TXBIST_BITERROR_EN);
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM,
+ SERDES_IREG_FLD_TXBIST_BITERROR_EN,
+ 0);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+void al_serdes_bist_rx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM,
+ SERDES_IREG_FLD_PCSRXBIST_EN,
+ enable ? SERDES_IREG_FLD_PCSRXBIST_EN : 0);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+#if (SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM !=\
+ SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM)
+#error Wrong assumption
+#endif
+
+void al_serdes_bist_rx_status(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool *is_locked,
+ al_bool *err_cnt_overflow,
+ uint16_t *err_cnt)
+{
+ struct al_serdes_group_info *grp_info = &obj->grp_info[grp];
+ uint8_t status_reg_val;
+ uint16_t err_cnt_msb_reg_val;
+ uint16_t err_cnt_lsb_reg_val;
+
+ status_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM);
+
+ err_cnt_msb_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM);
+
+ err_cnt_lsb_reg_val = al_serdes_grp_reg_read(
+ grp_info,
+ (enum al_serdes_reg_page)lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM);
+
+ *is_locked =
+ (status_reg_val & SERDES_IREG_FLD_RXBIST_RXLOCKED) ?
+ AL_TRUE : AL_FALSE;
+
+ *err_cnt_overflow =
+ (status_reg_val & SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW) ?
+ AL_TRUE : AL_FALSE;
+
+ *err_cnt = (err_cnt_msb_reg_val << 8) + err_cnt_lsb_reg_val;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline uint8_t al_serdes_grp_reg_read(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset)
+{
+ al_reg_write32(
+ &grp_info->regs_base->gen.reg_addr,
+ SRDS_CORE_REG_ADDR(page, type, offset));
+
+ return al_reg_read32(&grp_info->regs_base->gen.reg_data);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline void al_serdes_grp_reg_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data)
+{
+ al_reg_write32(
+ &grp_info->regs_base->gen.reg_addr,
+ SRDS_CORE_REG_ADDR(page, type, offset));
+
+ al_reg_write32(&grp_info->regs_base->gen.reg_data, data);
+}
+
+/******************************************************************************/
+/******************************************************************************/
+static inline void al_serdes_grp_reg_masked_write(
+ struct al_serdes_group_info *grp_info,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t mask,
+ uint8_t data)
+{
+ uint8_t val;
+ enum al_serdes_reg_page start_page = page;
+ enum al_serdes_reg_page end_page = page;
+ enum al_serdes_reg_page iter_page;
+
+ if (page == AL_SRDS_REG_PAGE_0123_LANES_0123) {
+ start_page = AL_SRDS_REG_PAGE_0_LANE_0;
+ end_page = AL_SRDS_REG_PAGE_3_LANE_3;
+ }
+
+ for(iter_page = start_page; iter_page <= end_page; ++iter_page) {
+ val = al_serdes_grp_reg_read(grp_info, iter_page, type, offset);
+ val &= ~mask;
+ val |= data;
+ al_serdes_grp_reg_write(grp_info, iter_page, type, offset, val);
+ }
+}
+
+/******************************************************************************/
+/******************************************************************************/
+int al_serdes_eye_measure_run(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t timeout,
+ unsigned int *value)
+{
+ uint32_t reg = 0;
+ uint32_t i;
+ struct serdes_lane *lane_regs;
+
+ lane_regs = &obj->grp_info[grp].regs_base->lane[lane];
+
+ al_reg_write32(&lane_regs->ictl_multi_rxeq,
+ SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A);
+
+ for (i = 0 ; i < timeout ; i++) {
+ reg = al_reg_read32(&lane_regs->octl_multi);
+
+ if (reg & SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A)
+ break;
+
+ al_msleep(10);
+ }
+
+ if (i == timeout) {
+ al_err("%s: measure eye failed on timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ *value = al_reg_read32(&lane_regs->odat_multi_rxeq);
+
+ al_reg_write32(&lane_regs->ictl_multi_rxeq, 0);
+
+ return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
/**
 * Sample one (x, y) point of the RX eye diagram on a lane.
 *
 * Sequence: save the current BER sample count, force ~100000 samples,
 * release the local-control write locks of the eye-measurement registers,
 * program the (x, y) roam position, pulse the measurement FSM, poll for
 * completion, read the 16-bit error accumulator, then restore the write
 * locks and the original sample count. The register order below is the
 * hardware-required sequence — do not reorder.
 *
 * @param x       horizontal roam position (written as-is to the X adjust reg)
 * @param y       vertical roam position; remapped below to the register's
 *                split encoding for values below/above the eye center
 * @param timeout max polls (1 us apart) for measurement completion
 * @param value   error-count accumulator for the sampled point
 *
 * @return 0 on success, -ETIMEDOUT if the measurement never completed
 *         (write locks and sample count are NOT restored on timeout —
 *         NOTE(review): callers should be aware of this).
 */
int al_serdes_eye_diag_sample(
	struct al_serdes_obj *obj,
	enum al_serdes_group grp,
	enum al_serdes_lane lane,
	unsigned int x,
	int y,
	unsigned int timeout,
	unsigned int *value)
{
	/* Lane index doubles as the per-lane register page number */
	enum al_serdes_reg_page page = (enum al_serdes_reg_page)lane;
	struct al_serdes_group_info *grp_info;
	uint32_t i;
	uint8_t sample_count_orig_msb;
	uint8_t sample_count_orig_lsb;

	al_assert(obj);
	al_assert(((int)grp) >= AL_SRDS_GRP_A);
	al_assert(((int)grp) <= AL_SRDS_GRP_D);
	al_assert(((int)page) >= AL_SRDS_REG_PAGE_0_LANE_0);
	al_assert(((int)page) <= AL_SRDS_REG_PAGE_0123_LANES_0123);

	grp_info = &obj->grp_info[grp];

	/* Obtain sample count by reading RXCALROAMEYEMEAS_COUNT */
	sample_count_orig_msb = al_serdes_grp_reg_read(grp_info,
			AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM);
	sample_count_orig_lsb = al_serdes_grp_reg_read(grp_info,
			AL_SRDS_REG_PAGE_4_COMMON, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM);

	/* Set sample count to ~100000 samples (0x1388 hex = 5000; the
	 * "~100000" figure presumably includes a hardware multiplier —
	 * TODO confirm against the PMA datasheet) */
	al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
			AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM, 0x13);
	al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
			AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM, 0x88);

	/* BER Contour Overwrite: release the local-control write locks
	 * (0 = register control enabled) */
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN,
			0);
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
			0);
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
			0);

	/* RXROAM_XORBITSEL = 0x1 or 0x0 */
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
			SERDES_IREG_FLD_RXROAM_XORBITSEL,
			SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND);

	/* Set X */
	al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM, x);

	/* Set Y: the register encodes below-center (y < 32) as 31-y and
	 * at/above-center as y+1 — assumed split sign/magnitude encoding;
	 * TODO confirm against the PMA datasheet */
	al_serdes_grp_reg_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM,
			y < 32 ? 31 - y : y + 1);

	/* Start Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x1 */
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START);

	/* Check RXCALROAMEYEMEASDONE Signal (Polling Until 0x1) */
	for (i = 0 ; i < timeout ; i++) {
		if (al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA,
				SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM) &
				SERDES_IREG_FLD_RXCALROAMEYEMEASDONE)
			break;
		al_udelay(1);
	}
	if (i == timeout) {
		al_err("%s: eye diagram sampling timed out!\n", __func__);
		return -ETIMEDOUT;
	}

	/* Stop Measurement by setting RXCALROAMEYEMEASIN_CYCLEEN = 0x0 */
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START,
			0);

	/* Obtain Error Counts by reading RXCALROAMEYEMEAS_ACC (MSB:LSB) */
	*value = ((unsigned int)al_serdes_grp_reg_read(grp_info, page,
				AL_SRDS_REG_TYPE_PMA,
				SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM)) << 8 |
			al_serdes_grp_reg_read(grp_info, page, AL_SRDS_REG_TYPE_PMA,
				SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM);

	/* BER Contour Overwrite: re-engage the local-control write locks */
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN,
			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN);
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN);
	al_serdes_grp_reg_masked_write(grp_info, page, AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN);

	/* Restore sample count */
	al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
			AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM,
			sample_count_orig_msb);
	al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
			AL_SRDS_REG_TYPE_PMA,
			SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM,
			sample_count_orig_lsb);

	return 0;
}
+
+/******************************************************************************/
+/******************************************************************************/
+static void al_serdes_tx_deemph_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t c_zero,
+ uint32_t c_plus_1,
+ uint32_t c_minus_1)
+{
+ al_serdes_grp_reg_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM,
+ SERDES_IREG_TX_DRV_1_LEVN_MASK,
+ ((c_zero + c_plus_1 + c_minus_1)
+ << SERDES_IREG_TX_DRV_1_LEVN_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM,
+ SERDES_IREG_TX_DRV_2_LEVNM1_MASK,
+ (c_plus_1 << SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM,
+ SERDES_IREG_TX_DRV_3_LEVNP1_MASK,
+ (c_minus_1 << SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT));
+}
+
+static void al_serdes_tx_deemph_get(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t *c_zero,
+ uint32_t *c_plus_1,
+ uint32_t *c_minus_1)
+{
+ uint32_t reg = 0;
+
+ reg = al_serdes_grp_reg_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM);
+
+ *c_plus_1 = ((reg & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >>
+ SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT);
+
+ reg = al_serdes_grp_reg_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM);
+
+ *c_minus_1 = ((reg & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >>
+ SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT);
+
+ reg = al_serdes_grp_reg_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM);
+
+ *c_zero = (((reg & SERDES_IREG_TX_DRV_1_LEVN_MASK) >>
+ SERDES_IREG_TX_DRV_1_LEVN_SHIFT) - *c_plus_1 - *c_minus_1);
+}
+
+al_bool al_serdes_tx_deemph_inc(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param)
+{
+ al_bool ret = AL_TRUE;
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1);
+
+ al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ switch (param) {
+ case AL_SERDES_TX_DEEMP_C_ZERO:
+
+ if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MAX_VAL)
+ return AL_FALSE;
+
+ c0++;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_PLUS:
+
+ if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MAX_VAL)
+ return AL_FALSE;
+
+ c1++;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_MINUS:
+
+ if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MAX_VAL)
+ return AL_FALSE;
+
+ c_1++;
+
+ break;
+ }
+
+ if ((c0 + c1 + c_1) > AL_SERDES_TX_DEEMPH_SUM_MAX) {
+ al_dbg("%s: sum of all tx de-emphasis over the max limit\n",
+ __func__);
+
+ return AL_FALSE;
+ }
+
+ al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+
+ return ret;
+}
+
+al_bool al_serdes_tx_deemph_dec(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param)
+{
+ al_bool ret = AL_TRUE;
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ al_serdes_tx_deemph_get(obj, grp, lane, &c0, &c1, &c_1);
+
+ al_dbg("%s: current txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ switch (param) {
+ case AL_SERDES_TX_DEEMP_C_ZERO:
+
+ if (c0 == AL_SERDES_TX_DEEMPH_C_ZERO_MIN_VAL)
+ return AL_FALSE;
+
+ c0--;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_PLUS:
+
+ if (c1 == AL_SERDES_TX_DEEMPH_C_PLUS_MIN_VAL)
+ return AL_FALSE;
+
+ c1--;
+
+ break;
+ case AL_SERDES_TX_DEEMP_C_MINUS:
+
+ if (c_1 == AL_SERDES_TX_DEEMPH_C_MINUS_MIN_VAL)
+ return AL_FALSE;
+
+ c_1--;
+
+ break;
+ }
+
+ al_dbg("%s: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ __func__, c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+
+ return ret;
+}
+
+void al_serdes_tx_deemph_preset(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane)
+{
+ uint32_t c0;
+ uint32_t c1;
+ uint32_t c_1;
+
+ c0 = AL_SERDES_TX_DEEMPH_C_ZERO_PRESET;
+
+ c1 = AL_SERDES_TX_DEEMPH_C_PLUS_PRESET;
+
+ c_1 = AL_SERDES_TX_DEEMPH_C_MINUS_PRESET;
+
+ al_dbg("preset: new txdeemph: c0 = 0x%x c1 = 0x%x c-1 = 0x%x\n",
+ c0, c1, c_1);
+
+ al_serdes_tx_deemph_set(obj, grp, lane, c0, c1, c_1);
+}
+
+al_bool al_serdes_signal_is_detected(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane)
+{
+ uint32_t reg = 0;
+
+ reg = al_serdes_grp_reg_read(
+ &obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXRANDET_REG_NUM);
+
+ return ((reg & SERDES_IREG_FLD_RXRANDET_STAT) ? AL_TRUE : AL_FALSE);
+}
+
+void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *params)
+{
+ uint8_t reg = 0;
+
+ if(!params->override) {
+ al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN);
+
+ return;
+ }
+
+ al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN,
+ 0);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_1_HLEV_MASK,
+ SERDES_IREG_TX_DRV_1_HLEV_SHIFT,
+ params->amp);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_1_LEVN_MASK,
+ SERDES_IREG_TX_DRV_1_LEVN_SHIFT,
+ params->total_driver_units);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_2_LEVNM1_MASK,
+ SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT,
+ params->c_plus_1);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_2_LEVNM2_MASK,
+ SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT,
+ params->c_plus_2);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_3_LEVNP1_MASK,
+ SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT,
+ params->c_minus_1);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_TX_DRV_3_SLEW_MASK,
+ SERDES_IREG_TX_DRV_3_SLEW_SHIFT,
+ params->slew_rate);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM,
+ reg);
+
+}
+
+void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *tx_params)
+{
+ uint8_t reg_val = 0;
+
+ al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_1_REG_NUM,
+ ®_val);
+ tx_params->amp = (reg_val & SERDES_IREG_TX_DRV_1_HLEV_MASK) >>
+ SERDES_IREG_TX_DRV_1_HLEV_SHIFT;
+ tx_params->total_driver_units = (reg_val &
+ SERDES_IREG_TX_DRV_1_LEVN_MASK) >>
+ SERDES_IREG_TX_DRV_1_LEVN_SHIFT;
+
+ al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_2_REG_NUM,
+ ®_val);
+ tx_params->c_plus_1 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM1_MASK) >>
+ SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT;
+ tx_params->c_plus_2 = (reg_val & SERDES_IREG_TX_DRV_2_LEVNM2_MASK) >>
+ SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT;
+
+ al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_TX_DRV_3_REG_NUM,
+ ®_val);
+ tx_params->c_minus_1 = (reg_val & SERDES_IREG_TX_DRV_3_LEVNP1_MASK) >>
+ SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT;
+ tx_params->slew_rate = (reg_val & SERDES_IREG_TX_DRV_3_SLEW_MASK) >>
+ SERDES_IREG_TX_DRV_3_SLEW_SHIFT;
+
+ al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM,
+ ®_val);
+ tx_params->override = ((reg_val & SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN) == 0);
+}
+
+
+void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params *params)
+{
+ uint8_t reg = 0;
+
+ if(!params->override) {
+ al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN);
+
+ return;
+ }
+
+ al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN,
+ 0);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT,
+ params->dcgain);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK,
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT,
+ params->dfe_3db_freq);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_1_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT,
+ params->dfe_gain);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT,
+ params->dfe_first_tap_ctrl);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_2_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT,
+ params->dfe_secound_tap_ctrl);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT,
+ params->dfe_third_tap_ctrl);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_3_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK,
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT,
+ params->dfe_fourth_tap_ctrl);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK,
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT,
+ params->low_freq_agc_gain);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_4_REG_NUM,
+ reg);
+
+ reg = 0;
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK,
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT,
+ params->precal_code_sel);
+
+ AL_REG_FIELD_SET(reg,
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK,
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT,
+ params->high_freq_agc_boost);
+
+ al_serdes_grp_reg_write(&obj->grp_info[grp],
+ lane,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_5_REG_NUM,
+ reg);
+}
+
/* Delay for @cnt nanoseconds, rounded up to whole microseconds since the
 * underlying primitive (al_udelay) has microsecond granularity. */
static inline void al_serdes_ns_delay(int cnt)
{
	int us = (cnt + 999) / 1000;

	al_udelay(us);
}
+
+static inline void al_serdes_common_cfg_eth(struct al_serdes_group_info *grp_info)
+{
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK,
+ (0x2 << SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK,
+ (0x1 << SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK,
+ (0xf0 << SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK,
+ (0 << SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK,
+ (1 << SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM,
+ SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK,
+ (0x8 << SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK,
+ (0 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK,
+ (0x64 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK,
+ (0x3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK,
+ (0x1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK,
+ (3 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+ SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK,
+ (1 << SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK,
+ (0xc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT));
+
+ al_serdes_grp_reg_masked_write(
+ grp_info,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM,
+ SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK,
+ (0xcc << SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT));
+}
+
/* Snapshot of the per-group RX/TX inversion related state, taken before a
 * mode change and written back afterwards (see the save/restore helpers
 * below). */
struct al_serdes_mode_rx_tx_inv_state {
	/* AL_TRUE when the fields below hold a valid snapshot to restore */
	al_bool restore;
	/* gen.irst register value (pipe/POR reset controls) */
	uint32_t pipe_rst;
	/* per-lane ipd_multi register values */
	uint32_t ipd_multi[AL_SRDS_NUM_LANES];
	/* per-lane RX polarity (inversion) register values */
	uint8_t inv_value[AL_SRDS_NUM_LANES];
};
+
/*
 * Capture the group's RX/TX inversion state into @state.
 *
 * Only snapshots when the group is out of power-on reset (POR_B_A set in
 * gen.irst); otherwise the registers are not meaningful and state->restore
 * is cleared so the matching restore becomes a no-op.
 */
static void al_serdes_mode_rx_tx_inv_state_save(
	struct al_serdes_group_info *grp_info,
	struct al_serdes_mode_rx_tx_inv_state *state)
{
	if (al_reg_read32(&grp_info->regs_base->gen.irst) & SERDES_GEN_IRST_POR_B_A) {
		int i;

		state->restore = AL_TRUE;
		/* Note: gen.irst is read a second time here for the snapshot */
		state->pipe_rst = al_reg_read32(&grp_info->regs_base->gen.irst);

		for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
			/* Per-lane RX polarity via the indirect register window */
			state->inv_value[i] = al_serdes_grp_reg_read(
				grp_info,
				i,
				AL_SRDS_REG_TYPE_PMA,
				SERDES_IREG_FLD_POLARITY_RX_REG_NUM);
			state->ipd_multi[i] =
				al_reg_read32(&grp_info->regs_base->lane[i].ipd_multi);
		}
	} else {
		state->restore = AL_FALSE;
	}
}
+
+static void al_serdes_mode_rx_tx_inv_state_restore(
+ struct al_serdes_group_info *grp_info,
+ struct al_serdes_mode_rx_tx_inv_state *state)
+{
+ if (state->restore) {
+ int i;
+
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_grp_reg_write(
+ grp_info,
+ i,
+ AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_POLARITY_RX_REG_NUM,
+ state->inv_value[i]);
+ al_reg_write32(
+ &grp_info->regs_base->lane[i].ipd_multi, state->ipd_multi[i]);
+ al_reg_write32_masked(
+ &grp_info->regs_base->gen.irst,
+ (SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL >> i) |
+ (SERDES_GEN_IRST_PIPE_RST_L0_B_A >> i),
+ state->pipe_rst);
+ }
+ }
+}
+
+void al_serdes_mode_set_sgmii(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp)
+{
+ struct al_serdes_group_info *grp_info;
+ struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state;
+
+ al_assert(obj);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+
+ grp_info = &obj->grp_info[grp];
+
+ al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x10110010);
+ al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001);
+ al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(800);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_serdes_ns_delay(500);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(500);
+
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 101, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 102, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 103, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 104, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 105, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 106, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 107, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 108, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 109, 17);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 110, 13);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 101, 153);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 102, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 103, 108);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 104, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 105, 183);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 106, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 107, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 108, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 109, 26);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 110, 7);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 111, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 112, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 113, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 114, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 115, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 116, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 117, 179);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 118, 246);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 119, 208);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 120, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 121, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 122, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 123, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 124, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 125, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 126, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 127, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 128, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 129, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 130, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 131, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 132, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 133, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 134, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 135, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 136, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 137, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 138, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 139, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 140, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 141, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 142, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 143, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 144, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 145, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 146, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 147, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 148, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 149, 63);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 150, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 151, 100);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 152, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 153, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 154, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 155, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 156, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 157, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 158, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 159, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 160, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 161, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 162, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 163, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 164, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 13, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 48, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 49, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 54, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 55, 180);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 93, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 165, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 41, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 354, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 355, 58);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 356, 9);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 357, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 358, 62);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 359, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 701, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 87, 0x1f);
+
+ al_serdes_common_cfg_eth(grp_info);
+
+ al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0);
+ al_serdes_ns_delay(500);
+}
+
+void al_serdes_mode_set_kr(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp)
+{
+ struct al_serdes_group_info *grp_info;
+ struct al_serdes_mode_rx_tx_inv_state rx_tx_inv_state;
+
+ al_assert(obj);
+ al_assert(((int)grp) >= AL_SRDS_GRP_A);
+ al_assert(((int)grp) <= AL_SRDS_GRP_D);
+
+ grp_info = &obj->grp_info[grp];
+
+ al_serdes_mode_rx_tx_inv_state_save(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_reg_write32(&grp_info->regs_base->lane[0].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[1].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[2].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->lane[3].ictl_multi, 0x30330030);
+ al_reg_write32(&grp_info->regs_base->gen.ipd_multi_synth , 0x0001);
+ al_reg_write32(&grp_info->regs_base->lane[0].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[1].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[2].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->lane[3].ipd_multi, 0x0003);
+ al_reg_write32(&grp_info->regs_base->gen.ictl_pcs , 0);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(800);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x000000);
+ al_serdes_ns_delay(500);
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x001000);
+ al_serdes_ns_delay(500);
+
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 101, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 102, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 103, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 104, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 105, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 106, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 107, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 108, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 109, 119);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 110, 5);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 101, 170);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 102, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 103, 108);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 104, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 105, 189);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 106, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 107, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 108, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 109, 27);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 110, 7);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 111, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 112, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 113, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 114, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 115, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 116, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 117, 179);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 118, 246);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 119, 208);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 120, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 121, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 122, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 123, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 124, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 125, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 126, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 127, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 128, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 129, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 130, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 131, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 132, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 133, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 134, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 135, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 136, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 137, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 138, 211);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 139, 226);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 140, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 141, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 142, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 143, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 144, 239);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 145, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 146, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 147, 251);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 148, 255);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 149, 63);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 150, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 151, 50);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 152, 17);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 153, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 154, 1);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 155, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 156, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 157, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 158, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 159, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 160, 8);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 161, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 162, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 163, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 164, 4);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0_LANE_0,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_3_LANE_3,
+ AL_SRDS_REG_TYPE_PMA, 7, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 13, 16);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 48, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 49, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 54, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 55, 149); /*Was 182*/
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 93, 2);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 165, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 41, 6);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 354, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 355, 58);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 356, 9);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 357, 3);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 358, 62);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_TYPE_PMA, 359, 12);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 701, 0);
+ al_serdes_grp_reg_write(grp_info, AL_SRDS_REG_PAGE_0123_LANES_0123,
+ AL_SRDS_REG_TYPE_PMA, 87, 0x1f);
+
+ al_serdes_common_cfg_eth(grp_info);
+
+ al_serdes_mode_rx_tx_inv_state_restore(grp_info, &rx_tx_inv_state);
+
+ al_reg_write32(&grp_info->regs_base->gen.irst, 0x0011F0);
+ al_serdes_ns_delay(500);
+}
+
+void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params* rx_params)
+{
+ uint8_t temp_val;
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_1_REG_NUM,
+ &temp_val);
+ rx_params->dcgain = (temp_val & SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT;
+ rx_params->dfe_3db_freq = (temp_val &
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK) >>
+ SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT;
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_2_REG_NUM,
+ &temp_val);
+ rx_params->dfe_gain = (temp_val &
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT;
+ rx_params->dfe_first_tap_ctrl = (temp_val &
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT;
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_3_REG_NUM,
+ &temp_val);
+ rx_params->dfe_secound_tap_ctrl = (temp_val &
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT;
+ rx_params->dfe_third_tap_ctrl = (temp_val &
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT;
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_4_REG_NUM,
+ &temp_val);
+ rx_params->dfe_fourth_tap_ctrl = (temp_val &
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT;
+ rx_params->low_freq_agc_gain = (temp_val &
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK) >>
+ SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT;
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RX_CALEQ_5_REG_NUM,
+ &temp_val);
+ rx_params->precal_code_sel = (temp_val &
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK) >>
+ SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT;
+ rx_params->high_freq_agc_boost = (temp_val &
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK) >>
+ SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT;
+
+ al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM,
+ &temp_val);
+ rx_params->override = ((temp_val & SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN) == 0);
+}
+
+#if ( SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM)
+#error Wrong assumption
+#endif
+int al_serdes_rx_equalization(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane)
+{
+ uint8_t serdes_ireg_fld_rxcalroamyadjust_locwren_val;
+ uint8_t serdes_ireg_fld_rxroam_xorbitsel_val;
+ uint8_t serdes_ireg_fld_pcsrxeq_locwren_val;
+ uint8_t serdes_ireg_fld_rxcal_locwren_val;
+ uint8_t temp_val;
+ uint8_t done;
+
+ int test_score;
+ int i;
+
+ /*
+ * Make sure Roam Eye mechanism is not overridden
+ * Lane SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN = 1,
+ * so Rx 4-Point Eye process is not overridden
+ * Lane SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN = 1,
+ * so Eye Roam latch is not overridden
+ * Lane SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN = 1,
+ * so Eye Roam latch 'X adjust' is not overridden
+ * Lane SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN = 1,
+ * so Eye Roam latch 'Y adjust' is not overridden
+ * Lane SERDES_IREG_FLD_RXROAM_XORBITSEL = 0/1,
+ * so Eye Roamlatch works on the right Eye position (XORBITSEL)
+ * For most cases 0 is needed, but sometimes 1 is needed.
+ * I couldn't sort out why is this so the code uses a global
+ * XORBITSELmode variable, set by the user (GUI). Default is 0.
+ * control must be internal. At the end we restore original setting
+ */
+
+ /* save current values for restoring them later in the end */
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+ &serdes_ireg_fld_rxcal_locwren_val);
+
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+ &serdes_ireg_fld_rxcalroamyadjust_locwren_val );
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+ &serdes_ireg_fld_rxroam_xorbitsel_val );
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+ &serdes_ireg_fld_pcsrxeq_locwren_val );
+
+ /*
+ * Set Bits:
+ * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN
+ * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN
+ * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
+ * to return 4pt-RxEye and EyeRoam Latch to internal logic
+ *
+ * clear bit SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN
+ * AGC/DFE controlled via PMA registers
+ */
+ temp_val = serdes_ireg_fld_rxcal_locwren_val;
+ temp_val |= SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN;
+ temp_val |= SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN;
+ temp_val |= SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN;
+ temp_val |= SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN;
+
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+ temp_val );
+
+ /*
+ * Set bit SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN
+ * to return EyeRoam Latch Y to internal logic
+ */
+ temp_val = serdes_ireg_fld_rxcalroamyadjust_locwren_val |
+ SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+ temp_val );
+
+ /*
+ * Clear Bit: SERDES_IREG_FLD_RXROAM_XORBITSEL
+ * so XORBITSEL=0, needed for the Eye mapping.
+ */
+ temp_val = serdes_ireg_fld_rxroam_xorbitsel_val &
+ ~SERDES_IREG_FLD_RXROAM_XORBITSEL;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+ temp_val );
+
+ /*
+ * Take Control from int.pin over RxEQ process.
+ * Clear Bit SERDES_IREG_FLD_PCSRXEQ_LOCWREN
+ * to override RxEQ via PMA
+ */
+ temp_val = serdes_ireg_fld_pcsrxeq_locwren_val &
+ ~SERDES_IREG_FLD_PCSRXEQ_LOCWREN;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+ temp_val );
+
+
+ /*
+ * Start/Stop RxEQ Cal is via PCSRXEQ_START: 1=START. 0=STOP.
+ * Clear Bit SERDES_IREG_FLD_PCSRXEQ_START
+ * to start fresh from Stop
+ */
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+ &temp_val );
+ temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+ temp_val );
+
+ /* Set Bit SERDES_IREG_FLD_PCSRXEQ_START
+ * to begin Rx Eq Cal */
+ temp_val |= SERDES_IREG_FLD_PCSRXEQ_START;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+ temp_val );
+
+ /* Poll on RxEq Cal completion. SERDES_IREG_FLD_RXEQ_DONE. 1=Done. */
+ for( i = 0; i < AL_SERDES_RX_EQUAL_TRIES; ++i ) {
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM,
+ &done );
+ done &= SERDES_IREG_FLD_RXEQ_DONE;
+
+ /* Check if RxEQ Cal is done */
+ if (done)
+ break;
+ al_msleep(AL_SERDES_RX_EQUAL_MDELAY);
+ }
+
+ if (!done) {
+ al_err("%s: Timeout!\n", __func__);
+ return -1;
+ }
+
+ /* Stop the RxEQ process. */
+ temp_val &= ~SERDES_IREG_FLD_PCSRXEQ_START;
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM,
+ temp_val );
+ /* Get score */
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM,
+ &temp_val );
+ test_score = (int)( (temp_val & 0xFF) << 6 );
+ al_serdes_reg_read(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM,
+ &temp_val );
+ test_score += (int)(temp_val & SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK);
+
+ /* Restore start values */
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+ serdes_ireg_fld_rxcal_locwren_val);
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+ serdes_ireg_fld_rxcalroamyadjust_locwren_val );
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+ serdes_ireg_fld_rxroam_xorbitsel_val );
+ al_serdes_reg_write(
+ obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+ SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM,
+ serdes_ireg_fld_pcsrxeq_locwren_val );
+
+ return test_score;
+}
+
+#if ( SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM || \
+ SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM != \
+ SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM)
+#error Wrong assumption
+#endif
+int al_serdes_calc_eye_size(
+	struct al_serdes_obj *obj,
+	enum al_serdes_group grp,
+	enum al_serdes_lane lane,
+	int* width,
+	int* height)
+{
+	uint8_t rxcaleyediagfsm_x_y_valweight_val;
+	uint8_t rxcaleyediagfsm_xvalcoarse_val;
+	uint8_t rxcaleyediagfsm_xvalfine_val;
+	uint8_t rxcaleyediagfsm_yvalcoarse_val;
+	uint8_t rxcaleyediagfsm_yvalfine_val;
+	uint8_t rxlock2ref_locwren_val;
+	uint8_t rxcal_locwren_val;
+	uint8_t rxcalroamyadjust_locwren_val;
+	uint8_t rxlock2ref_ovren_val;
+
+	int i;
+	uint8_t status = 0; /* initialized in case the polling loop is skipped */
+	uint8_t reg_value;
+
+	/* Save Registers */
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			&rxlock2ref_locwren_val);
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			&rxcal_locwren_val);
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			&rxcalroamyadjust_locwren_val);
+	al_serdes_reg_read(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+			&rxlock2ref_ovren_val);
+
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			&rxcaleyediagfsm_x_y_valweight_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			&rxcaleyediagfsm_xvalcoarse_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			&rxcaleyediagfsm_xvalfine_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			&rxcaleyediagfsm_yvalcoarse_val);
+	al_serdes_reg_read(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			&rxcaleyediagfsm_yvalfine_val);
+
+	/*
+	 * Clear Bit:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN
+	 * to override RxEQ via PMA
+	 * Set Bits:
+	 * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN,
+	 * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
+	 * to keep Eye Diag Roam controlled internally
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN,
+			SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN |
+			SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN);
+	/*
+	 * Set Bit:
+	 * SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN
+	 * to keep Eye Diag Roam controlled internally
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN);
+
+	/*
+	 * Clear Bit:
+	 * SERDES_IREG_FLD_RXROAM_XORBITSEL,
+	 * so XORBITSEL=0, needed for the Eye mapping
+	 * Set Bit:
+	 * SERDES_IREG_FLD_RXLOCK2REF_OVREN,
+	 * so RXLOCK2REF_OVREN=1, keeping lock to data, preventing data hit
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN |
+			SERDES_IREG_FLD_RXROAM_XORBITSEL,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN);
+
+
+	/*
+	 * Clear Bit:
+	 * SERDES_IREG_FLD_RXLOCK2REF_LOCWREN,
+	 * so RXLOCK2REF_LOCWREN=0, to override control
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN,
+			0);
+
+	/* Width Calculation */
+
+	/* Return Value = 0*Y + 1*X */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			0x01);
+	/* X coarse scan step = 3 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			0x03);
+	/* X fine scan step = 1 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			0x01);
+	/* Y coarse scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			0x00);
+	/* Y fine scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			0x00);
+
+	/*
+	 * Set Bit:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+	 * to start Eye measurement
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START);
+
+	for( i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i ) {
+		/* Check if RxEQ Cal is done */
+		al_serdes_reg_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM,
+			&status );
+		if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)
+			break;
+		al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY);
+	}
+
+	if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) {
+		al_err("%s: eye measure error!\n", __func__);
+		return -1;
+	}
+
+	if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) {
+		al_err("%s: eye measure timeout!\n", __func__);
+		return -1;
+	}
+
+	/* Read Eye Opening Metrics, Bits:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB,
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB
+	 */
+	al_serdes_reg_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM,
+		&reg_value );
+	*width = reg_value << 6;
+	al_serdes_reg_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM,
+		&reg_value );
+	*width += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE;
+
+	/*
+	 * Clear Bit:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+	 * to stop Eye measurement
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			0);
+
+	/* Height Calculation */
+
+	/* Return Value = 1*Y + 0*X */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			0x10);
+	/* X coarse scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			0x00);
+	/* X fine scan step = 0 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			0x00);
+	/* Y coarse scan step = 3 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			0x03);
+	/* Y fine scan step = 1 */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			0x01);
+
+	/*
+	 * Set Bit:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+	 * to start Eye measurement
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START);
+
+	for( i = 0; i < AL_SERDES_RX_EYE_CAL_TRIES; ++i ) {
+		/* Check if RxEQ Cal is done */
+		al_serdes_reg_read(
+			obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM,
+			&status );
+		if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)
+			break;
+		al_msleep(AL_SERDES_RX_EYE_CAL_MDELAY);
+	}
+
+	if (status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR) {
+		al_err("%s: eye measure error!\n", __func__);
+		return -1;
+	}
+
+	if (!(status & SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE)) {
+		al_err("%s: eye measure timeout!\n", __func__);
+		return -1;
+	}
+
+	/* Read Eye Opening Metrics, Bits:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB,
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB
+	 */
+	al_serdes_reg_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM,
+		&reg_value );
+	*height = reg_value << 6;
+	al_serdes_reg_read(
+		obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+		SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM,
+		&reg_value );
+	*height += reg_value & SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE;
+
+	/*
+	 * Clear Bit:
+	 * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+	 * to stop Eye measurement
+	 */
+	al_serdes_grp_reg_masked_write(&obj->grp_info[grp],
+			lane,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START,
+			0);
+
+	/* Restore Registers */
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM,
+			rxcaleyediagfsm_x_y_valweight_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM,
+			rxcaleyediagfsm_xvalcoarse_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM,
+			rxcaleyediagfsm_xvalfine_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM,
+			rxcaleyediagfsm_yvalcoarse_val);
+	al_serdes_reg_write(obj, grp, AL_SRDS_REG_PAGE_4_COMMON,
+			AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM,
+			rxcaleyediagfsm_yvalfine_val);
+
+	al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM,
+			rxlock2ref_locwren_val);
+	al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM,
+			rxcal_locwren_val);
+	al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM,
+			rxcalroamyadjust_locwren_val);
+	al_serdes_reg_write(obj, grp, lane, AL_SRDS_REG_TYPE_PMA,
+			SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM,
+			rxlock2ref_ovren_val);
+	return 0;
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_ssm.c b/arch/arm/mach-alpine/al_hal/al_hal_ssm.c
new file mode 100644
index 0000000..f4973f2
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_ssm.c
@@ -0,0 +1,218 @@
+/*******************************************************************************
+Copyright (C) 2014 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_ssm.c
+ *
+ */
+
+#include "al_hal_ssm.h"
+
+/**
+ * Initialize acceleration DMA for RAID/Crypto usage
+ *
+ * @param ssm_dma ssm dma handle
+ * @param params parameters from upper layer
+ *
+ * @return 0 on success.
+ */
+int al_ssm_dma_init(
+ struct al_ssm_dma *ssm_dma,
+ struct al_ssm_dma_params *params)
+{
+ struct al_m2m_udma_params m2m_params;
+ struct unit_regs __iomem *unit_regs;
+ int rc;
+
+ al_dbg("ssm [%s]: Initialize unit\n", params->name);
+
+ ssm_dma->dev_id = params->dev_id;
+ ssm_dma->rev_id = params->rev_id;
+
+ m2m_params.name = params->name;
+ unit_regs = (struct unit_regs __iomem *)params->udma_regs_base;
+ m2m_params.m2s_regs_base = &unit_regs->m2s;
+ m2m_params.s2m_regs_base = &unit_regs->s2m;
+ m2m_params.num_of_queues = params->num_of_queues;
+ m2m_params.max_m2s_descs_per_pkt = AL_SSM_MAX_SRC_DESCS;
+ m2m_params.max_s2m_descs_per_pkt = AL_SSM_MAX_DST_DESCS;
+
+ /* initialize the udma */
+ rc = al_m2m_udma_init(&ssm_dma->m2m_udma, &m2m_params);
+ if (rc != 0)
+ al_err("failed to initialize udma, error %d\n", rc);
+ return rc;
+}
+
+/**
+ * Initialize the m2s(tx) and s2m(rx) components of the queue
+ *
+ * @param ssm_dma ssm dma handle
+ * @param qid queue index
+ * @param tx_params TX UDMA params
+ * @param rx_params RX UDMA params
+ * @param q_type indicate q type (crc/csum/memcpy, crypto, raid)
+ *
+ * @return 0 if no error found.
+ * -EINVAL if the qid is out of range
+ * -EIO if queue was already initialized
+ */
+int al_ssm_dma_q_init(struct al_ssm_dma *ssm_dma,
+ uint32_t qid,
+ struct al_udma_q_params *tx_params,
+ struct al_udma_q_params *rx_params,
+ enum al_ssm_q_type q_type)
+{
+ int rc;
+
+ al_dbg("ssm [%s]: Initialize queue %d\n",
+ ssm_dma->m2m_udma.name, qid);
+
+ tx_params->dev_id = ssm_dma->dev_id;
+ tx_params->rev_id = ssm_dma->rev_id;
+ rx_params->dev_id = ssm_dma->dev_id;
+ rx_params->rev_id = ssm_dma->rev_id;
+
+ rc = al_m2m_udma_q_init(&ssm_dma->m2m_udma, qid, tx_params, rx_params);
+ if (rc != 0)
+ al_err("ssm [%s]: failed to initialize tx q %d, error %d\n",
+ ssm_dma->m2m_udma.name, qid, rc);
+ else
+ ssm_dma->q_types[qid] = q_type;
+
+ return rc;
+}
+
+/**
+ * Change the DMA state
+ *
+ * @param ssm_dma ssm DMA handle
+ * @param dma_state the new state
+ *
+ * @return 0
+ */
+int al_ssm_dma_state_set(
+ struct al_ssm_dma *ssm_dma,
+ enum al_udma_state dma_state)
+{
+ int rc;
+
+ rc = al_m2m_udma_state_set(&ssm_dma->m2m_udma, dma_state);
+ if (rc != 0)
+ al_err("ssm [%s]: failed to change state, error %d\n",
+ ssm_dma->m2m_udma.name, rc);
+ return rc;
+}
+
+/**
+ * Get udma handle of the tx or rx udma, this handle can be used to call misc
+ * configuration functions defined at al_udma_config.h
+ *
+ * @param ssm_dma ssm DMA handle
+ * @param type tx or rx udma
+ * @param udma the requested udma handle written to this pointer
+ *
+ * @return 0
+ */
+int al_ssm_dma_handle_get(
+ struct al_ssm_dma *ssm_dma,
+ enum al_udma_type type,
+ struct al_udma **udma)
+{
+ return al_m2m_udma_handle_get(&ssm_dma->m2m_udma, type, udma);
+}
+
+/******************************************************************************
+ ******************************************************************************/
+struct al_udma *al_ssm_dma_tx_udma_handle_get(
+ struct al_ssm_dma *ssm_dma)
+{
+ struct al_udma *udma;
+ int err;
+
+ err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_TX, &udma);
+ if (err)
+ return NULL;
+
+ return udma;
+}
+
+/******************************************************************************
+ ******************************************************************************/
+struct al_udma_q *al_ssm_dma_tx_queue_handle_get(
+ struct al_ssm_dma *ssm_dma,
+ unsigned int qid)
+{
+ struct al_udma *udma;
+ int err;
+
+ err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_TX, &udma);
+ if (err)
+ return NULL;
+
+ return &udma->udma_q[qid];
+}
+
+/******************************************************************************
+ ******************************************************************************/
+struct al_udma *al_ssm_dma_rx_udma_handle_get(
+ struct al_ssm_dma *ssm_dma)
+{
+ struct al_udma *udma;
+ int err;
+
+ err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_RX, &udma);
+ if (err)
+ return NULL;
+
+ return udma;
+}
+
+/******************************************************************************
+ ******************************************************************************/
+struct al_udma_q *al_ssm_dma_rx_queue_handle_get(
+ struct al_ssm_dma *ssm_dma,
+ unsigned int qid)
+{
+ struct al_udma *udma;
+ int err;
+
+ err = al_m2m_udma_handle_get(&ssm_dma->m2m_udma, UDMA_RX, &udma);
+ if (err)
+ return NULL;
+
+ return &udma->udma_q[qid];
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_udma_config.c b/arch/arm/mach-alpine/al_hal/al_hal_udma_config.c
new file mode 100644
index 0000000..6a37400
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_udma_config.c
@@ -0,0 +1,1314 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_config.c
+ *
+ * @brief Universal DMA HAL driver for configurations
+ *
+ */
+
+#include <al_hal_common.h> /* NOTE(review): header names restored — the patch's angle-bracket arguments were stripped by HTML-entity mangling; verify against upstream */
+#include <al_hal_udma_regs.h>
+#include <al_hal_udma_config.h>
+
+/**************** Misc configurations *********************/
+/** Configure AXI generic configuration */
+int al_udma_axi_set(struct udma_gen_axi *axi_regs,
+ struct al_udma_axi_conf *axi)
+{
+ uint32_t reg;
+
+ al_reg_write32(&axi_regs->cfg_1, axi->axi_timeout);
+
+ reg = al_reg_read32(&axi_regs->cfg_2);
+ reg &= ~UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK;
+ reg |= axi->arb_promotion;
+ al_reg_write32(&axi_regs->cfg_2, reg);
+
+ reg = al_reg_read32(&axi_regs->endian_cfg);
+ if (axi->swap_8_bytes == AL_TRUE)
+ reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;
+ else
+ reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;
+
+ if (axi->swap_s2m_data == AL_TRUE)
+ reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;
+ else
+ reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;
+
+ if (axi->swap_s2m_desc == AL_TRUE)
+ reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;
+ else
+ reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;
+
+ if (axi->swap_m2s_data == AL_TRUE)
+ reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;
+ else
+ reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;
+
+ if (axi->swap_m2s_desc == AL_TRUE)
+ reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;
+ else
+ reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;
+
+ al_reg_write32(&axi_regs->endian_cfg, reg);
+ return 0;
+}
+
+/* Configure UDMA AXI M2S configuration */
+/** Configure AXI M2S submaster */
+static int al_udma_m2s_axi_sm_set(struct al_udma_axi_submaster *m2s_sm,
+ uint32_t *cfg_1, uint32_t *cfg_2,
+ uint32_t *cfg_max_beats)
+{
+ uint32_t reg;
+ reg = al_reg_read32(cfg_1);
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
+ reg |= m2s_sm->id & UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
+ reg |= (m2s_sm->cache_type <<
+ UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT) &
+ UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+ reg |= (m2s_sm->burst << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT) &
+ UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+ al_reg_write32(cfg_1, reg);
+
+ reg = al_reg_read32(cfg_2);
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
+ reg |= m2s_sm->used_ext & UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
+ reg |= (m2s_sm->bus_size <<
+ UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT) &
+ UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
+ reg |= (m2s_sm->qos << UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT) &
+ UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
+ reg |= (m2s_sm->prot << UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT) &
+ UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
+ al_reg_write32(cfg_2, reg);
+
+ reg = al_reg_read32(cfg_max_beats);
+ reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ reg |= m2s_sm->max_beats &
+ UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ al_reg_write32(cfg_max_beats, reg);
+
+ return 0;
+}
+
+/** Configure UDMA AXI M2S configuration */
+int al_udma_m2s_axi_set(struct al_udma *udma,
+ struct al_udma_m2s_axi_conf *axi_m2s)
+{
+ uint32_t reg;
+
+ al_udma_m2s_axi_sm_set(&axi_m2s->comp_write,
+ &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1,
+ &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_2,
+ &udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+
+ al_udma_m2s_axi_sm_set(&axi_m2s->data_read,
+ &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1,
+ &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_2,
+ &udma->udma_regs->m2s.axi_m2s.data_rd_cfg);
+
+ al_udma_m2s_axi_sm_set(&axi_m2s->desc_read,
+ &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1,
+ &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_2,
+ &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_3);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg);
+ if (axi_m2s->break_on_max_boundary == AL_TRUE)
+ reg |= UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
+ else
+ reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+ reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+ reg |= (axi_m2s->min_axi_beats <<
+ UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
+ UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg);
+ reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
+ reg |= axi_m2s->ostand_max_data_read &
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
+ reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
+ reg |= (axi_m2s->ostand_max_desc_read <<
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT) &
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
+ reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
+ reg |= (axi_m2s->ostand_max_comp_req <<
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT) &
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
+ reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
+ reg |= (axi_m2s->ostand_max_comp_write <<
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT) &
+ UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg);
+ return 0;
+}
+
+/** Configure AXI S2M submaster */
+static int al_udma_s2m_axi_sm_set(struct al_udma_axi_submaster *s2m_sm,
+ uint32_t *cfg_1, uint32_t *cfg_2,
+ uint32_t *cfg_max_beats)
+{
+ uint32_t reg;
+ reg = al_reg_read32(cfg_1);
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+ reg |= s2m_sm->id & UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
+ reg |= (s2m_sm->cache_type <<
+ UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT) &
+ UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+ reg |= (s2m_sm->burst << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT) &
+ UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+ al_reg_write32(cfg_1, reg);
+
+ reg = al_reg_read32(cfg_2);
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
+ reg |= s2m_sm->used_ext & UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
+ reg |= (s2m_sm->bus_size << UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT) &
+ UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
+ reg |= (s2m_sm->qos << UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT) &
+ UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
+ reg |= (s2m_sm->prot << UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT) &
+ UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
+ al_reg_write32(cfg_2, reg);
+
+ reg = al_reg_read32(cfg_max_beats);
+ reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ reg |= s2m_sm->max_beats &
+ UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ al_reg_write32(cfg_max_beats, reg);
+
+ return 0;
+}
+
+/** Configure UDMA AXI S2M configuration */
+int al_udma_s2m_axi_set(struct al_udma *udma,
+ struct al_udma_s2m_axi_conf *axi_s2m)
+{
+
+ uint32_t reg;
+
+ al_udma_s2m_axi_sm_set(&axi_s2m->data_write,
+ &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1,
+ &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_2,
+ &udma->udma_regs->s2m.axi_s2m.data_wr_cfg);
+
+ al_udma_s2m_axi_sm_set(&axi_s2m->desc_read,
+ &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4,
+ &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_5,
+ &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);
+
+ al_udma_s2m_axi_sm_set(&axi_s2m->comp_write,
+ &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1,
+ &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_2,
+ &udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);
+ if (axi_s2m->break_on_max_boundary == AL_TRUE)
+ reg |= UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
+ else
+ reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);
+ reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+ reg |= (axi_s2m->min_axi_beats <<
+ UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
+ UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd);
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;
+ reg |= axi_s2m->ostand_max_desc_read &
+ UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;
+
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;
+ reg |= (axi_s2m->ack_fifo_depth <<
+ UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT) &
+ UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;
+
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr);
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
+ reg |= axi_s2m->ostand_max_data_req &
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
+ reg |= (axi_s2m->ostand_max_data_write <<
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT) &
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
+ reg |= (axi_s2m->ostand_max_comp_req <<
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT) &
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
+ reg |= (axi_s2m->ostand_max_comp_write <<
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT) &
+ UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg);
+ return 0;
+}
+
+/** M2S packet len configuration */
+int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
+ struct al_udma_m2s_pkt_len_conf *conf)
+{
+ uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s.cfg_len);
+ uint32_t max_supported_size = UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;
+
+ al_assert(udma->type == UDMA_TX);
+
+ if (conf->encode_64k_as_zero == AL_TRUE)
+ max_supported_size += 1; /* 64K */
+
+ if (conf->max_pkt_size > max_supported_size) {
+ al_err("udma [%s]: requested max_pkt_size (0x%x) exceeds the"
+ "supported limit (0x%x)\n", udma->name,
+ conf->max_pkt_size, max_supported_size);
+ return -EINVAL;
+ }
+
+ reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K;
+ if (conf->encode_64k_as_zero == AL_TRUE)
+ reg |= UDMA_M2S_CFG_LEN_ENCODE_64K;
+ else
+ reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K;
+
+ reg &= ~UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;
+ reg |= conf->max_pkt_size;
+
+ al_reg_write32(&udma->udma_regs->m2s.m2s.cfg_len, reg);
+ return 0;
+}
+
+/** Report Error - to be used for abort */
+void al_udma_err_report(struct al_udma *udma __attribute__((__unused__)))
+{
+ return;
+}
+
+/** Statistics - TBD */
+void al_udma_stats_get(struct al_udma *udma __attribute__((__unused__)))
+{
+ return;
+}
+
+/** Configure UDMA M2S descriptor prefetch */
+int al_udma_m2s_pref_set(struct al_udma *udma,
+				struct al_udma_m2s_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	reg |= conf->desc_fifo_depth;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);
+
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else {
+		al_err("udma [%s]: requested descriptor prefetch arbiter "
+			"mode (%d) is invalid\n", udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
+	reg |= conf->max_desc_per_packet &
+		UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+	reg |= conf->min_burst_below_thr &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+	reg |= (conf->min_burst_above_thr <<
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+
+	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+	reg |= (conf->pref_thr <<
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.data_cfg);
+	reg &= ~UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;
+	reg |= conf->data_fifo_depth &
+		UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;
+
+	reg &= ~UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
+	reg |= (conf->max_pkt_limit
+			<< UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT) &
+		UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg);
+
+	return 0;
+}
+
+/** Get the M2S UDMA descriptor prefetch */
+int al_udma_m2s_pref_get(struct al_udma *udma,
+				struct al_udma_m2s_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
+	conf->desc_fifo_depth =
+	    AL_REG_FIELD_GET(reg, UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);
+	/* the setter encodes SRR via the PREF_FORCE_RR bit, so test it here */
+	if (reg & UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR)
+		conf->sch_mode = SRR;
+	else
+		conf->sch_mode = STRICT;
+	conf->max_desc_per_packet =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+
+	conf->min_burst_below_thr =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT);
+
+	conf->min_burst_above_thr =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT);
+
+	conf->pref_thr = AL_REG_FIELD_GET(reg,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT);
+	return 0;
+}
+
+/* Set max descriptors per M2S packet, tuning the prefetch threshold and
+ * descriptor-fetch burst size accordingly. */
+int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs)
+{
+	uint32_t pref_thr = max_descs;
+	uint32_t min_burst_above_thr = 4;
+	al_assert(max_descs <= AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET);
+	al_assert(max_descs > 0);
+
+	/* increase min_burst_above_thr so larger burst can be used to fetch
+	 * descriptors */
+	if (pref_thr >= 8)
+		min_burst_above_thr = 8;
+	else {
+		/* don't set prefetch threshold too low so we can have the
+		 * min_burst_above_thr >= 4 */
+		pref_thr = 4;
+	}
+
+	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2,
+		UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
+		max_descs << UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);
+
+	/* Bug fix: write the (possibly clamped) pref_thr, not max_descs —
+	 * previously pref_thr was computed above but never used. */
+	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3,
+		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
+		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+		(pref_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
+		(min_burst_above_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));
+
+	return 0;
+}
+
+/* Set max descriptors per S2M packet, tuning the prefetch threshold and
+ * descriptor-fetch burst size accordingly. */
+int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs)
+{
+	uint32_t pref_thr = max_descs;
+	uint32_t min_burst_above_thr = 4;
+	al_assert(max_descs <= AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET);
+	al_assert(max_descs > 0);
+
+	/* increase min_burst_above_thr so larger burst can be used to fetch
+	 * descriptors */
+	if (pref_thr >= 8)
+		min_burst_above_thr = 8;
+	else
+		/* don't set prefetch threshold too low so we can have the
+		 * min_burst_above_thr >= 4 */
+		pref_thr = 4;
+
+	/* Bug fix: write the (possibly clamped) pref_thr, not max_descs —
+	 * previously pref_thr was computed above but never used. */
+	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3,
+		UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
+		(pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
+		(min_burst_above_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));
+
+	return 0;
+}
+
+/* Enable/disable the S2M writer's "full line mode" bit in data_cfg_2.
+ * NOTE(review): semantics inferred from the bit name only — confirm the
+ * padding/alignment behavior against the UDMA register specification. */
+int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable)
+{
+	uint32_t val = 0;
+
+	if (enable == AL_TRUE) {
+		val = UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE;
+		al_info("udma [%s]: full line write enabled\n", udma->name);
+	}
+
+	/* read-modify-write of only the FULL_LINE_MODE bit */
+	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_wr.data_cfg_2,
+			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE,
+			val);
+	return 0;
+}
+
+/** Configure S2M UDMA descriptor prefetch */
+int al_udma_s2m_pref_set(struct al_udma *udma,
+				struct al_udma_s2m_desc_pref_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
+	reg |= conf->desc_fifo_depth;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2);
+
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
+	else {
+		al_err("udma [%s]: requested descriptor prefetch arbiter "
+				"mode (%d) is invalid\n", udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	if (conf->q_promotion == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;
+
+	if (conf->force_promotion == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;
+
+	if (conf->en_pref_prediction == AL_TRUE)
+		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;
+	else
+		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;
+	reg |= (conf->promotion_th
+			<< UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+	reg |= (conf->pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+	reg |= conf->min_burst_below_thr &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
+
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+	/* Bug fix: the shifted value was masked with the BELOW_THR mask,
+	 * which could truncate the ABOVE_THR field to the wrong bits. */
+	reg |= (conf->min_burst_above_thr <<
+			UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
+		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4);
+	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
+	reg |= conf->a_full_thr & UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4, reg);
+
+	return 0;
+}
+
+/* Configure S2M UDMA data write: FIFO depth, packet limit, FIFO margin
+ * (data_cfg_1), then descriptor-wait timer and the behaviour flags
+ * (data_cfg_2). Flags not listed in the mask below are left untouched. */
+int al_udma_s2m_data_write_set(struct al_udma *udma,
+				struct al_udma_s2m_data_write_conf *conf)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1);
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
+	reg |= conf->data_fifo_depth &
+			UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
+	reg |= (conf->max_pkt_limit <<
+			UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT) &
+			UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
+	reg &= ~UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
+	reg |= (conf->fifo_margin <<
+			UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT) &
+			UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);
+	reg &= ~UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
+	reg |= conf->desc_wait_timer &
+			UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
+	/* only the five no-descriptor/header behaviour flags are updated */
+	reg &= ~(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
+			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
+			UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
+	reg |= conf->flags &
+			(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
+			UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
+			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
+			UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);
+
+	return 0;
+}
+
+/* Configure S2M UDMA completion: descriptor size, word counting, queue
+ * promotion/arbitration (cfg_1c), FIFO depths (cfg_2c) and the
+ * application-ack timeout register. */
+int al_udma_s2m_completion_set(struct al_udma *udma,
+				struct al_udma_s2m_completion_conf *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c);
+	reg &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+	reg |= conf->desc_size & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+	if (conf->cnt_words == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
+	if (conf->q_promotion == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
+	if (conf->force_rr == AL_TRUE)
+		reg |= UDMA_S2M_COMP_CFG_1C_FORCE_RR;
+	else
+		reg &= ~UDMA_S2M_COMP_CFG_1C_FORCE_RR;
+	reg &= ~UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
+	reg |= (conf->q_free_min << UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT) &
+		UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c);
+	reg &= ~UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
+	reg |= conf->comp_fifo_depth
+				& UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
+	reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+	reg |= (conf->unack_fifo_depth
+			<< UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT) &
+			UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+	/* full 32-bit timeout value, no field masking */
+	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_application_ack,
+			conf->timeout);
+	return 0;
+}
+
+/** Configure the M2S UDMA scheduling mode (DWRR enable, packet mode,
+ * weight increment, increment factor, and the deficit counter init value).
+ */
+int al_udma_m2s_sc_set(struct al_udma *udma,
+					struct al_udma_m2s_dwrr_conf *sched)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched);
+
+	if (sched->enable_dwrr == AL_TRUE)
+		reg |= UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;
+	else
+		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;
+
+	if (sched->pkt_mode == AL_TRUE)
+		reg |= UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;
+	else
+		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;
+
+	/* NOTE(review): shifted values are OR'd without re-masking — callers
+	 * must keep weight/inc_factor within their field widths. */
+	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK;
+	reg |= sched->weight << UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT;
+	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK;
+	reg |= sched->inc_factor << UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt);
+	reg &= ~UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK;
+	reg |= sched->deficit_init_val;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt, reg);
+
+	return 0;
+}
+
+/** Configure the M2S UDMA global rate limiter: packet/byte mode and the
+ * short cycle size (gen_cfg), plus the token counter reset value. */
+int al_udma_m2s_rlimit_set(struct al_udma *udma,
+				struct al_udma_m2s_rlimit_mode *mode)
+{
+	uint32_t reg = al_reg_read32(
+				&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg);
+
+	if (mode->pkt_mode_en == AL_TRUE)
+		reg |= UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN;
+	else
+		reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN;
+	reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK;
+	reg |= mode->short_cycle_sz &
+	    UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg, reg);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token);
+	reg &= ~UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
+	reg |= mode->token_init_val &
+		    UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token, reg);
+
+	return 0;
+}
+
+/* Reset the M2S rate limiter cycle counter by setting the RST bit
+ * (read-modify-write; other bits in ctrl_cycle_cnt are preserved). */
+int al_udma_m2s_rlimit_reset(struct al_udma *udma)
+{
+	uint32_t reg = al_reg_read32(
+			&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt);
+	reg |= UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt,
+			reg);
+	return 0;
+}
+
+/** Configure the Stream/Q rate limitation (shared helper for the stream
+ * and per-queue rate limiters). Bug fix: the "&regs" expressions had been
+ * corrupted into the "(R)" mojibake character ("&reg;" HTML entity),
+ * which does not compile; restored throughout. */
+static int al_udma_common_rlimit_set(struct udma_rlimit_common *regs,
+		struct al_udma_m2s_rlimit_cfg *conf)
+{
+	uint32_t reg = al_reg_read32(&regs->cfg_1s);
+	/* mask max burst size, and enable/pause control bits */
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN;
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE;
+	reg |= conf->max_burst_sz &
+	    UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
+	al_reg_write32(&regs->cfg_1s, reg);
+
+	reg = al_reg_read32(&regs->cfg_cycle);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
+	reg |= conf->long_cycle_sz &
+	    UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
+	al_reg_write32(&regs->cfg_cycle, reg);
+
+	reg = al_reg_read32(&regs->cfg_token_size_1);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
+	reg |= conf->long_cycle &
+	    UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
+	al_reg_write32(&regs->cfg_token_size_1, reg);
+
+	reg = al_reg_read32(&regs->cfg_token_size_2);
+	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
+	reg |= conf->short_cycle &
+	    UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
+	al_reg_write32(&regs->cfg_token_size_2, reg);
+
+	reg = al_reg_read32(&regs->mask);
+	reg &= ~0xf; /* only bits 0-3 defined */
+	reg |= conf->mask & 0xf;
+	al_reg_write32(&regs->mask, reg);
+
+	return 0;
+}
+
+/* Apply an enable/pause/reset action on a stream or queue rate limiter.
+ * Returns -EINVAL for an unknown action. Bug fix: "&regs" expressions had
+ * been corrupted into the "(R)" mojibake character ("&reg;" HTML entity);
+ * restored throughout. */
+static int al_udma_common_rlimit_act(struct udma_rlimit_common *regs,
+		enum al_udma_m2s_rlimit_action act)
+{
+	uint32_t reg;
+
+	switch (act) {
+	case AL_UDMA_STRM_RLIMIT_ENABLE:
+		reg = al_reg_read32(&regs->cfg_1s);
+		reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN;
+		al_reg_write32(&regs->cfg_1s, reg);
+		break;
+	case AL_UDMA_STRM_RLIMIT_PAUSE:
+		reg = al_reg_read32(&regs->cfg_1s);
+		reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE;
+		al_reg_write32(&regs->cfg_1s, reg);
+		break;
+	case AL_UDMA_STRM_RLIMIT_RESET:
+		reg = al_reg_read32(&regs->sw_ctrl);
+		reg |= UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT;
+		al_reg_write32(&regs->sw_ctrl, reg);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/** Configure the M2S Stream rate limitation (thin wrapper over the
+ * common stream/queue rate-limiter helper). */
+int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
+				struct al_udma_m2s_rlimit_cfg *conf)
+{
+	struct udma_rlimit_common *rlimit_regs =
+	    &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit;
+
+	return al_udma_common_rlimit_set(rlimit_regs, conf);
+}
+
+/* Apply an enable/pause/reset action on the M2S stream rate limiter;
+ * logs and returns -EINVAL for an unknown action. */
+int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
+				enum al_udma_m2s_rlimit_action act)
+{
+	struct udma_rlimit_common *rlimit_regs =
+	    &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit;
+
+	if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) {
+		al_err("udma [%s]: udma stream rate limit invalid action "
+				"(%d)\n", udma->name, act);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/** Configure the M2S UDMA per-queue rate limitation (thin wrapper over
+ * the common stream/queue rate-limiter helper). */
+int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
+				struct al_udma_m2s_rlimit_cfg *conf)
+{
+	struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit;
+
+	return al_udma_common_rlimit_set(rlimit_regs, conf);
+}
+
+/* Apply an enable/pause/reset action on a queue's rate limiter.
+ * NOTE(review): the error message says "stream" although this is the
+ * per-queue variant — kept as-is (runtime string), worth fixing upstream. */
+int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
+				enum al_udma_m2s_rlimit_action act)
+{
+	struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit;
+
+	if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) {
+		al_err("udma [%s %d]: udma stream rate limit invalid action "
+				"(%d)\n",
+				udma_q->udma->name, udma_q->qid, act);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/** Configure the M2S UDMA Q scheduling mode: max deficit counter size and
+ * strict-priority flag (dwrr_cfg_1), QoS (dwrr_cfg_2), weight (dwrr_cfg_3).
+ */
+int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
+					struct al_udma_m2s_q_dwrr_conf *conf)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);
+
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
+	reg |= conf->max_deficit_cnt_sz &
+	    UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
+	if (conf->strict == AL_TRUE)
+		reg |= UDMA_M2S_Q_DWRR_CFG_1_STRICT;
+	else
+		reg &= ~UDMA_M2S_Q_DWRR_CFG_1_STRICT;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg);
+
+	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_2);
+	/* NOTE(review): the next four lines clear and set Q_QOS_MASK twice,
+	 * so the axi_qos write is immediately clobbered by the q_qos write.
+	 * The first pair presumably intended an AXI-QOS-specific mask/shift
+	 * (macro name not visible here) — confirm against the register
+	 * header before changing. */
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg |= (conf->axi_qos << UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT) &
+	    UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	reg |= conf->q_qos & UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_2, reg);
+
+	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_3);
+	reg &= ~UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
+	reg |= conf->weight & UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_3, reg);
+
+	return 0;
+}
+
+/* Pause (set == AL_TRUE) or resume a queue's DWRR scheduling via the
+ * PAUSE bit in dwrr_cfg_1. */
+int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);
+
+	if (set == AL_TRUE)
+		reg |= UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
+	else
+		reg &= ~UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg);
+
+	return 0;
+}
+
+/* Reset a queue's DWRR deficit counter by setting the RST bit in the
+ * scheduler software-control register. */
+int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl);
+
+	reg |= UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT;
+	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl, reg);
+
+	return 0;
+}
+
+/** M2S UDMA completion and application timeouts: arbiter mode, queue
+ * promotion, FIFO depths (cfg_1c), coalescing timeout, and app-ack timeout.
+ */
+int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
+				struct al_udma_m2s_comp_timeouts *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+
+	if (conf->sch_mode == SRR)
+		reg |= UDMA_M2S_COMP_CFG_1C_FORCE_RR;
+	else if (conf->sch_mode == STRICT)
+		reg &= ~UDMA_M2S_COMP_CFG_1C_FORCE_RR;
+	else {
+		/* Bug fix: "preferch" typo in the error message */
+		al_err("udma [%s]: requested completion descriptor prefetch "
+				"arbiter mode (%d) is invalid\n",
+				udma->name, conf->sch_mode);
+		return -EINVAL;
+	}
+	if (conf->enable_q_promotion == AL_TRUE)
+		reg |= UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
+	else
+		reg &= ~UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
+	reg &= ~UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK;
+	reg |=
+	    conf->comp_fifo_depth << UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT;
+
+	reg &= ~UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK;
+	reg |= conf->unack_fifo_depth
+			<< UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, reg);
+
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_coal
+							, conf->coal_timeout);
+
+	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);
+	reg &= ~UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK;
+	reg |= conf->app_timeout << UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT;
+	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack, reg);
+	return 0;
+}
+
+/* Read back the M2S completion/application timeout configuration
+ * (inverse of al_udma_m2s_comp_timeouts_set). */
+int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
+					struct al_udma_m2s_comp_timeouts *conf)
+{
+	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+
+	if (reg & UDMA_M2S_COMP_CFG_1C_FORCE_RR)
+		conf->sch_mode = SRR;
+	else
+		conf->sch_mode = STRICT;
+
+	if (reg & UDMA_M2S_COMP_CFG_1C_Q_PROMOTION)
+		conf->enable_q_promotion = AL_TRUE;
+	else
+		conf->enable_q_promotion = AL_FALSE;
+
+	conf->comp_fifo_depth =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK,
+			UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT);
+	conf->unack_fifo_depth =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK,
+			UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT);
+
+	conf->coal_timeout = al_reg_read32(
+				&udma->udma_regs->m2s.m2s_comp.cfg_coal);
+
+	reg = al_reg_read32(
+			&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);
+
+	conf->app_timeout =
+	    AL_REG_FIELD_GET(reg,
+			UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK,
+			UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT);
+
+	return 0;
+}
+
+/**
+ * S2M UDMA configure no descriptors behaviour: drop and/or interrupt when
+ * no Rx descriptor is available, and the wait-for-descriptor timeout.
+ * Rejects drop_packet with a zero timeout (the hardware would wait forever
+ * instead of dropping).
+ */
+int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout)
+{
+	uint32_t reg;
+
+	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);
+
+	if ((drop_packet == AL_TRUE) && (wait_for_desc_timeout == 0)) {
+		/* Bug fix: "udam" typo and missing newline in the message */
+		al_err("udma [%s]: setting timeout to 0 will cause the udma to wait forever instead of dropping the packet\n", udma->name);
+		return -EINVAL;
+	}
+
+	if (drop_packet == AL_TRUE)
+		reg |= UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;
+	else
+		reg &= ~UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;
+
+	if (gen_interrupt == AL_TRUE)
+		reg |= UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;
+	else
+		reg &= ~UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;
+
+	AL_REG_FIELD_SET(reg, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT, wait_for_desc_timeout);
+
+	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);
+
+	return 0;
+}
+
+/* S2M UDMA configure a queue's completion-ring update enable.
+ * NOTE(review): "updade" in the function name is a typo, but the symbol is
+ * exported API — renaming would break callers, so it is kept. */
+int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+
+	if (enable == AL_TRUE)
+		reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+	else
+		reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);
+
+	return 0;
+}
+
+/* S2M UDMA configure a queue's completion-descriptor coalescing: the
+ * enable bit is inverted in hardware (DIS_COMP_COAL), and the timeout is
+ * written as a full 32-bit value to comp_cfg_2. */
+int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t
+		coal_timeout)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+
+	if (enable == AL_TRUE)
+		reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+	else
+		reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, coal_timeout);
+	return 0;
+}
+
+/* S2M UDMA configure completion-descriptor write burst size.
+ * Accepts 64/128/256 bytes only; min and max AXI beat counts are both set
+ * to the same converted value. */
+int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma, uint16_t
+		burst_size)
+{
+	if ((burst_size != 64) && (burst_size != 128) && (burst_size != 256)) {
+		al_err("%s: invalid burst_size value (%d)\n", __func__,
+				burst_size);
+		return -EINVAL;
+	}
+
+	/* convert burst size from bytes to beats (16 byte) */
+	burst_size = burst_size / 16;
+	al_reg_write32_masked(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1,
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK |
+			UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK,
+			burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT |
+			burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT);
+	return 0;
+}
+
+/* S2M UDMA per queue completion configuration: ring update/coalescing
+ * (comp_cfg), coalescing timer (comp_cfg_2), header split (pkt_cfg) and
+ * queue QoS (qos_cfg). */
+int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
+					struct al_udma_s2m_q_comp_conf *conf)
+{
+	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
+	if (conf->en_comp_ring_update == AL_TRUE)
+		reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+	else
+		reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+	if (conf->dis_comp_coal == AL_TRUE)
+		reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+	else
+		reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, conf->comp_timer);
+
+	reg = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg);
+
+	reg &= ~UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
+	reg |= conf->hdr_split_size & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
+	if (conf->force_hdr_split == AL_TRUE)
+		reg |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
+	else
+		reg &= ~UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
+	if (conf->en_hdr_split == AL_TRUE)
+		reg |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
+	else
+		reg &= ~UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
+
+	al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, reg);
+
+	reg = al_reg_read32(&udma_q->q_regs->s2m_q.qos_cfg);
+	reg &= ~UDMA_S2M_QOS_CFG_Q_QOS_MASK;
+	reg |= conf->q_qos & UDMA_S2M_QOS_CFG_Q_QOS_MASK;
+	al_reg_write32(&udma_q->q_regs->s2m_q.qos_cfg, reg);
+
+	return 0;
+}
+
+/* UDMA VMID control configuration: packs per-queue desc/queue enable bits
+ * for the 4 Tx and 4 Rx queues into cfg_vmid_0, then the queue VMID values
+ * two per register into cfg_vmid_1..4.
+ * NOTE(review): only queues 0-3 are configured — assumes a 4-queue UDMA;
+ * confirm against the conf struct's array sizes. */
+void al_udma_gen_vmid_conf_set(
+	struct unit_regs *unit_regs,
+	struct al_udma_gen_vmid_conf *conf)
+{
+	/* one bit per queue, for each of the four enable fields */
+	al_reg_write32_masked(
+		&unit_regs->gen.vmid.cfg_vmid_0,
+		UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK |
+		UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK,
+		(((conf->tx_q_conf[0].desc_en << 0) |
+		  (conf->tx_q_conf[1].desc_en << 1) |
+		  (conf->tx_q_conf[2].desc_en << 2) |
+		  (conf->tx_q_conf[3].desc_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT) |
+		(((conf->tx_q_conf[0].queue_en << 0) |
+		  (conf->tx_q_conf[1].queue_en << 1) |
+		  (conf->tx_q_conf[2].queue_en << 2) |
+		  (conf->tx_q_conf[3].queue_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT) |
+		(((conf->rx_q_conf[0].desc_en << 0) |
+		  (conf->rx_q_conf[1].desc_en << 1) |
+		  (conf->rx_q_conf[2].desc_en << 2) |
+		  (conf->rx_q_conf[3].desc_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT) |
+		(((conf->rx_q_conf[0].queue_en << 0) |
+		  (conf->rx_q_conf[1].queue_en << 1) |
+		  (conf->rx_q_conf[2].queue_en << 2) |
+		  (conf->rx_q_conf[3].queue_en << 3)) <<
+		 UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT));
+
+	/* per-queue VMID values, two queues per register */
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_1,
+		(conf->tx_q_conf[0].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT) |
+		(conf->tx_q_conf[1].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_2,
+		(conf->tx_q_conf[2].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT) |
+		(conf->tx_q_conf[3].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_3,
+		(conf->rx_q_conf[0].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT) |
+		(conf->rx_q_conf[1].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT));
+
+	al_reg_write32(
+		&unit_regs->gen.vmid.cfg_vmid_4,
+		(conf->rx_q_conf[2].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT) |
+		(conf->rx_q_conf[3].vmid <<
+		 UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT));
+}
+
+/* UDMA VMID MSIX control configuration: updates only the MSIX access-enable
+ * and VMID-select bits of cfg_vmid_0, leaving the queue bits untouched. */
+void al_udma_gen_vmid_msix_conf_set(
+	struct unit_regs *unit_regs,
+	struct al_udma_gen_vmid_msix_conf *conf)
+{
+	al_reg_write32_masked(
+		&unit_regs->gen.vmid.cfg_vmid_0,
+		UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN |
+		UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL,
+		(conf->access_en ? UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN : 0) |
+		(conf->sel ? UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL : 0));
+}
+
+/* UDMA VMID control advanced Tx queue configuration: per-queue VMPR
+ * registers (address-high select/value, prefetch/completion/data VMIDs
+ * and their enables) indexed by the queue id. */
+void al_udma_gen_vmid_advanced_tx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_tx_q_conf *conf)
+{
+	struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs;
+	struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid];
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_0,
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN,
+		conf->tx_q_addr_hi_sel |
+		((conf->tx_q_data_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN : 0) |
+		((conf->tx_q_prefetch_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN : 0) |
+		((conf->tx_q_compl_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN : 0));
+
+	/* upper 32 address bits used when the HISEL selector is active */
+	al_reg_write32(
+		&vmpr->cfg_vmpr_1,
+		conf->tx_q_addr_hi);
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_2,
+		UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK,
+		(conf->tx_q_prefetch_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT) |
+		(conf->tx_q_compl_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT));
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_3,
+		UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK,
+		(conf->tx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT) |
+		(conf->tx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT));
+}
+
+/** UDMA VMID control advanced Rx queue configuration: per-queue VMPR
+ * registers for buffer1/buffer2/DDP address-high selects, VMID values,
+ * enables, and the three address-high registers. */
+void al_udma_gen_vmid_advanced_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_vmid_advanced_rx_q_conf *conf)
+{
+	struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs;
+	struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid];
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_4,
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN |
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN,
+		(conf->rx_q_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT) |
+		((conf->rx_q_data_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN : 0) |
+		(conf->rx_q_data_buff2_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT) |
+		((conf->rx_q_data_buff2_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN : 0) |
+		(conf->rx_q_ddp_addr_hi_sel <<
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT) |
+		((conf->rx_q_ddp_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN : 0) |
+		((conf->rx_q_prefetch_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN : 0) |
+		((conf->rx_q_compl_vmid_en == AL_TRUE) ?
+		 UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN : 0));
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_6,
+		UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK,
+		(conf->rx_q_prefetch_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT) |
+		(conf->rx_q_compl_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT));
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_7,
+		UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK,
+		(conf->rx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT) |
+		(conf->rx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT));
+
+	/* NOTE(review): BUF2 and DDP VMID registers below are written from
+	 * rx_q_data_vmid / rx_q_data_vmid_mask (the buffer-1 values); the
+	 * conf struct exposes separate buff2/ddp enables, so this may be a
+	 * copy-paste — confirm intended fields before changing. */
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_8,
+		UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK,
+		(conf->rx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT) |
+		(conf->rx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT));
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_9,
+		UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK |
+		UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK,
+		(conf->rx_q_data_vmid <<
+		 UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT) |
+		(conf->rx_q_data_vmid_mask <<
+		 UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT));
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_10,
+		conf->rx_q_addr_hi);
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_11,
+		conf->rx_q_data_buff2_addr_hi);
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_12,
+		conf->rx_q_ddp_addr_hi);
+}
+
+/* UDMA header split buffer 2 Rx queue configuration: MSB address-select
+ * field in cfg_vmpr_4 and the MSB address value in cfg_vmpr_5, for the
+ * queue's header-split second buffer. */
+void al_udma_gen_hdr_split_buff2_rx_q_conf(
+	struct al_udma_q *q,
+	struct al_udma_gen_hdr_split_buff2_q_conf *conf)
+{
+	struct unit_regs *unit_regs = (struct unit_regs *)q->udma->udma_regs;
+	struct udma_gen_vmpr *vmpr = &unit_regs->gen.vmpr[q->qid];
+
+	al_reg_write32_masked(
+		&vmpr->cfg_vmpr_4,
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK,
+		conf->add_msb_sel <<
+		UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT);
+
+	al_reg_write32(
+		&vmpr->cfg_vmpr_5,
+		conf->addr_msb);
+}
+
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_udma_debug.c b/arch/arm/mach-alpine/al_hal/al_hal_udma_debug.c
new file mode 100644
index 0000000..f98609d
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_udma_debug.c
@@ -0,0 +1,496 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_debug.c
+ *
+ * @brief Universal DMA HAL driver for debug
+ *
+ */
+
+#define DEBUG
+
+#include <al_hal_udma_regs.h>
+#include <al_hal_udma_config.h>
+#include <al_hal_udma_debug.h>
+
+/*
+ * Dump the M2S (memory-to-stream / Tx) AXI configuration registers via
+ * al_dbg().  AL_UDMA_PRINT_REG prints a whole register;
+ * AL_UDMA_PRINT_REG_FIELD additionally decodes a named sub-field.
+ */
+static void al_udma_regs_m2s_axi_print(struct al_udma *udma)
+{
+	al_dbg("M2S AXI regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, comp_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, data_rd_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_rd_cfg_3);
+
+	/* desc_wr_cfg_1 is printed raw and then with its beat limits decoded. */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, desc_wr_cfg_1);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			max_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, axi_m2s,
+			desc_wr_cfg_1,
+			min_axi_beats,
+			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, axi_m2s, ostand_cfg);
+}
+
+/*
+ * Dump the M2S general control/status registers: DMA state machine state
+ * (decoded per sub-unit), error logs, FIFO status and global enables.
+ */
+static void al_udma_regs_m2s_general_print(struct al_udma *udma)
+{
+	al_dbg("M2S general regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, state);
+	/* Decode the state register into its per-sub-unit state fields. */
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			comp_ctrl,
+			UDMA_M2S_STATE_COMP_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			stream_if,
+			UDMA_M2S_STATE_STREAM_IF);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			rd_ctrl,
+			UDMA_M2S_STATE_DATA_RD_CTRL);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s, state,
+			desc_pref,
+			UDMA_M2S_STATE_DESC_PREF);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, err_log_mask);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_0);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, log_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, header_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, unack_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, check_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, fifo_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, cfg_len);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, stream_cfg);
+}
+
+/*
+ * Dump the M2S descriptor-prefetch (read) configuration registers;
+ * desc_pref_cfg_3 is also decoded into its burst/threshold fields.
+ */
+static void al_udma_regs_m2s_rd_print(struct al_udma *udma)
+{
+	al_dbg("M2S read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_2);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, desc_pref_cfg_3);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			min_burst_below_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			min_burst_above_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_rd,
+			desc_pref_cfg_3,
+			pref_thr,
+			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rd, data_cfg);
+}
+
+/* Dump the M2S DWRR (deficit weighted round robin) scheduler register. */
+static void al_udma_regs_m2s_dwrr_print(struct al_udma *udma)
+{
+	al_dbg("M2S DWRR regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_dwrr, cfg_sched);
+}
+
+/* Dump the M2S global rate-limiter configuration register. */
+static void al_udma_regs_m2s_rate_limiter_print(struct al_udma *udma)
+{
+	al_dbg("M2S rate limiter regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_rate_limiter, gen_cfg);
+}
+
+/* Dump the M2S per-stream rate-limiter register set (rlimit sub-struct). */
+static void al_udma_regs_m2s_stream_rate_limiter_print(struct al_udma *udma)
+{
+	al_dbg("M2S stream rate limiter regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_1s);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_cycle);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_token_size_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.cfg_token_size_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stream_rate_limiter,
+			rlimit.mask);
+}
+
+/*
+ * Dump the M2S completion-path configuration registers; cfg_1c is also
+ * decoded into its FIFO-depth, queue-promotion and arbitration fields.
+ */
+static void al_udma_regs_m2s_comp_print(struct al_udma *udma)
+{
+	al_dbg("M2S completion regs:\n");
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_1c);
+
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			comp_fifo_depth,
+			UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			unack_fifo_depth,
+			UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH);
+	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
+			q_promotion,
+			UDMA_M2S_COMP_CFG_1C_Q_PROMOTION);
+	AL_UDMA_PRINT_REG_BIT(udma, " ", "\n", m2s, m2s_comp, cfg_1c,
+			force_rr,
+			UDMA_M2S_COMP_CFG_1C_FORCE_RR);
+	AL_UDMA_PRINT_REG_FIELD(udma, " ", "\n", "%d", m2s, m2s_comp, cfg_1c,
+			q_free_min,
+			UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN);
+
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_coal);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_comp, cfg_application_ack);
+}
+
+/* Dump the M2S statistics counters (Tx packets/bytes, prefetched and
+ * completed descriptors, acked packets). */
+static void al_udma_regs_m2s_stat_print(struct al_udma *udma)
+{
+	al_dbg("M2S statistics regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, cfg_st);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, tx_bytes_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, prefed_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, comp_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_stat, ack_pkts);
+}
+
+/* Dump the M2S feature (capability) registers.  NOTE(review): reg_2 is not
+ * printed -- presumably intentional, but worth confirming against the
+ * register file. */
+static void al_udma_regs_m2s_feature_print(struct al_udma *udma)
+{
+	al_dbg("M2S feature regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_feature, reg_5);
+}
+
+/*
+ * Dump the M2S per-queue registers for queue 'qid'.
+ *
+ * The queue is first selected by writing 'qid' to the M2S indirect control
+ * register, which routes the sel_* status registers that follow; the
+ * m2s_q[qid] registers below are addressed directly.
+ */
+static void al_udma_regs_m2s_q_print(struct al_udma *udma, uint32_t qid)
+{
+	al_dbg("M2S Q[%d] status regs:\n", qid);
+	al_reg_write32(&udma->udma_regs->m2s.m2s.indirect_ctrl, qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_pref_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_comp_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_rate_limit_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s, sel_dwrr_status);
+
+	al_dbg("M2S Q[%d] regs:\n", qid);
+	/* Descriptor/completion ring pointers and lengths. */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrl);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdrtp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tdcp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], tcrhp);
+
+	/* Per-queue rate limiter. */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_1s);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.cfg_cycle);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
+			rlimit.cfg_token_size_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],
+			rlimit.cfg_token_size_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], rlimit.mask);
+
+	/* Per-queue scheduler, completion and statistics. */
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], dwrr_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], comp_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid], q_tx_pkt);
+}
+
+/* Dump the S2M (stream-to-memory / Rx) AXI configuration registers. */
+static void al_udma_regs_s2m_axi_print(struct al_udma *udma)
+{
+	al_dbg("S2M AXI regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_5);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, comp_wr_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, data_wr_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_rd_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, desc_wr_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_rd);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, axi_s2m, ostand_cfg_wr);
+}
+
+/* Dump the S2M general control/status registers: state, error logs,
+ * FIFO status and global enables. */
+static void al_udma_regs_s2m_general_print(struct al_udma *udma)
+{
+	al_dbg("S2M general regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, state);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, err_log_mask);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_0);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, log_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, s_header_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, axi_data_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, unack_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, check_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, fifo_en);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, stream_cfg);
+}
+
+/* Dump the S2M descriptor-prefetch (read) configuration registers. */
+static void al_udma_regs_s2m_rd_print(struct al_udma *udma)
+{
+	al_dbg("S2M read regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_rd, desc_pref_cfg_4);
+}
+
+/*
+ * Dump the S2M data-write configuration registers.
+ *
+ * The original printed data_cfg_1 twice (copy/paste); the duplicate is
+ * removed.  NOTE(review): the second line was presumably meant to print a
+ * data_cfg_2 register -- if one exists in the S2M write register file
+ * (see al_hal_udma_regs), add it here.
+ */
+static void al_udma_regs_s2m_wr_print(struct al_udma *udma)
+{
+	al_dbg("S2M write regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_wr, data_cfg_1);
+}
+
+/* Dump the S2M completion-path configuration registers. */
+static void al_udma_regs_s2m_comp_print(struct al_udma *udma)
+{
+	al_dbg("S2M completion regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_1c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_2c);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_comp, cfg_application_ack);
+}
+
+/* Dump the S2M statistics counters (drops, Rx bytes, prefetched and
+ * completed descriptors, acked packets). */
+static void al_udma_regs_s2m_stat_print(struct al_udma *udma)
+{
+	al_dbg("S2M statistics regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, drop_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, rx_bytes_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, prefed_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_pkt);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, comp_desc);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_stat, ack_pkts);
+}
+
+/* Dump the S2M feature (capability) registers.  NOTE(review): reg_2 is not
+ * printed -- presumably intentional; confirm against the register file. */
+static void al_udma_regs_s2m_feature_print(struct al_udma *udma)
+{
+	al_dbg("S2M feature regs:\n");
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_1);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_3);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_4);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_5);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_feature, reg_6);
+}
+
+/*
+ * Dump the S2M per-queue status and configuration registers for queue
+ * 'qid'.  The queue is first selected through the S2M indirect control
+ * register so the sel_* status registers below reflect this queue.
+ */
+static void al_udma_regs_s2m_q_print(struct al_udma *udma, uint32_t qid)
+{
+	al_dbg("S2M Q[%d] status regs:\n", qid);
+	/*
+	 * Bug fix: select the queue in the S2M block.  The original wrote
+	 * the M2S indirect_ctrl (copy/paste from the M2S variant), selecting
+	 * a queue in the wrong DMA direction, so the S2M sel_* reads below
+	 * would not reflect 'qid'.
+	 */
+	al_reg_write32(&udma->udma_regs->s2m.s2m.indirect_ctrl, qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_pref_fifo_status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m, sel_comp_fifo_status);
+
+	al_dbg("S2M Q[%d] regs:\n", qid);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], status);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrl);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdrtp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rdcp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_low);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrbp_high);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], rcrhp_internal);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], comp_cfg_2);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], pkt_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], qos_cfg);
+	AL_UDMA_PRINT_REG(udma, " ", "\n", s2m, s2m_q[qid], q_rx_pkt);
+}
+
+/*
+ * Dump UDMA registers selected by 'mask'.
+ *
+ * Each AL_UDMA_DEBUG_* bit in 'mask' enables one register group;
+ * AL_UDMA_DEBUG_QUEUE(i) bits enable per-queue dumps.  The engine
+ * direction (udma->type) chooses between the M2S (Tx) and S2M (Rx)
+ * variants of each group.  A NULL 'udma' is silently ignored.
+ */
+void al_udma_regs_print(struct al_udma *udma, unsigned int mask)
+{
+	uint32_t i;
+
+	if (!udma)
+		return;
+
+	if (udma->type == UDMA_TX) {
+		if (mask & AL_UDMA_DEBUG_AXI)
+			al_udma_regs_m2s_axi_print(udma);
+		if (mask & AL_UDMA_DEBUG_GENERAL)
+			al_udma_regs_m2s_general_print(udma);
+		if (mask & AL_UDMA_DEBUG_READ)
+			al_udma_regs_m2s_rd_print(udma);
+		if (mask & AL_UDMA_DEBUG_DWRR)
+			al_udma_regs_m2s_dwrr_print(udma);
+		if (mask & AL_UDMA_DEBUG_RATE_LIMITER)
+			al_udma_regs_m2s_rate_limiter_print(udma);
+		if (mask & AL_UDMA_DEBUG_STREAM_RATE_LIMITER)
+			al_udma_regs_m2s_stream_rate_limiter_print(udma);
+		if (mask & AL_UDMA_DEBUG_COMP)
+			al_udma_regs_m2s_comp_print(udma);
+		if (mask & AL_UDMA_DEBUG_STAT)
+			al_udma_regs_m2s_stat_print(udma);
+		if (mask & AL_UDMA_DEBUG_FEATURE)
+			al_udma_regs_m2s_feature_print(udma);
+		for (i = 0; i < DMA_MAX_Q; i++) {
+			if (mask & AL_UDMA_DEBUG_QUEUE(i))
+				al_udma_regs_m2s_q_print(udma, i);
+		}
+	} else {
+		if (mask & AL_UDMA_DEBUG_AXI)
+			al_udma_regs_s2m_axi_print(udma);
+		if (mask & AL_UDMA_DEBUG_GENERAL)
+			al_udma_regs_s2m_general_print(udma);
+		if (mask & AL_UDMA_DEBUG_READ)
+			al_udma_regs_s2m_rd_print(udma);
+		if (mask & AL_UDMA_DEBUG_WRITE)
+			al_udma_regs_s2m_wr_print(udma);
+		if (mask & AL_UDMA_DEBUG_COMP)
+			al_udma_regs_s2m_comp_print(udma);
+		if (mask & AL_UDMA_DEBUG_STAT)
+			al_udma_regs_s2m_stat_print(udma);
+		if (mask & AL_UDMA_DEBUG_FEATURE)
+			al_udma_regs_s2m_feature_print(udma);
+		for (i = 0; i < DMA_MAX_Q; i++) {
+			if (mask & AL_UDMA_DEBUG_QUEUE(i))
+				al_udma_regs_s2m_q_print(udma, i);
+		}
+	}
+}
+
+/*
+ * Dump the software-side al_udma_q bookkeeping structure for queue 'qid'
+ * (no hardware access).  NULL 'udma' or an out-of-range qid is silently
+ * ignored.
+ */
+void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid)
+{
+	struct al_udma_q *queue;
+
+	if (!udma)
+		return;
+
+	if (qid >= DMA_MAX_Q)
+		return;
+
+	queue = &udma->udma_q[qid];
+
+	al_dbg("Q[%d] struct:\n", qid);
+	al_dbg(" size_mask = 0x%08x\n", (uint32_t)queue->size_mask);
+	al_dbg(" q_regs = %p\n", queue->q_regs);
+	al_dbg(" desc_base_ptr = %p\n", queue->desc_base_ptr);
+	al_dbg(" next_desc_idx = %d\n", (uint16_t)queue->next_desc_idx);
+	al_dbg(" desc_ring_id = %d\n", (uint32_t)queue->desc_ring_id);
+	al_dbg(" cdesc_base_ptr = %p\n", queue->cdesc_base_ptr);
+	al_dbg(" cdesc_size = %d\n", (uint32_t)queue->cdesc_size);
+	al_dbg(" next_cdesc_idx = %d\n", (uint16_t)queue->next_cdesc_idx);
+	al_dbg(" end_cdesc_ptr = %p\n", queue->end_cdesc_ptr);
+	al_dbg(" comp_head_idx = %d\n", (uint16_t)queue->comp_head_idx);
+	al_dbg(" comp_head_ptr = %p\n", queue->comp_head_ptr);
+	al_dbg(" pkt_crnt_descs = %d\n", (uint32_t)queue->pkt_crnt_descs);
+	al_dbg(" comp_ring_id = %d\n", (uint32_t)queue->comp_ring_id);
+	al_dbg(" desc_phy_base = 0x%016llx\n", (uint64_t)queue->desc_phy_base);
+	al_dbg(" cdesc_phy_base = 0x%016llx\n",
+			(uint64_t)queue->cdesc_phy_base);
+	al_dbg(" flags = 0x%08x\n", (uint32_t)queue->flags);
+	al_dbg(" size = %d\n", (uint32_t)queue->size);
+	al_dbg(" status = %d\n", (uint32_t)queue->status);
+	al_dbg(" udma = %p\n", queue->udma);
+	al_dbg(" qid = %d\n", (uint32_t)queue->qid);
+}
+
+/*
+ * Hex-dump every descriptor of the submission or completion ring of queue
+ * 'qid'.  Descriptors are printed as 16-, 8- or 4-byte entries depending
+ * on the ring's descriptor size; any other size stops the dump.  NULL
+ * 'udma', an out-of-range qid or an unallocated ring is handled gracefully.
+ */
+void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
+		enum al_udma_ring_type rtype)
+{
+	struct al_udma_q *queue;
+	uint32_t desc_size;
+	void *base_ptr;
+	uint32_t i;
+
+	if (!udma)
+		return;
+
+	if (qid >= DMA_MAX_Q)
+		return;
+
+	queue = &udma->udma_q[qid];
+	if (rtype == AL_RING_SUBMISSION) {
+		base_ptr = queue->desc_base_ptr;
+		desc_size = sizeof(union al_udma_desc);
+		if (base_ptr)
+			al_dbg("Q[%d] submission ring pointers:\n", qid);
+		else {
+			al_dbg("Q[%d] submission ring is not allocated\n", qid);
+			return;
+		}
+	} else {
+		base_ptr = queue->cdesc_base_ptr;
+		desc_size = queue->cdesc_size;
+		if (base_ptr)
+			al_dbg("Q[%d] completion ring pointers:\n", qid);
+		else {
+			al_dbg("Q[%d] completion ring is not allocated\n", qid);
+			return;
+		}
+	}
+
+	for (i = 0; i < queue->size; i++) {
+		/* NOTE(review): relies on GNU void* arithmetic (base_ptr is
+		 * void *) -- fine for gcc-built kernel code. */
+		uint32_t *curr_addr = base_ptr + i * desc_size;
+		if (desc_size == 16)
+			al_dbg("[%04d](%p): %08x %08x %08x %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr,
+					(uint32_t)*(curr_addr+1),
+					(uint32_t)*(curr_addr+2),
+					(uint32_t)*(curr_addr+3));
+		else if (desc_size == 8)
+			al_dbg("[%04d](%p): %08x %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr,
+					(uint32_t)*(curr_addr+1));
+		else if (desc_size == 4)
+			al_dbg("[%04d](%p): %08x\n",
+					i,
+					curr_addr,
+					(uint32_t)*curr_addr);
+		else
+			break;
+	}
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_udma_fast.c b/arch/arm/mach-alpine/al_hal/al_hal_udma_fast.c
new file mode 100644
index 0000000..349fcce
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_udma_fast.c
@@ -0,0 +1,77 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "al_hal_udma_fast.h"
+
+/*
+ * Pre-program every Tx and Rx descriptor of a fast-memcpy queue pair with
+ * the constant control/meta words, so the data path only has to fill in
+ * addresses and lengths.  The words are derived once from xaction->flags
+ * (barrier, snoop and interrupt options) and then stamped across both
+ * rings.  Always returns 0.
+ */
+int al_udma_fast_memcpy_q_prepare(struct al_udma_q *udma_txq,
+		struct al_udma_q *udma_rxq,
+		struct al_memcpy_transaction *xaction)
+{
+	/* Constant meta word: NULL CRC op that passes the data through. */
+	uint32_t meta = (AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT) |
+			(AL_CRC_CHECKSUM_NULL << TX_DESC_META_CRC_OP_TYPE_SHIFT) |
+			TX_DESC_META_CRC_SEND_ORIG |
+			RX_DESC_META_CRC_FIRST_BUF |
+			RX_DESC_META_CRC_LAST_BUF;
+	/* Every Tx descriptor carries a whole packet (first and last). */
+	uint32_t tx_ctrl = AL_M2S_DESC_FIRST | AL_M2S_DESC_LAST;
+	uint32_t rx_ctrl = 0;
+	union al_udma_desc *cur;
+	uint32_t idx;
+
+	if (xaction->flags & AL_SSM_BARRIER)
+		tx_ctrl |= AL_M2S_DESC_DMB;
+	if (xaction->flags & AL_SSM_SRC_NO_SNOOP)
+		tx_ctrl |= AL_M2S_DESC_NO_SNOOP_H;
+
+	if (xaction->flags & AL_SSM_INTERRUPT)
+		rx_ctrl |= AL_M2S_DESC_INT_EN;
+	if (xaction->flags & AL_SSM_DEST_NO_SNOOP)
+		rx_ctrl |= AL_M2S_DESC_NO_SNOOP_H;
+
+	/* Stamp the constant words into every descriptor of each ring. */
+	for (idx = 0; idx < udma_txq->size; idx++) {
+		cur = udma_txq->desc_base_ptr + idx;
+		cur->tx.meta_ctrl = swap32_to_le(meta);
+		cur->tx.len_ctrl = swap32_to_le(tx_ctrl);
+	}
+
+	for (idx = 0; idx < udma_rxq->size; idx++) {
+		cur = udma_rxq->desc_base_ptr + idx;
+		cur->rx.len_ctrl = swap32_to_le(rx_ctrl);
+	}
+
+	return 0;
+}
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_udma_iofic.c b/arch/arm/mach-alpine/al_hal/al_hal_udma_iofic.c
new file mode 100644
index 0000000..80dc22e
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_udma_iofic.c
@@ -0,0 +1,150 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_iofic.c
+ *
+ * @brief unit interrupts configurations
+ *
+ */
+
+#include "al_hal_udma_iofic.h"
+#include "al_hal_udma_regs.h"
+
+/*
+ * Configure the main interrupt controller's group registers according to
+ * the requested interrupt delivery mode; interrupts are left masked.
+ *
+ * Returns 0 on success, -EINVAL on an unknown mode.
+ */
+static int al_udma_main_iofic_config(struct al_iofic_regs __iomem *base,
+				    enum al_iofic_mode mode)
+{
+	switch (mode) {
+	case AL_IOFIC_MODE_LEGACY:
+		/* Level-style legacy IRQ: clear-on-read, MSI-X masked out. */
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_MASK_MSI_X |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_MASK_MSI_X |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_Q:
+		/* One MSI-X vector per queue: auto clear/mask on groups A-C. */
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_AUTO_CLEAR);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK |
+				INT_CONTROL_GRP_CLEAR_ON_READ);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	case AL_IOFIC_MODE_MSIX_PER_GROUP:
+		/* One MSI-X vector per group: only group A auto-handled. */
+		al_iofic_config(base, AL_INT_GROUP_A,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_AUTO_CLEAR |
+				INT_CONTROL_GRP_AUTO_MASK);
+		al_iofic_config(base, AL_INT_GROUP_B,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_C,
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		al_iofic_config(base, AL_INT_GROUP_D,
+				INT_CONTROL_GRP_SET_ON_POSEDGE |
+				INT_CONTROL_GRP_CLEAR_ON_READ |
+				INT_CONTROL_GRP_MASK_MSI_X);
+		break;
+	default:
+		al_err("%s: invalid mode (%d)\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	al_dbg("%s: base.%p mode %d\n", __func__, base, mode);
+	return 0;
+}
+
+/*
+ * Configure the UDMA interrupt controller registers; interrupts are kept
+ * masked except for the secondary-controller error groups, whose masks
+ * are derived from the caller's *_errors_disable/*_aborts_disable bitmaps.
+ *
+ * Returns 0 on success, or the error from al_udma_main_iofic_config().
+ *
+ * Note: the original text contained "&reg;s" HTML-entity corruption
+ * ("®s"); all references are restored to "&regs->...".
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs, enum al_iofic_mode mode,
+	uint32_t m2s_errors_disable,
+	uint32_t m2s_aborts_disable,
+	uint32_t s2m_errors_disable,
+	uint32_t s2m_aborts_disable)
+{
+	int rc;
+
+	rc = al_udma_main_iofic_config(&regs->gen.interrupt_regs.main_iofic, mode);
+	if (rc != 0)
+		return rc;
+
+	/* Group A carries M2S errors; unmask all except the disabled ones. */
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, ~m2s_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_A, m2s_aborts_disable);
+
+	/* Group B carries S2M errors. */
+	al_iofic_unmask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, ~s2m_errors_disable);
+	al_iofic_abort_mask(&regs->gen.interrupt_regs.secondary_iofic_ctrl, AL_INT_GROUP_B, s2m_aborts_disable);
+
+	al_dbg("%s base.%p mode %d\n", __func__, regs, mode);
+	return 0;
+}
+
+/*
+ * Return the address of the unmask register for the given interrupt
+ * controller level and group (asserts that the pair is valid, then
+ * delegates to the generic iofic helper).
+ */
+uint32_t __iomem * al_udma_iofic_unmask_offset_get(
+	struct unit_regs __iomem *regs,
+	enum al_udma_iofic_level level,
+	int group)
+{
+	al_assert(al_udma_iofic_level_and_group_valid(level, group));
+	return al_iofic_unmask_offset_get(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+/** @} end of UDMA group */
diff --git a/arch/arm/mach-alpine/al_hal/al_hal_udma_main.c b/arch/arm/mach-alpine/al_hal/al_hal_udma_main.c
new file mode 100644
index 0000000..21f97fb
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_hal_udma_main.c
@@ -0,0 +1,725 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_main.c
+ *
+ * @brief Universal DMA HAL driver for main functions (initialization, data path)
+ *
+ */
+
+#include <al_hal_udma.h>
+
+#define AL_UDMA_Q_RST_TOUT 10000 /* Queue reset timeout [uSecs] */
+
+#define UDMA_STATE_IDLE 0x0
+#define UDMA_STATE_NORMAL 0x1
+#define UDMA_STATE_ABORT 0x2
+#define UDMA_STATE_RESERVED 0x3
+
+const char *const al_udma_states_name[] = {
+ "Disable",
+ "Idle",
+ "Normal",
+ "Abort",
+ "Reset"
+};
+
+#define AL_UDMA_INITIAL_RING_ID 1
+
+/* dma_q flags */
+#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID AL_BIT(0)
+#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE AL_BIT(1)
+#define AL_UDMA_Q_FLAGS_EN_COMP_COAL AL_BIT(2)
+
+
+static void al_udma_set_defaults(struct al_udma *udma)
+{
+ uint32_t tmp;
+
+#if 0
+ uint32_t reg, reg2;
+#endif
+
+ if (udma->type == UDMA_TX) {
+ struct unit_regs* tmp_unit_regs =
+ (struct unit_regs*)udma->udma_regs;
+
+ tmp = al_reg_read32(
+ &udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
+ tmp &= ~ UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
+ tmp |= 16 << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT;
+ al_reg_write32(&tmp_unit_regs->m2s.m2s_rd.desc_pref_cfg_3
+ , tmp);
+ al_reg_write32(& tmp_unit_regs->gen.axi.cfg_1,0);
+ al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
+ , 0); /* Ack time out */
+
+ // TODO: remove this code in the future
+ tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
+ tmp &= ~ UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
+ tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
+ , tmp);
+
+ }
+ if (udma->type == UDMA_RX) {
+ al_reg_write32(
+ &udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
+ /* Ack time out */
+
+ }
+#if 0
+ if (udma->type == UDMA_TX) {
+ reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_1);
+ al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_3);
+ al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_4);
+ tmp = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);
+ tmp &= ~0x1FFFF;
+ tmp |= reg & 0x1FFFF;
+ al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.m2s_feature.reg_5);
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg);
+
+ /* Set AXI defaults */
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1);
+ reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1);
+ reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_SHIFT;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1);
+ reg &= ~UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_SHIFT;
+ al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1, reg);
+
+ } else {
+ reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_1);
+ al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_3);
+ al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_4);
+ al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_5);
+ tmp = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd);
+ tmp &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_MASK;
+ tmp |= AL_REG_FIELD_GET(reg,
+ UDMA_S2M_FEATURE_REG_5_MAX_DESC_RD_OSTAND_MASK,
+ UDMA_S2M_FEATURE_REG_5_MAX_DESC_RD_OSTAND_SHIFT);
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, tmp);
+
+ reg2 = al_reg_read32(&udma->udma_regs->s2m.s2m_feature.reg_6);
+
+ tmp = AL_REG_FIELD_GET(reg,
+ UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK,
+ UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT);
+
+ tmp |= AL_REG_FIELD_GET(reg2,
+ UDMA_S2M_FEATURE_REG_6_MAX_DATA_BEATS_WR_OSTAND_MASK,
+ UDMA_S2M_FEATURE_REG_6_MAX_DATA_BEATS_WR_OSTAND_SHIFT)
+ << UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_DATA_WR_SHIFT;
+
+ tmp |= AL_REG_FIELD_GET(reg2,
+ UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK,
+ UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT)
+ << UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT;
+
+ tmp |= AL_REG_FIELD_GET(reg2,
+ UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK,
+ UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT)
+ << UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_SHIFT;
+
+
+
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, tmp);
+
+ tmp = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c);
+ tmp &= ~UDMA_S2M_COMP_CFG_1C_ACK_FIFO_DEPTH_MASK;
+ tmp |= AL_REG_FIELD_GET(reg2,
+ UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK,
+ UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT) << UDMA_S2M_COMP_CFG_1C_ACK_FIFO_DEPTH_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, tmp);
+
+ /* Set AXI defaults */
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1);
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT;
+ reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
+ reg |= 0x2 << UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4);
+ reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4, reg);
+
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1);
+ reg &= ~UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK;
+ reg |= 0x1 << UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1, reg);
+
+ /* Set comp fifo depth */
+ reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c);
+ reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
+ reg |= 0x20 << UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);
+
+
+ /* data fifo depth */
+ reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr);
+ reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_MASK;
+ reg |= 0x20 << UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_SHIFT;
+ al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg);
+
+ /* Ignore pkt Len Error */
+ al_reg_write32(&udma->udma_regs->s2m.s2m.err_abort_mask, 1<<24);
+ }
+#endif
+}
+/**
+ * misc queue configurations
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config(struct al_udma_q *udma_q)
+{
+ uint32_t *reg_addr;
+ uint32_t val;
+
+ if (udma_q->udma->type == UDMA_TX) {
+ reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;
+
+ val = al_reg_read32(reg_addr);
+ // enable DMB
+ val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
+ al_reg_write32(reg_addr, val);
+ }
+ return 0;
+}
+
+/**
+ * set the queue's completion configuration register
+ *
+ * @param udma_q udma queue data structure
+ *
+ * @return 0
+ */
+static int al_udma_q_config_compl(struct al_udma_q *udma_q)
+{
+ uint32_t *reg_addr;
+ uint32_t val;
+
+ if (udma_q->udma->type == UDMA_TX)
+ reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
+ else
+ reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;
+
+ val = al_reg_read32(reg_addr);
+
+ if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
+ val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+ else
+ val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
+
+ if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
+ val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+ else
+ val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
+
+ al_reg_write32(reg_addr, val);
+
+ /* set the completion queue size */
+ if (udma_q->udma->type == UDMA_RX) {
+ val = al_reg_read32(
+ &udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
+ val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+ /* the register expects it to be in words */
+ val |= (udma_q->cdesc_size >> 2)
+ & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
+ al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
+ , val);
+ }
+ return 0;
+}
+
+/**
+ * reset the queues pointers (Head, Tail, etc) and set the base addresses
+ *
+ * @param udma_q udma queue data structure
+ */
+static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
+{
+ /* reset the descriptors ring pointers */
+ /* assert descriptor base address aligned. */
+ al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
+ ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
+ al_reg_write32(&udma_q->q_regs->rings.drbp_low,
+ AL_ADDR_LOW(udma_q->desc_phy_base));
+ al_reg_write32(&udma_q->q_regs->rings.drbp_high,
+ AL_ADDR_HIGH(udma_q->desc_phy_base));
+
+ al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);
+
+ /* if completion ring update disabled */
+ if (udma_q->cdesc_base_ptr == NULL) {
+ udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
+ } else {
+ /* reset the completion descriptors ring pointers */
+ /* assert completion base address aligned. */
+ al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
+ ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
+ al_reg_write32(&udma_q->q_regs->rings.crbp_low,
+ AL_ADDR_LOW(udma_q->cdesc_phy_base));
+ al_reg_write32(&udma_q->q_regs->rings.crbp_high,
+ AL_ADDR_HIGH(udma_q->cdesc_phy_base));
+
+ al_udma_q_config_compl(udma_q);
+ }
+ return 0;
+}
+
+/**
+ * enable/disable udma queue
+ *
+ * @param udma_q udma queue data structure
+ * @param enable non-zero value enables the queue, zero means disable
+ *
+ * @return 0
+ */
+static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
+{
+ uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);
+
+ if (enable) {
+ reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+ udma_q->status = AL_QUEUE_ENABLED;
+ } else {
+ reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
+ udma_q->status = AL_QUEUE_DISABLED;
+ }
+ al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
+ return 0;
+}
+
+
+/************************ API functions ***************************************/
+
+/* Initializations functions */
+/*
+ * Initialize the udma engine
+ */
+int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
+{
+ int i;
+
+ al_assert(udma);
+
+ if (udma_params->num_of_queues > DMA_MAX_Q) {
+ al_err("udma: invalid num_of_queues parameter\n");
+ return -EINVAL;
+ }
+
+ udma->udma_regs = udma_params->udma_reg;
+ udma->type = udma_params->type;
+ udma->num_of_queues = udma_params->num_of_queues;
+
+ if (udma_params->name == NULL)
+ udma->name = "";
+ else
+ udma->name = udma_params->name;
+
+ udma->state = UDMA_DISABLE;
+ for (i = 0; i < DMA_MAX_Q; i++) {
+ udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
+ }
+ /* initialize configuration registers to correct values */
+ al_udma_set_defaults(udma);
+ al_dbg("udma [%s] initialized. base %p\n", udma->name,
+ udma->udma_regs);
+ return 0;
+}
+
+/*
+ * Initialize the udma queue data structure
+ */
+int al_udma_q_init(struct al_udma *udma, uint32_t qid,
+ struct al_udma_q_params *q_params)
+{
+ struct al_udma_q *udma_q;
+
+ al_assert(udma);
+ al_assert(q_params);
+
+ if (qid >= udma->num_of_queues) {
+ al_err("udma: invalid queue id (%d)\n", qid);
+ return -EINVAL;
+ }
+
+ if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
+ al_err("udma: queue (%d) already enabled!\n", qid);
+ return -EIO;
+ }
+
+ if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
+ al_err("udma: queue (%d) size too small\n", qid);
+ return -EINVAL;
+ }
+
+ if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
+ al_err("udma: queue (%d) size too large\n", qid);
+ return -EINVAL;
+ }
+
+ if (q_params->size & (q_params->size - 1)) {
+ al_err("udma: queue (%d) size (%d) must be power of 2\n",
+ qid, q_params->size);
+ return -EINVAL;
+ }
+
+ udma_q = &udma->udma_q[qid];
+ /* set the queue's regs base address */
+ if (udma->type == UDMA_TX)
+ udma_q->q_regs = (union udma_q_regs __iomem *)
+ &udma->udma_regs->m2s.m2s_q[qid];
+ else
+ udma_q->q_regs = (union udma_q_regs __iomem *)
+ &udma->udma_regs->s2m.s2m_q[qid];
+
+ udma_q->dev_id = q_params->dev_id;
+ udma_q->rev_id = q_params->rev_id;
+ udma_q->size = q_params->size;
+ udma_q->size_mask = q_params->size - 1;
+ udma_q->desc_base_ptr = q_params->desc_base;
+ udma_q->desc_phy_base = q_params->desc_phy_base;
+ udma_q->cdesc_base_ptr = q_params->cdesc_base;
+ udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
+ udma_q->cdesc_size = q_params->cdesc_size;
+
+ udma_q->next_desc_idx = 0;
+ udma_q->next_cdesc_idx = 0;
+ udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
+ (udma_q->size - 1) * udma_q->cdesc_size;
+ udma_q->comp_head_idx = 0;
+ udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
+ udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
+ udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
+#if 0
+ udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
+ AL_M2S_DESC_RING_ID_SHIFT;
+#endif
+ udma_q->pkt_crnt_descs = 0;
+ udma_q->flags = 0;
+ udma_q->status = AL_QUEUE_DISABLED;
+ udma_q->udma = udma;
+ udma_q->qid = qid;
+
+ /* start hardware configuration: */
+ al_udma_q_config(udma_q);
+ /* reset the queue pointers */
+ al_udma_q_set_pointers(udma_q);
+
+ /* enable the q */
+ al_udma_q_enable(udma_q, 1);
+
+ al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
+ " desc ring info: phys base 0x%llx virt base %p\n"
+ " cdesc ring info: phys base 0x%llx virt base %p "
+ "entry size 0x%x",
+ udma_q->udma->name, udma_q->qid,
+ udma->type == UDMA_TX ? "Tx" : "Rx",
+ q_params->size,
+ (unsigned long long)q_params->desc_phy_base,
+ q_params->desc_base,
+ (unsigned long long)q_params->cdesc_phy_base,
+ q_params->cdesc_base,
+ q_params->cdesc_size);
+
+ return 0;
+}
+
+/*
+ * Reset a udma queue
+ */
+int al_udma_q_reset(struct al_udma_q *udma_q)
+{
+ unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
+ uint32_t *status_reg;
+ uint32_t *dcp_reg;
+ uint32_t *crhp_reg;
+ uint32_t *q_sw_ctrl_reg;
+
+ al_assert(udma_q);
+
+ /* De-assert scheduling and prefetch */
+ al_udma_q_enable(udma_q, 0);
+
+ /* Wait for scheduling and prefetch to stop */
+ status_reg = &udma_q->q_regs->rings.status;
+
+ while (remaining_time) {
+ uint32_t status = al_reg_read32(status_reg);
+
+ if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
+ UDMA_M2S_Q_STATUS_SCHEDULER)))
+ break;
+
+ remaining_time--;
+ al_udelay(1);
+ }
+
+ if (!remaining_time) {
+ al_err("udma [%s %d]: %s timeout waiting for prefetch and "
+ "scheduler disable\n", udma_q->udma->name, udma_q->qid,
+ __func__);
+ return -ETIME;
+ }
+
+ /* Wait for the completion queue to reach to the same pointer as the
+ * prefetch stopped at ([TR]DCP == [TR]CRHP) */
+ dcp_reg = &udma_q->q_regs->rings.dcp;
+ crhp_reg = &udma_q->q_regs->rings.crhp;
+
+ while (remaining_time) {
+ uint32_t dcp = al_reg_read32(dcp_reg);
+ uint32_t crhp = al_reg_read32(crhp_reg);
+
+ if (dcp == crhp)
+ break;
+
+ remaining_time--;
+ al_udelay(1);
+ };
+
+ if (!remaining_time) {
+ al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
+ udma_q->udma->name, udma_q->qid, __func__);
+ return -ETIME;
+ }
+
+ /* Assert the queue reset */
+ if (udma_q->udma->type == UDMA_TX)
+ q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
+ else
+ q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;
+
+ al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);
+
+ return 0;
+}
+
+/*
+ * return (by reference) a pointer to a specific queue data structure.
+ */
+int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
+ struct al_udma_q **q_handle)
+{
+
+ al_assert(udma);
+ al_assert(q_handle);
+
+ if (unlikely(qid >= udma->num_of_queues)) {
+ al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
+ return -EINVAL;
+ }
+ *q_handle = &udma->udma_q[qid];
+ return 0;
+}
+
+/*
+ * Change the UDMA's state
+ */
+int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
+{
+ uint32_t reg;
+
+ al_assert(udma != NULL);
+ if (state == udma->state)
+ al_dbg("udma [%s]: requested state identical to "
+ "current state (%d)\n", udma->name, state);
+
+ al_dbg("udma [%s]: change state from (%s) to (%s)\n",
+ udma->name, al_udma_states_name[udma->state],
+ al_udma_states_name[state]);
+
+ reg = 0;
+ switch (state) {
+ case UDMA_DISABLE:
+ reg |= UDMA_M2S_CHANGE_STATE_DIS;
+ break;
+ case UDMA_NORMAL:
+ reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
+ break;
+ case UDMA_ABORT:
+ reg |= UDMA_M2S_CHANGE_STATE_ABORT;
+ break;
+ default:
+ al_err("udma: invalid state (%d)\n", state);
+ return -EINVAL;
+ }
+
+ if (udma->type == UDMA_TX)
+ al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
+ else
+ al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);
+
+ udma->state = state;
+ return 0;
+}
+
+/*
+ * return the current UDMA hardware state
+ */
+enum al_udma_state al_udma_state_get(struct al_udma *udma)
+{
+ uint32_t state_reg;
+ uint32_t comp_ctrl;
+ uint32_t stream_if;
+ uint32_t data_rd;
+ uint32_t desc_pref;
+
+ if (udma->type == UDMA_TX)
+ state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
+ else
+ state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);
+
+ comp_ctrl = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_COMP_CTRL_MASK,
+ UDMA_M2S_STATE_COMP_CTRL_SHIFT);
+ stream_if = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_STREAM_IF_MASK,
+ UDMA_M2S_STATE_STREAM_IF_SHIFT);
+ data_rd = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
+ UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
+ desc_pref = AL_REG_FIELD_GET(state_reg,
+ UDMA_M2S_STATE_DESC_PREF_MASK,
+ UDMA_M2S_STATE_DESC_PREF_SHIFT);
+
+ al_assert(comp_ctrl != UDMA_STATE_RESERVED);
+ al_assert(stream_if != UDMA_STATE_RESERVED);
+ al_assert(data_rd != UDMA_STATE_RESERVED);
+ al_assert(desc_pref != UDMA_STATE_RESERVED);
+
+ /* if any of the states is abort then return abort */
+ if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
+ || (data_rd == UDMA_STATE_ABORT)
+ || (desc_pref == UDMA_STATE_ABORT))
+ return UDMA_ABORT;
+
+ /* if any of the states is normal then return normal */
+ if ((comp_ctrl == UDMA_STATE_NORMAL)
+ || (stream_if == UDMA_STATE_NORMAL)
+ || (data_rd == UDMA_STATE_NORMAL)
+ || (desc_pref == UDMA_STATE_NORMAL))
+ return UDMA_NORMAL;
+
+ return UDMA_IDLE;
+}
+
+/*
+ * Action handling
+ */
+
+/*
+ * get next completed packet from completion ring of the queue
+ */
+uint32_t al_udma_cdesc_packet_get(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc **cdesc)
+{
+ uint32_t count;
+ volatile union al_udma_cdesc *curr;
+ uint32_t comp_flags;
+
+ /* this function requires the completion ring update */
+ al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));
+
+ /* comp_head points to the last comp desc that was processed */
+ curr = udma_q->comp_head_ptr;
+ comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+
+ /* check if the completion descriptor is new */
+ if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
+ return 0;
+ /* if new desc found, increment the current packets descriptors */
+ count = udma_q->pkt_crnt_descs + 1;
+ while (!cdesc_is_last(comp_flags)) {
+ curr = al_cdesc_next_update(udma_q, curr);
+ comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+ if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
+ == AL_FALSE)) {
+ /* the current packet here doesn't have all */
+ /* descriptors completed. log the current desc */
+ /* location and number of completed descriptors so */
+ /* far. then return */
+ udma_q->pkt_crnt_descs = count;
+ udma_q->comp_head_ptr = curr;
+ return 0;
+ }
+ count++;
+ /* check against max descs per packet. */
+ al_assert(count <= udma_q->size);
+ }
+ /* return back the first descriptor of the packet */
+ *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
+ udma_q->pkt_crnt_descs = 0;
+ udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);
+
+ al_dbg("udma [%s %d]: packet completed. first desc %p (ixd 0x%x)"
+ " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
+ udma_q->next_cdesc_idx, count);
+
+ return count;
+}
+
+/** @} end of UDMA group */
diff --git a/arch/arm/mach-alpine/al_hal/al_init_sys_fabric.c b/arch/arm/mach-alpine/al_hal/al_init_sys_fabric.c
new file mode 100644
index 0000000..b0ce4ad
--- /dev/null
+++ b/arch/arm/mach-alpine/al_hal/al_init_sys_fabric.c
@@ -0,0 +1,86 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include <al_hal_common.h>
+#include <al_hal_reg_utils.h>
+#include <al_hal_nb_regs.h>
+#include <al_init_sys_fabric.h>
+#include "al_init_ccu_regs.h"
+#include "al_init_sys_fabric_offsets.h"
+
+/* definition currently missing from nb_regs */
+#define AL_NB_ACF_MISC_READ_BYPASS (1 << 30)
+
+/* initialization of different units */
+void al_nbservice_init(void __iomem *nb_regs_address,
+ al_bool dev_ord_relax)
+{
+ struct al_nb_regs __iomem *nb_regs = nb_regs_address;
+
+ /* allow reads to bypass writes to different addresses */
+ al_reg_write32_masked(
+ &(nb_regs->global.acf_misc),
+ AL_NB_ACF_MISC_READ_BYPASS,
+ (dev_ord_relax) ? AL_NB_ACF_MISC_READ_BYPASS : 0);
+}
+
+void al_ccu_init(void __iomem *ccu_address, al_bool iocc)
+{
+ /* enable snoop */
+ if (iocc) {
+ al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET
+ , 1);
+ al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET
+ , 1);
+ }
+ /* disable speculative fetches from masters */
+ al_reg_write32(ccu_address + AL_CCU_SPECULATION_CONTROL_OFFSET, 7);
+}
+
+void al_nbservice_clear_settings(void __iomem *nb_regs_address)
+{
+ struct al_nb_regs __iomem *nb_regs = nb_regs_address;
+
+ al_reg_write32_masked(
+ &(nb_regs->global.acf_misc),
+ AL_NB_ACF_MISC_READ_BYPASS,
+ 0);
+}
+
+void al_ccu_clear_settings(void __iomem *ccu_address)
+{
+ al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 0);
+ al_reg_write32(ccu_address + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 0);
+}
diff --git a/arch/arm/mach-alpine/alpine_cpu_pm.c b/arch/arm/mach-alpine/alpine_cpu_pm.c
new file mode 100644
index 0000000..d62a143
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.c
@@ -0,0 +1,165 @@
+/*
+ * Alpine CPU Power Management Services
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/suspend.h>
+
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+#include <mach/alpine_cpu_pm.h>
+
+#include <mach/alpine_cpu_resume.h>
+
+/* NB registers */
+#undef AL_NB_SERVICE_BASE
+#define AL_NB_SERVICE_BASE al_nb_service_base
+#define AL_NB_INIT_CONTROL (AL_NB_SERVICE_BASE + 0x8)
+#define AL_NB_POWER_CONTROL(cpu) (AL_NB_SERVICE_BASE + \
+ 0x2000 + (cpu)*0x100 + 0x20)
+
+int alpine_suspend_finish(unsigned long);
+
+static void __iomem *al_nb_service_base;
+static struct al_cpu_resume_regs __iomem *al_cpu_resume_regs;
+static int suspend_wakeup_supported;
+
+int alpine_cpu_suspend_wakeup_supported(void)
+{
+ return suspend_wakeup_supported;
+}
+EXPORT_SYMBOL(alpine_cpu_suspend_wakeup_supported);
+
+void alpine_cpu_wakeup(unsigned int cpu, uintptr_t resume_addr)
+{
+ /*
+ * Cancel previous powerdown request
+ * This can happen if the CPU is "hot plugged in" after being powered
+ * off due to being "hot plugged out" - see 'alpine_cpu_die' below.
+ */
+ writel(0, (void __iomem *)AL_NB_POWER_CONTROL(cpu));
+
+ /* Set CPU resume address */
+ writel(resume_addr, &al_cpu_resume_regs->per_cpu[cpu].resume_addr);
+
+ /* Release from reset - has effect once per SoC reset */
+ writel(readl(AL_NB_INIT_CONTROL) | (1 << cpu), AL_NB_INIT_CONTROL);
+}
+EXPORT_SYMBOL(alpine_cpu_wakeup);
+
+void alpine_cpu_die(unsigned int cpu)
+{
+ if (!suspend_wakeup_supported) {
+ pr_err("Annapurna Labs PM components not found\n");
+ return;
+ }
+
+ /* request powerdown. cpu will be turned off when it issues WFI */
+ writel(0x3, (void __iomem *)AL_NB_POWER_CONTROL(cpu));
+
+ alpine_suspend_finish(0);
+
+ BUG(); /*execution should never reach this point */
+}
+EXPORT_SYMBOL(alpine_cpu_die);
+
+void alpine_cpu_suspend(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Write the resume address */
+ writel(virt_to_phys(cpu_resume),
+ &al_cpu_resume_regs->per_cpu[cpu].resume_addr);
+ writel(AL_CPU_RESUME_FLG_PERCPU_CLUSTER_SKIP,
+ &al_cpu_resume_regs->per_cpu[cpu].flags);
+
+ /* request powerdown. cpu will be turned off when it issues WFI
+ * bits 0:1 - request core powerdown
+ * bits 20:21 - do not wake-up from i/o gic
+ */
+ writel(0x3 | (3<<20), (void __iomem *)AL_NB_POWER_CONTROL(cpu));
+ /*verify the write got through*/
+ readl((void __iomem *)AL_NB_POWER_CONTROL(cpu));
+
+ cpu_pm_enter();
+ cpu_suspend(0, alpine_suspend_finish);
+
+ /*clear the powerdown request*/
+ writel(0, (void __iomem *)AL_NB_POWER_CONTROL(cpu));
+ /*verify the write got through*/
+ readl((void __iomem *)AL_NB_POWER_CONTROL(cpu));
+
+ cpu_pm_exit();
+}
+EXPORT_SYMBOL(alpine_cpu_suspend);
+
+#ifdef CONFIG_PM
+static int al_pm_valid(suspend_state_t state)
+{
+ return ((state == PM_SUSPEND_STANDBY) || (state == PM_SUSPEND_MEM));
+}
+
+static int al_pm_enter(suspend_state_t state)
+{
+ if (al_pm_valid(state))
+ alpine_cpu_suspend();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct platform_suspend_ops al_pm_ops = {
+ .enter = al_pm_enter,
+ .valid = al_pm_valid,
+};
+#endif
+
+void __init alpine_cpu_pm_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(
+ NULL, NULL, "annapurna-labs,al-nb-service");
+ al_nb_service_base = of_iomap(np, 0);
+
+ np = of_find_compatible_node(
+ NULL, NULL, "annapurna-labs,al-cpu-resume");
+ al_cpu_resume_regs =
+ (struct al_cpu_resume_regs __iomem *)of_iomap(np, 0);
+
+ suspend_wakeup_supported =
+ al_nb_service_base &&
+ al_cpu_resume_regs &&
+ ((readl(&al_cpu_resume_regs->watermark) &
+ AL_CPU_RESUME_MAGIC_NUM_MASK) == AL_CPU_RESUME_MAGIC_NUM) &&
+ ((readl(&al_cpu_resume_regs->watermark) &
+ AL_CPU_RESUME_MIN_VER_MASK) >= AL_CPU_RESUME_MIN_VER);
+
+#ifdef CONFIG_PM
+ if (suspend_wakeup_supported) {
+ suspend_set_ops(&al_pm_ops);
+ } else {
+ pr_err("Annapurna Labs PM components not found\n");
+ return;
+ }
+#endif
+}
+
diff --git a/arch/arm/mach-alpine/alpine_machine.c b/arch/arm/mach-alpine/alpine_machine.c
new file mode 100644
index 0000000..7920eba
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_machine.c
@@ -0,0 +1,323 @@
+/*
+ * Device Tree support for Alpine platforms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "core.h"
+
+#define WDTLOAD 0x000
+ #define LOAD_MIN 0x00000001
+ #define LOAD_MAX 0xFFFFFFFF
+#define WDTVALUE 0x004
+#define WDTCONTROL 0x008
+ /* control register masks */
+ #define INT_ENABLE (1 << 0)
+ #define RESET_ENABLE (1 << 1)
+#define WDTLOCK 0xC00
+ #define UNLOCK 0x1ACCE551
+ #define LOCK 0x00000001
+
+#define SERDES_NUM_GROUPS 4
+#define SERDES_GROUP_SIZE 0x400
+
+static void __iomem *wd0_base;
+static void __iomem *serdes_base;
+
/* Fixed-rate clock providers described in DT.
 * NOTE(review): this table is never passed to of_clk_init() below
 * (NULL is used, which selects the built-in __clk_of_table), so it
 * appears unused — confirm before removing. */
static const __initconst struct of_device_id clk_match[] = {
	{ .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
	{}
};
+
+static void __init al_timer_init(void)
+{
+ struct device_node *np;
+
+ /* Find the first watchdog and make sure it is not disabled */
+ np = of_find_compatible_node(
+ NULL, NULL, "arm,sp805");
+
+ if (np && of_device_is_available(np)) {
+ wd0_base = of_iomap(np, 0);
+ BUG_ON(!wd0_base);
+ } else {
+ wd0_base = NULL;
+ }
+
+ /* Timer initialization */
+ of_clk_init(NULL);
+ clocksource_of_init();
+}
+
/*
 * pm_power_off handler: the SoC has no way to cut its own power, so
 * just report the failure at emergency level.
 * Idiom fix: pr_emerg() instead of printk(KERN_EMERG ...).
 */
static void al_power_off(void)
{
	pr_emerg("Unable to shutdown\n");
}
+
+static void al_restart(char str, const char *cmd)
+{
+ if (!wd0_base) {
+ pr_err("%s: Not supported!\n", __func__);
+ } else {
+ writel(UNLOCK, wd0_base + WDTLOCK);
+ writel(LOAD_MIN, wd0_base + WDTLOAD);
+ writel(INT_ENABLE | RESET_ENABLE, wd0_base + WDTCONTROL);
+ }
+
+ while (1)
+ ;
+}
+
+static void __init al_map_io(void)
+{
+ /* Needed for early printk to work */
+ struct map_desc uart_map_desc[1];
+
+ uart_map_desc[0].virtual = (unsigned long)AL_UART_BASE(0);
+ uart_map_desc[0].pfn = __phys_to_pfn(AL_UART_BASE(0));
+ uart_map_desc[0].length = SZ_64K;
+ uart_map_desc[0].type = MT_DEVICE;
+
+ iotable_init(uart_map_desc, ARRAY_SIZE(uart_map_desc));
+}
+
+static void __init al_init_irq(void)
+{
+ irqchip_init();
+
+ if (al_msix_init() != 0)
+ pr_err("%s: al_msix_init() failed!\n", __func__);
+}
+
+static void __init al_serdes_resource_init(void)
+{
+ struct device_node *np;
+
+ /* Find the serdes node and make sure it is not disabled */
+ np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-serdes");
+
+ if (np && of_device_is_available(np)) {
+ serdes_base = of_iomap(np, 0);
+ BUG_ON(!serdes_base);
+ } else {
+ pr_err("%s: init serdes regs base failed!\n", __func__);
+ serdes_base = NULL;
+ }
+}
+
+void __iomem *alpine_serdes_resource_get(u32 group)
+{
+ void __iomem *base = NULL;
+
+ if (group >= SERDES_NUM_GROUPS)
+ return NULL;
+
+ if (serdes_base)
+ base = serdes_base + group * SERDES_GROUP_SIZE;
+
+ return base;
+}
+EXPORT_SYMBOL(alpine_serdes_resource_get);
+
/* Per-group Ethernet SerDes mode bookkeeping. The mutex serializes mode
 * changes and is also taken by alpine_serdes_eth_group_lock()/unlock()
 * to guard raw group register access. */
static struct alpine_serdes_eth_group_mode {
	struct mutex lock;		/* protects mode/mode_set and group regs */
	enum alpine_serdes_eth_mode mode;	/* last mode programmed */
	bool mode_set;			/* true once 'mode' is valid */
} alpine_serdes_eth_group_mode[SERDES_NUM_GROUPS] = {
	{
		.lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[0].lock),
		.mode_set = false,
	},
	{
		.lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[1].lock),
		.mode_set = false,
	},
	{
		.lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[2].lock),
		.mode_set = false,
	},
	{
		.lock = __MUTEX_INITIALIZER(alpine_serdes_eth_group_mode[3].lock),
		.mode_set = false,
	}};
+
+int alpine_serdes_eth_mode_set(
+ u32 group,
+ enum alpine_serdes_eth_mode mode)
+{
+ struct alpine_serdes_eth_group_mode *group_mode =
+ &alpine_serdes_eth_group_mode[group];
+
+ if (!serdes_base)
+ return -EINVAL;
+
+ if (group >= SERDES_NUM_GROUPS)
+ return -EINVAL;
+
+ mutex_lock(&group_mode->lock);
+
+ if (!group_mode->mode_set || (group_mode->mode != mode)) {
+ struct al_serdes_obj obj;
+ struct al_serdes_adv_tx_params tx_params[AL_SRDS_NUM_LANES];
+ struct al_serdes_adv_rx_params rx_params[AL_SRDS_NUM_LANES];
+ int i;
+
+ al_serdes_handle_init(serdes_base, &obj);
+
+ /* save group params */
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_tx_advanced_params_get(
+ &obj,
+ group,
+ i,
+ &tx_params[i]);
+ al_serdes_rx_advanced_params_get(
+ &obj,
+ group,
+ i,
+ &rx_params[i]);
+ }
+
+ if (mode == ALPINE_SERDES_ETH_MODE_SGMII)
+ al_serdes_mode_set_sgmii(&obj, group);
+ else
+ al_serdes_mode_set_kr(&obj, group);
+
+ /* restore group params */
+ for (i = 0; i < AL_SRDS_NUM_LANES; i++) {
+ al_serdes_tx_advanced_params_set(
+ &obj,
+ group,
+ i,
+ &tx_params[i]);
+ al_serdes_rx_advanced_params_set(
+ &obj,
+ group,
+ i,
+ &rx_params[i]);
+ }
+
+ group_mode->mode = mode;
+ group_mode->mode_set = true;
+ }
+
+ mutex_unlock(&group_mode->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(alpine_serdes_eth_mode_set);
+
+void alpine_serdes_eth_group_lock(u32 group)
+{
+ struct alpine_serdes_eth_group_mode *group_mode =
+ &alpine_serdes_eth_group_mode[group];
+
+ mutex_lock(&group_mode->lock);
+}
+EXPORT_SYMBOL(alpine_serdes_eth_group_lock);
+
+void alpine_serdes_eth_group_unlock(u32 group)
+{
+ struct alpine_serdes_eth_group_mode *group_mode =
+ &alpine_serdes_eth_group_mode[group];
+
+ mutex_unlock(&group_mode->lock);
+}
+EXPORT_SYMBOL(alpine_serdes_eth_group_unlock);
+
/* Machine init: order matters here — PM, fabric and SerDes setup must
 * all precede device registration via of_platform_populate(). */
static void __init al_init(void)
{
	/* The SoC cannot cut its own power; install a stub handler */
	pm_power_off = al_power_off;

	/*
	 * Power Management Services Initialization
	 * When running in SMP this should be done earlier
	 */
#ifndef CONFIG_SMP
	alpine_cpu_pm_init();
#endif

	/* fabric uses a notifier for device registration,
	 * Hence it must be initialized before registering
	 * any devices
	 **/
	al_fabric_init();

	/* NOTE(review): mapped before devices probe, presumably so drivers
	 * can call alpine_serdes_resource_get() — confirm */
	al_serdes_resource_init();

	/* Register all devices described in the device tree */
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
+
/* Root-node "compatible" strings matched by this machine descriptor */
static const char *al_match[] __initdata = {
	"annapurna-labs,alpine",
	NULL,
};
+
+unsigned int al_spin_lock_wfe_enable __read_mostly = 0;
+EXPORT_SYMBOL(al_spin_lock_wfe_enable);
+
+static int __init spin_lock_wfe_enable(char *str)
+{
+ get_option(&str, &al_spin_lock_wfe_enable);
+ if (al_spin_lock_wfe_enable)
+ al_spin_lock_wfe_enable = 1;
+ return 0;
+}
+
+early_param("spin_lock_wfe_enable", spin_lock_wfe_enable);
+
+unsigned int al_gettimeofday_use_jiffies __read_mostly = 0;
+EXPORT_SYMBOL(al_gettimeofday_use_jiffies);
+
+static int __init gettimeofday_use_jiffies(char *str)
+{
+ get_option(&str, &al_gettimeofday_use_jiffies);
+ if (al_gettimeofday_use_jiffies)
+ al_gettimeofday_use_jiffies = 1;
+ return 0;
+}
+
+early_param("gettimeofday_use_jiffies", gettimeofday_use_jiffies);
+
/* Machine descriptor for device-tree based Alpine boards */
DT_MACHINE_START(AL_DT, "AnnapurnaLabs Alpine (Device Tree)")
	.smp = smp_ops(al_smp_ops),
	.map_io = al_map_io,
	.init_irq = al_init_irq,
	.init_time = al_timer_init,
	.init_machine = al_init,
	.dt_compat = al_match,
	.restart = al_restart,
MACHINE_END
diff --git a/arch/arm/mach-alpine/core.h b/arch/arm/mach-alpine/core.h
new file mode 100644
index 0000000..d4a050b
--- /dev/null
+++ b/arch/arm/mach-alpine/core.h
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-alpine/core.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+extern struct smp_operations __initdata al_smp_ops;
+
+#ifdef CONFIG_PCI_MSI
+int al_msix_init(void);
+#else
+static inline int al_msix_init(void)
+{
+ return 0;
+}
+#endif
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_common.h b/arch/arm/mach-alpine/include/al_hal/al_hal_common.h
new file mode 100644
index 0000000..1ae1446
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_common.h
@@ -0,0 +1,69 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_common HAL Common Layer
+ * Includes all common header files used by HAL
+ * @{
+ * @file al_hal_common.h
+ *
+ */
+
+#ifndef __AL_HAL_COMMON_H__
+#define __AL_HAL_COMMON_H__
+
+#include "al_hal_plat_types.h"
+#include "al_hal_plat_services.h"
+
+#include "al_hal_types.h"
+#include "al_hal_reg_utils.h"
+
/* Get the maximal value out of two typed values
 * (statement expression: each argument is evaluated exactly once) */
#define al_max_t(type, x, y) ({ \
		type __max1 = (x); \
		type __max2 = (y); \
		__max1 > __max2 ? __max1 : __max2; })

/* Get the minimal value out of two typed values
 * (statement expression: each argument is evaluated exactly once) */
#define al_min_t(type, x, y) ({ \
		type __min1 = (x); \
		type __min2 = (y); \
		__min1 < __min2 ? __min1 : __min2; })

/* Get the number of elements in an array (actual arrays only — a
 * pointer argument would silently give the wrong answer) */
#define AL_ARR_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+/** @} end of Common group */
+#endif /* __AL_HAL_COMMON_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_ddr.h b/arch/arm/mach-alpine/include/al_hal/al_hal_ddr.h
new file mode 100644
index 0000000..e16c9be
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_ddr.h
@@ -0,0 +1,505 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup groupddr DDR controller & PHY hardrware abstraction layer
+ * @{
+ * @file al_hal_ddr.h
+ *
+ * @brief Header file for the DDR HAL driver
+ */
+
+#ifndef __AL_HAL_DDR_H__
+#define __AL_HAL_DDR_H__
+
+#include "al_hal_common.h"
+#include "al_hal_ddr_cfg.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * DDR address mapping - not connected bit
+ * See explanation about al_ddr_addrmap below.
+ */
+#define AL_DDR_ADDRMAP_NC 0xff
+
+/* Data Width */
+enum al_ddr_data_width {
+ AL_DDR_DATA_WIDTH_32_BITS,
+ AL_DDR_DATA_WIDTH_64_BITS,
+};
+
+/**
+ * Address mapping:
+ * Read and write requests are provided to the DDR controller with a system
+ * address. The system address is the command address of a transaction as
+ * presented on one of the data ports. The DDR controller is responsible for
+ * mapping this system address to rank, bank, row, and column addresses to the
+ * SDRAM. It converts the system address to a physical address.
+ * For each CS/bank/column/row bit assign a system memory address bit index.
+ * Set to AL_DDR_ADDRMAP_NC if not connected.
+ * CS minimal supported memory address bit index is 10.
+ * Bank minimal supported memory address bit index is 6.
+ * Column minimal supported memory address bit index is 4.
+ * Row minimal supported memory address bit index is 10.
+ *
+ * Address mapping might affect the system performance and should be optimized
+ * according to the specific application nature. The basic guideline is keeping
+ * as much open pages as possible and avoiding frequent closing of pages and
+ * opening new ones.
+ *
+ * Example:
+ * Mapping of 16GB memory device with 64 bits data width, 1KB page
+ *
+ * System address bit index | SDRAM required mapping
+ * ----------------------------------------------------
+ * 33:32 cs[1:0]
+ * 31:16 row[15:0]
+ * 15:13 bank[2:0]
+ * 12:3 col[9:0]
+ * 2:0 N/A since 8 bytes are accessed at a time
+ *
+ * In this case the following setting is required:
+ * col_b3_9_b11_13 = { 6, 7, 8, 9, 10, 11, 12, AL_DDR_ADDRMAP_NC, ... }
+ * bank_b0_2 = { 13, 14, 15 }
+ * row_b0_2_10 = { 16, 17, 18 }
+ * row_b11_15 = { 27, 28, 29, 30, 31 }
+ * cs_b0_1 = { 32, 33 }
+ */
struct al_ddr_addrmap {
	/**
	 * Column bits 3 - 9, 11 - 13
	 * Bit 3 relevant only for 64 bits data bus
	 * Bit 13 relevant only for 32 bits data bus
	 */
	uint8_t col_b3_9_b11_13[10];

	/* Bank bits 0 - 2 */
	uint8_t bank_b0_2[3];

	/**
	 * Row bits 0 - 2
	 * Bits 3 - 10 are following bit 2
	 */
	uint8_t row_b0_2_10[3];

	/* Row bits 11 - 15 (fixed: previous comment wrongly said "Column") */
	uint8_t row_b11_15[5];

	/* CS (chip select) bits 0 - 1 */
	uint8_t cs_b0_1[2];
};
+
+
+/* Data BIST mode */
+enum al_ddr_bist_mode {
+ /**
+ * Loopback mode:
+ * Address, commands and data loop back at the PHY I/Os
+ */
+ AL_DDR_BIST_MODE_LOOPBACK,
+
+ /**
+ * DRAM mode:
+ * Address, commands and data go to DRAM for normal memory accesses.
+ */
+ AL_DDR_BIST_MODE_DRAM,
+};
+
+/* Data/AC BIST pattern */
+enum al_ddr_bist_pat {
+ /* Walking '0' */
+ AL_DDR_BIST_PATTERN_WALK_0,
+
+ /* Walking '1' */
+ AL_DDR_BIST_PATTERN_WALK_1,
+
+ /* LFSR-based pseudo-random */
+ AL_DDR_BIST_PATTERN_LFSR,
+
+ /* User programmable (Not valid for AC loopback) */
+ AL_DDR_BIST_PATTERN_USER,
+};
+
+/* Data BIST parameters */
+struct al_ddr_bist_params {
+ /* Mode */
+ enum al_ddr_bist_mode mode;
+
+ /* Pattern */
+ enum al_ddr_bist_pat pat;
+
+ /**
+ * User Data Pattern 0:
+ * Data to be applied on even DQ pins during BIST.
+ * Valid values: 0x0000 - 0xffff
+ */
+ unsigned int user_pat_even;
+
+ /**
+ * User Data Pattern 1:
+ * Data to be applied on odd DQ pins during BIST.
+ * Valid values: 0x0000 - 0xffff
+ */
+ unsigned int user_pat_odd;
+
+ /** Word count
+ * Indicates the number of words to generate during BIST.
+ * Valid values are 4, 8, 12, 16, and so on.
+ * Maximal value: 0xfffc
+ */
+ unsigned int wc;
+
+ /** Address increment
+ * Selects the value by which the SDRAM address is incremented for each
+ * write/read access. This value must be at the beginning of a burst
+ * boundary, i.e. the lower bits must be "000".
+ * Maximal value: 0xff8
+ */
+ unsigned int inc;
+
+ /**
+ * BIST Column Address:
+ * Selects the SDRAM column address to be used during BIST. The lower
+ * bits of this address must be "000".
+ */
+ unsigned int col_min;
+
+ /**
+ * BIST Maximum Column Address:
+ * Specifies the maximum SDRAM column address to be used during BIST
+ * before the address increments to the next row.
+ */
+ unsigned int col_max;
+
+ /**
+ * BIST Row Address:
+ * Selects the SDRAM row address to be used during BIST.
+ */
+ unsigned int row_min;
+
+ /**
+ * BIST Maximum Row Address:
+ * Specifies the maximum SDRAM row address to be used during BIST
+ * before the address increments to the next bank.
+ */
+ unsigned int row_max;
+
+ /**
+ * BIST Bank Address:
+ * Selects the SDRAM bank address to be used during BIST.
+ */
+ unsigned int bank_min;
+
+ /**
+ * BIST Maximum Bank Address:
+ * Specifies the maximum SDRAM bank address to be used during BIST
+ * before the address increments to the next rank.
+ */
+ unsigned int bank_max;
+
+ /**
+ * BIST Rank:
+ * Selects the SDRAM rank to be used during BIST.
+ */
+ unsigned int rank_min;
+
+ /**
+ * BIST Maximum Rank:
+ * Specifies the maximum SDRAM rank to be used during BIST.
+ */
+ unsigned int rank_max;
+
+ /**
+ * Active byte lanes to have the BIST applied upon.
+ * Lanes 0-3 can always have BIST applied upon.
+ * Lane 4 - only if ECC is supported by the DDR device.
+ * Lanes 5-8 - only for 64 bits data bus width.
+ */
+ uint8_t active_byte_lanes[AL_DDR_PHY_NUM_BYTE_LANES];
+};
+
+/* ECC status parameters */
+struct al_ddr_ecc_status {
+ /* Number of ECC errors detected */
+ unsigned int err_cnt;
+
+ /* Rank number of a read resulting in an ECC error */
+ unsigned int rank;
+
+ /* Bank number of a read resulting in an ECC error */
+ unsigned int bank;
+
+ /* Row number of a read resulting in an ECC error */
+ unsigned int row;
+
+ /* Collumn number of a read resulting in an ECC error */
+ unsigned int col;
+
+ /* Data pattern that resulted in a corrected error */
+ uint32_t syndromes_31_0;
+ uint32_t syndromes_63_32; /* For 32-bit ECC - not used. */
+ uint32_t syndromes_ecc; /* ECC lane */
+
+ /**
+ * Mask for the corrected data portion
+ * 1 on any bit indicates that the bit has been corrected by the ECC
+ * logic
+ * 0 on any bit indicates that the bit has not been corrected by the
+ * ECC logic
+ * This register accumulates data over multiple ECC errors, to give an
+ * overall indication of which bits are being fixed. It is cleared by
+ * calling al_ddr_ecc_corr_int_clear.
+ */
+ uint32_t corr_bit_mask_31_0;
+ uint32_t corr_bit_mask_63_32; /* For 32-bit ECC - not used. */
+ uint32_t corr_bit_mask_ecc; /* ECC lane */
+
+ /* Bit number corrected by single-bit ECC error */
+ unsigned int ecc_corrected_bit_num;
+};
+
+struct al_ddr_ecc_cfg {
+ /* ECC mode indicator */
+ al_bool ecc_enabled;
+
+ /* Enable ECC scrubs - applicable only when ecc is enabled */
+ al_bool scrub_enabled;
+};
+
+/* DDR controller power modes */
+enum al_ddr_power_mode {
+ /* No power mode enabled */
+ AL_DDR_POWERMODE_OFF,
+
+ /**
+ * Self refresh:
+ * Puts the SDRAM into self refresh when no active transactions
+ */
+ AL_DDR_POWERMODE_SELF_REFRESH,
+
+ /**
+ * Power down:
+ * The DDR controller goes into power-down after a
+ * programmable number of idle cycles (Multiples of 32 clocks)
+ */
+ AL_DDR_POWERMODE_POWER_DOWN,
+
+};
+
/* DDR operating modes */
enum al_ddr_operating_mode {
	/* Initialization */
	AL_DDR_OPERATING_MODE_INIT,

	/* Normal operation */
	AL_DDR_OPERATING_MODE_NORMAL,

	/* Power down */
	AL_DDR_OPERATING_MODE_POWER_DOWN,

	/* Self refresh */
	AL_DDR_OPERATING_MODE_SELF_REFRESH,

};
+
/**
 * @brief Run a data (DATX8) BIST on the DDR PHY
 *
 * @param ddr_ctrl_regs_base
 *		Address of the DDR controller register base
 * @param ddr_phy_regs_base
 *		Address of the DDR PHY register base
 * @param params
 *		BIST parameters (see struct al_ddr_bist_params)
 *
 * @returns 0 if successful, <0 otherwise
 */
int al_ddr_phy_datx_bist(
	void __iomem *ddr_ctrl_regs_base,
	void __iomem *ddr_phy_regs_base,
	struct al_ddr_bist_params *params);

/**
 * @brief Run an address/command (AC) BIST on the DDR PHY
 *
 * @param ddr_phy_regs_base
 *		Address of the DDR PHY register base
 * @param pat
 *		BIST pattern (user pattern is not valid for AC loopback)
 *
 * @returns 0 if successful, <0 otherwise
 */
int al_ddr_phy_ac_bist(
	void __iomem *ddr_phy_regs_base,
	enum al_ddr_bist_pat pat);
+
+/**
+ * @brief Get current data bus width
+ *
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ *
+ * @returns The data bus width
+ */
+enum al_ddr_data_width al_ddr_data_width_get(
+ void __iomem *ddr_ctrl_regs_base);
+
+/**
+ * @brief Get the current number of available ranks
+ *
+ * @param ddr_phy_regs_base
+ * Address of the DDR controller register base
+ *
+ * @returns The number of available ranks
+ */
+unsigned int al_ddr_active_ranks_get(
+ void __iomem *ddr_ctrl_regs_base);
+
+/**
+ * @brief Get the current corrected/uncorrected error status
+ *
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ * @param corr_status
+ * The corrected error status (use NULL if no status is required)
+ * @param uncorr_status
+ * The uncorrected error status (use NULL if no status is required)
+ *
+ * @returns 0 if successful
+ * <0 otherwise
+ */
+int al_ddr_ecc_status_get(
+ void __iomem *ddr_ctrl_regs_base,
+ struct al_ddr_ecc_status *corr_status,
+ struct al_ddr_ecc_status *uncorr_status);
+
+/**
+ * @brief Get the current ECC configuration
+ *
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ * @param cfg
+ * The ECC configuration
+ */
+void al_ddr_ecc_cfg_get(
+ void __iomem *ddr_ctrl_regs_base,
+ struct al_ddr_ecc_cfg *cfg);
+
+int al_ddr_ecc_corr_count_clear(
+ void __iomem *ddr_ctrl_regs_base);
+
+/**
+ * @brief Clear the correctable error interrupt
+ *
+ * @param nb_regs_base
+ * Address of the NB register base, used i.o. to clear NB interrupt
+ * (use NULL if no clearing is required)
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ *
+ * @returns 0 if successful
+ * <0 otherwise
+ */
+int al_ddr_ecc_corr_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base);
+
+int al_ddr_ecc_uncorr_count_clear(
+ void __iomem *ddr_ctrl_regs_base);
+
+/**
+ * @brief Clear the uncorrectable error interrupt
+ *
+ * @param nb_regs_base
+ * Address of the NB register base, used i.o. to clear NB interrupt
+ * (use NULL if no clearing is required)
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ *
+ * @returns 0 if successful
+ * <0 otherwise
+ */
+int al_ddr_ecc_uncorr_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base);
+
+int al_ddr_ecc_data_poison_enable(
+ void __iomem *ddr_ctrl_regs_base,
+ unsigned int rank,
+ unsigned int bank,
+ unsigned int col,
+ unsigned int row);
+
+int al_ddr_ecc_data_poison_disable(
+ void __iomem *ddr_ctrl_regs_base);
+
+unsigned int al_ddr_parity_count_get(
+ void __iomem *ddr_ctrl_regs_base);
+
+void al_ddr_parity_count_clear(
+ void __iomem *ddr_ctrl_regs_base);
+
+void al_ddr_parity_int_clear(
+ void __iomem *nb_regs_base,
+ void __iomem *ddr_ctrl_regs_base);
+
+int al_ddr_power_mode_set(
+ void __iomem *ddr_ctrl_regs_base,
+ enum al_ddr_power_mode power_mode,
+ unsigned int timer_x32);
+
+enum al_ddr_operating_mode al_ddr_operating_mode_get(
+ void __iomem *ddr_ctrl_regs_base);
+
+int al_ddr_address_translate_sys2dram(
+ void __iomem *ddr_ctrl_regs_base,
+ al_phys_addr_t sys_address,
+ unsigned int *rank,
+ unsigned int *bank,
+ unsigned int *col,
+ unsigned int *row);
+
+int al_ddr_address_translate_dram2sys(
+ void __iomem *ddr_ctrl_regs_base,
+ al_phys_addr_t *sys_address,
+ unsigned int rank,
+ unsigned int bank,
+ unsigned int col,
+ unsigned int row);
+
+/**
+ * @brief Get the amount of connected address bits
+ *
+ * User can use these bits i.o. to calculate the memory device's rank size
+ *
+ * @param ddr_ctrl_regs_base
+ * Address of the DDR controller register base
+ *
+ * @returns Num of connected address bits (rank size == 1 << active_bits)
+ */
+unsigned int al_ddr_bits_per_rank_get(
+ void __iomem *ddr_ctrl_regs_base);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of DDR group */
+#endif
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_ddr_cfg.h b/arch/arm/mach-alpine/include/al_hal/al_hal_ddr_cfg.h
new file mode 100644
index 0000000..d7e09ed
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_ddr_cfg.h
@@ -0,0 +1,50 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __AL_HAL_DDR_CFG_H__
+#define __AL_HAL_DDR_CFG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The number of byte lanes (including ECC) */
+#define AL_DDR_PHY_NUM_BYTE_LANES 9
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_iofic.h b/arch/arm/mach-alpine/include/al_hal/al_hal_iofic.h
new file mode 100644
index 0000000..eeb388e
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_iofic.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_interrupts Common I/O Fabric Interrupt Controller
+ * This HAL provides the API for programming the Common I/O Fabric Interrupt
+ * Controller (IOFIC) found in most of the units attached to the I/O Fabric of
+ * Alpine platform
+ * @{
+ * @file al_hal_iofic.h
+ *
+ * @brief Header file for the interrupt controller that's embedded in various units
+ *
+ */
+
+#ifndef __AL_HAL_IOFIC_H__
+#define __AL_HAL_IOFIC_H__
+
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define AL_IOFIC_MAX_GROUPS 4
+
+/*
+ * Configurations
+ */
+
+/**
+ * Configure the interrupt controller registers, actual interrupts are still
+ * masked at this stage.
+ *
+ * @param regs_base regs pointer to interrupt controller registers
+ * @param group the interrupt group.
+ * @param flags flags of Interrupt Control Register
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_config(void __iomem *regs_base, int group,
+ uint32_t flags);
+
+/**
+ * configure the moderation timer resolution for a given group
+ * Applies for both msix and legacy mode.
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param resolution resolution of the timer interval, the resolution determines the rate
+ * of decrementing the interval timer, setting value N means that the interval
+ * timer will be decremented each (N+1) * (0.68) micro seconds.
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_moder_res_config(void __iomem *regs_base, int group,
+ uint8_t resolution);
+
+/**
+ * configure the moderation timer interval for a given legacy interrupt group
+ *
+ * @param regs_base regs pointer to unit registers
+ * @param group the interrupt group
+ * @param interval between interrupts in resolution units. 0 disable
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_legacy_moder_interval_config(void __iomem *regs_base, int group,
+ uint8_t interval);
+
+/**
+ * configure the moderation timer interval for a given msix vector
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param vector vector index
+ * @param interval interval between interrupts, 0 disable
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_iofic_msix_moder_interval_config(void __iomem *regs_base, int group,
+ uint8_t vector, uint8_t interval);
+
+/**
+ * return the offset of the unmask register for a given group.
+ * this function can be used when the upper layer wants to directly
+ * access the unmask regiter and bypass the al_iofic_unmask() API.
+ *
+ * @param regs_base regs pointer to unit registers
+ * @param group the interrupt group
+ * @return the offset of the unmask register.
+ */
+uint32_t __iomem * al_iofic_unmask_offset_get(void __iomem *regs_base, int group);
+
+/**
+ * unmask specific interrupts for a given group
+ * this functions guarantees atomic operations, it is performance optimized as
+ * it will not require read-modify-write. The unmask done using the interrupt
+ * mask clear register, so it's safe to call it while the mask is changed by
+ * the HW (auto mask) or another core.
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to unmask, set bits will be unmasked.
+ */
+void al_iofic_unmask(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * mask specific interrupts for a given group
+ * this functions modifies interrupt mask register, the callee must make sure
+ * the mask is not changed by another cpu.
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to mask, set bits will be masked.
+ */
+void al_iofic_mask(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * read the mask register for a given group
+ * this functions return the interrupt mask register
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ */
+uint32_t al_iofic_read_mask(void __iomem *regs_base, int group);
+
+/**
+ * read interrupt cause register for a given group
+ * this will clear the set bits if the Clear on Read mode enabled.
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ */
+uint32_t al_iofic_read_cause(void __iomem *regs_base, int group);
+
+/**
+ * clear bits in the interrupt cause register for a given group
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of bits to be cleared, set bits will be cleared.
+ */
+void al_iofic_clear_cause(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * set the cause register for a given group
+ * this function set the cause register. It will generate an interrupt (if
+ * the the interrupt isn't masked )
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of bits to be set.
+ */
+void al_iofic_set_cause(void __iomem *regs_base, int group, uint32_t mask);
+
+/**
+ * unmask specific interrupts from aborting the udma a given group
+ *
+ * @param regs_base pointer to unit registers
+ * @param group the interrupt group
+ * @param mask bitwise of interrupts to mask
+ */
+void al_iofic_abort_mask(void __iomem *regs_base, int group, uint32_t mask);
+
+#endif
+/** @} end of interrupt controller group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_iofic_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_iofic_regs.h
new file mode 100644
index 0000000..05b4a13
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_iofic_regs.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_IOFIC_REG_H
+#define __AL_HAL_IOFIC_REG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+struct al_iofic_grp_ctrl {
+ uint32_t int_cause_grp; /* Interrupt Cause Register. Set by hardware */
+ uint32_t rsrvd1;
+ uint32_t int_cause_set_grp; /* Interrupt Cause Set RegisterWriting 1 to a bit in t ... */
+ uint32_t rsrvd2;
+ uint32_t int_mask_grp; /* Interrupt Mask RegisterIf Auto-mask control bit =TR ... */
+ uint32_t rsrvd3;
+ uint32_t int_mask_clear_grp; /* Interrupt Mask Clear RegisterUsed when auto-mask co ... */
+ uint32_t rsrvd4;
+ uint32_t int_status_grp; /* Interrupt status RegisterThis register latch the st ... */
+ uint32_t rsrvd5;
+ uint32_t int_control_grp; /* Interrupt Control Register */
+ uint32_t rsrvd6;
+ uint32_t int_abort_msk_grp; /* Interrupt Mask RegisterEach bit in this register ma ... */
+ uint32_t rsrvd7;
+ uint32_t int_log_msk_grp; /* Interrupt Log RegisterEach bit in this register mas ... */
+ uint32_t rsrvd8;
+};
+
+struct al_iofic_grp_mod {
+ uint32_t grp_int_mod_reg; /* Interrupt moderation registerDedicated moderation in ... */
+ uint32_t rsrvd;
+};
+
+struct al_iofic_regs {
+ struct al_iofic_grp_ctrl ctrl[0];
+ uint32_t rsrvd1[0x400 >> 2];
+ struct al_iofic_grp_mod grp_int_mod[0][32];
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** int_control_grp register ****/
+/* When Clear_on_Read =1, All bits of Cause register ... */
+#define INT_CONTROL_GRP_CLEAR_ON_READ (1 << 0)
+/* (must be set only when MSIX is enabled)When Auto-Ma ... */
+#define INT_CONTROL_GRP_AUTO_MASK (1 << 1)
+/* Auto_Clear (RW)When Auto-Clear =1, the bits in the ... */
+#define INT_CONTROL_GRP_AUTO_CLEAR (1 << 2)
+/* When Set_on_Posedge =1, the bits in the interrupt c ... */
+#define INT_CONTROL_GRP_SET_ON_POSEDGE (1 << 3)
+/* When Moderation_Reset =1, all Moderation timers ass ... */
+#define INT_CONTROL_GRP_MOD_RST (1 << 4)
+/* When mask_msi_x =1, No MSI-X from this group is sen ... */
+#define INT_CONTROL_GRP_MASK_MSI_X (1 << 5)
+/* MSI-X AWID value, same ID for all cause bits */
+#define INT_CONTROL_GRP_AWID_MASK 0x00000F00
+#define INT_CONTROL_GRP_AWID_SHIFT 8
+/* This value determines the interval between interrup ... */
+#define INT_CONTROL_GRP_MOD_INTV_MASK 0x00FF0000
+#define INT_CONTROL_GRP_MOD_INTV_SHIFT 16
+/* This value determines the Moderation_Timer_Clock sp ... */
+#define INT_CONTROL_GRP_MOD_RES_MASK 0x0F000000
+#define INT_CONTROL_GRP_MOD_RES_SHIFT 24
+
+/**** grp_int_mod_reg register ****/
+/* Interrupt Moderation Interval registerDedicated reg ... */
+#define INT_MOD_INTV_MASK 0x000000FF
+#define INT_MOD_INTV_SHIFT 0
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_IOFIC_REG_H */
+
+
+
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_m2m_udma.h b/arch/arm/mach-alpine/include/al_hal/al_hal_m2m_udma.h
new file mode 100644
index 0000000..e6ca90e
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_m2m_udma.h
@@ -0,0 +1,147 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_m2m_udma M2M UDMA
+ * @ingroup group_udma_api
+ * M2M UDMA
+ * @{
+ * @file al_hal_m2m_udma.h
+ *
+ * @brief Header file for HAL driver for DMA that compound of M2S and S2M.
+ *
+ * The M2M UDMA is a software concept that defines a DMA that consists of
+ * M2S and S2M UDMAs, this concept is used to share common functionality
+ * between different DMAs that use M2S and S2M UDMAs, this is the case for the
+ * RAID and Crypto Acceleration DMAs.
+ * The M2M UDMA is built on top of the UDMA driver, while the latter manages
+ * either S2M or M2S UDMA, the M2M instantiates two UDMA engines, and uses the
+ * UDMA driver to manage and provide the following functionalities:
+ * - S2M and M2S UDMA initialization.
+ * - S2M and M2S UDMA Queues initialization.
+ * - manages the state of the two UDMAs.
+ * Other functionalities will be provided directly by the UDMA driver.
+ */
+
+#ifndef __AL_HAL_M2M_UDMA_H__
+#define __AL_HAL_M2M_UDMA_H__
+
+#include
+#include
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/** M2M UDMA private data structure */
+struct al_m2m_udma {
+ char *name;
+ void __iomem *m2s_regs_base;
+ void __iomem *s2m_regs_base;
+ uint8_t num_of_queues;
+ struct al_udma tx_udma; /** the m2s component of the M2M UDMA */
+ struct al_udma rx_udma; /** the s2m component of the M2M UDMA */
+};
+
+/** M2M UDMA parameters from upper layer */
+struct al_m2m_udma_params {
+ void __iomem *m2s_regs_base;
+ void __iomem *s2m_regs_base;
+ char *name; /** the upper layer must keep the string area */
+ uint8_t num_of_queues;/** number of queues */
+ uint8_t max_m2s_descs_per_pkt; /** maximum descriptors per m2s packet */
+ uint8_t max_s2m_descs_per_pkt; /** maximum descriptors per s2m packet */
+};
+
+/**
+ * initialize M2M UDMA
+ *
+ * @param m2m_udma m2m udma handle
+ * @param params m2m udma parameters from upper layer
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_m2m_udma_init(struct al_m2m_udma *m2m_udma,
+ struct al_m2m_udma_params *params);
+
+/**
+ * initialize the m2s(tx) and s2m(rx) udmas of the queue
+ *
+ * @param m2m_udma m2m udma handle
+ * @param qid queue index
+ * @param tx_params udma queue params for the tx udma queue
+ * @param rx_params udma queue params for the rx udma queue
+ *
+ * @return 0 if no error found.
+ * -EINVAL if the qid is out of range
+ * -EIO if queue was already initialized
+ */
+int al_m2m_udma_q_init(struct al_m2m_udma *m2m_udma, uint32_t qid,
+ struct al_udma_q_params *tx_params,
+ struct al_udma_q_params *rx_params);
+/**
+ * Change the M2M UDMA state
+ *
+ * @param dma m2m udma handle
+ * @param udma_state the target state
+ *
+ * @return 0
+ */
+int al_m2m_udma_state_set(struct al_m2m_udma *dma,
+ enum al_udma_state udma_state);
+
+/**
+ * Get udma handle of the tx or rx udma, this handle can be used to call misc
+ * configuration functions defined at al_udma_config.h
+ *
+ * @param m2m_udma m2m udma handle
+ * @param type tx or rx udma
+ * @param udma the requested udma handle written to this pointer
+ *
+ * @return 0
+ */
+int al_m2m_udma_handle_get(struct al_m2m_udma *m2m_udma,
+ enum al_udma_type type,
+ struct al_udma **udma);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_M2M_UDMA_H__ */
+/** @} end of M2M UDMA group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_nb_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_nb_regs.h
new file mode 100644
index 0000000..26ecb25
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_nb_regs.h
@@ -0,0 +1,1381 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_NB_REGS_H__
+#define __AL_HAL_NB_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct al_nb_global {
+ /* [0x0] */
+ uint32_t cpus_config;
+ /* [0x4] */
+ uint32_t cpus_secure;
+ /* [0x8] Force init reset. */
+ uint32_t cpus_init_control;
+ /* [0xc] Force init reset per device mode. */
+ uint32_t cpus_init_status;
+ /* [0x10] */
+ uint32_t nb_int_cause;
+ /* [0x14] */
+ uint32_t sev_int_cause;
+ /* [0x18] */
+ uint32_t pmus_int_cause;
+ /* [0x1c] */
+ uint32_t sev_mask;
+ /* [0x20] */
+ uint32_t cpus_hold_reset;
+ /* [0x24] */
+ uint32_t cpus_software_reset;
+ /* [0x28] */
+ uint32_t wd_timer0_reset;
+ /* [0x2c] */
+ uint32_t wd_timer1_reset;
+ /* [0x30] */
+ uint32_t wd_timer2_reset;
+ /* [0x34] */
+ uint32_t wd_timer3_reset;
+ /* [0x38] */
+ uint32_t ddrc_hold_reset;
+ /* [0x3c] */
+ uint32_t fabric_software_reset;
+ /* [0x40] */
+ uint32_t cpus_power_ctrl;
+ uint32_t rsrvd_0[7];
+ /* [0x60] */
+ uint32_t acf_base_high;
+ /* [0x64] */
+ uint32_t acf_base_low;
+ /* [0x68] */
+ uint32_t acf_control_override;
+ /* [0x6c] Read-only that reflects CPU Cluster Local GIC base ... */
+ uint32_t lgic_base_high;
+ /* [0x70] Read-only that reflects CPU Cluster Local GIC base ... */
+ uint32_t lgic_base_low;
+ /* [0x74] Read-only that reflects the device's IOGIC base hig ... */
+ uint32_t iogic_base_high;
+ /* [0x78] Read-only that reflects IOGIC base low address */
+ uint32_t iogic_base_low;
+ /* [0x7c] */
+ uint32_t io_wr_split_control;
+ /* [0x80] */
+ uint32_t io_rd_rob_control;
+ /* [0x84] */
+ uint32_t sb_pos_error_log_1;
+ /* [0x88] */
+ uint32_t sb_pos_error_log_0;
+ /* [0x8c] */
+ uint32_t c2swb_config;
+ /* [0x90] */
+ uint32_t msix_error_log;
+ /* [0x94] */
+ uint32_t error_cause;
+ /* [0x98] */
+ uint32_t error_mask;
+ uint32_t rsrvd_1;
+ /* [0xa0] */
+ uint32_t qos_peak_control;
+ /* [0xa4] */
+ uint32_t qos_set_control;
+ /* [0xa8] */
+ uint32_t ddr_qos;
+ uint32_t rsrvd_2[9];
+ /* [0xd0] */
+ uint32_t acf_misc;
+ /* [0xd4] */
+ uint32_t config_bus_control;
+ uint32_t rsrvd_3[10];
+ /* [0x100] */
+ uint32_t cpu_max_pd_timer;
+ /* [0x104] */
+ uint32_t cpu_max_pu_timer;
+ uint32_t rsrvd_4[2];
+ /* [0x110] */
+ uint32_t auto_ddr_self_refresh_counter;
+ uint32_t rsrvd_5[3];
+ /* [0x120] */
+ uint32_t coresight_pd;
+ /* [0x124] */
+ uint32_t coresight_internal_0;
+ /* [0x128] */
+ uint32_t coresight_dbgromaddr;
+ /* [0x12c] */
+ uint32_t coresight_dbgselfaddr;
+ /* [0x130] */
+ uint32_t coresght_targetid;
+ /* [0x134] */
+ uint32_t coresght_targetid0;
+ uint32_t rsrvd[946];
+};
+struct al_nb_system_counter {
+ /* [0x0] */
+ uint32_t cnt_control;
+ /* [0x4] */
+ uint32_t cnt_base_freq;
+ /* [0x8] */
+ uint32_t cnt_low;
+ /* [0xc] */
+ uint32_t cnt_high;
+ /* [0x10] */
+ uint32_t cnt_init_low;
+ /* [0x14] */
+ uint32_t cnt_init_high;
+ uint32_t rsrvd[58];
+};
+struct al_nb_rams_control_misc {
+ /* [0x0] */
+ uint32_t ca15_rf_misc;
+ uint32_t rsrvd_0;
+ /* [0x8] */
+ uint32_t nb_rf_misc;
+ uint32_t rsrvd[61];
+};
+struct al_nb_ca15_rams_control {
+ /* [0x0] */
+ uint32_t rf_0;
+ /* [0x4] */
+ uint32_t rf_1;
+ /* [0x8] */
+ uint32_t rf_2;
+ uint32_t rsrvd;
+};
+struct al_nb_semaphores {
+ /* [0x0] This configuration is only sampled during reset of t ... */
+ uint32_t lockn;
+};
+struct al_nb_debug {
+ /* [0x0] */
+ uint32_t ca15_outputs_1;
+ /* [0x4] */
+ uint32_t ca15_outputs_2;
+ uint32_t rsrvd_0[2];
+ /* [0x10] */
+ uint32_t cpu_msg[4];
+ /* [0x20] */
+ uint32_t rsv0_config;
+ /* [0x24] */
+ uint32_t rsv1_config;
+ uint32_t rsrvd_1[2];
+ /* [0x30] */
+ uint32_t rsv0_status;
+ /* [0x34] */
+ uint32_t rsv1_status;
+ uint32_t rsrvd_2[2];
+ /* [0x40] */
+ uint32_t ddrc;
+ /* [0x44] */
+ uint32_t ddrc_phy_smode_control;
+ /* [0x48] */
+ uint32_t ddrc_phy_smode_status;
+ uint32_t rsrvd_3[5];
+ /* [0x60] */
+ uint32_t pmc;
+ uint32_t rsrvd_4[3];
+ /* [0x70] */
+ uint32_t cpus_general;
+ uint32_t rsrvd_5[3];
+ /* [0x80] */
+ uint32_t cpus_int_out;
+ uint32_t rsrvd_6[31];
+ /* [0x100] */
+ uint32_t track_dump_ctrl;
+ /* [0x104] */
+ uint32_t track_dump_rdata_0;
+ /* [0x108] */
+ uint32_t track_dump_rdata_1;
+ uint32_t rsrvd_7[5];
+ /* [0x120] */
+ uint32_t track_events;
+ uint32_t rsrvd_8[3];
+ /* [0x130] */
+ uint32_t pos_track_dump_ctrl;
+ /* [0x134] */
+ uint32_t pos_track_dump_rdata_0;
+ /* [0x138] */
+ uint32_t pos_track_dump_rdata_1;
+ uint32_t rsrvd_9;
+ /* [0x140] */
+ uint32_t c2swb_track_dump_ctrl;
+ /* [0x144] */
+ uint32_t c2swb_track_dump_rdata_0;
+ /* [0x148] */
+ uint32_t c2swb_track_dump_rdata_1;
+ uint32_t rsrvd_10[5];
+ /* [0x160] */
+ uint32_t c2swb_bar_ovrd_high;
+ /* [0x164] */
+ uint32_t c2swb_bar_ovrd_low;
+ uint32_t rsrvd[38];
+};
+struct al_nb_cpun_config_status {
+ /* [0x0] This configuration is only sampled during reset of t ... */
+ uint32_t config;
+ uint32_t rsrvd_0;
+ /* [0x8] */
+ uint32_t local_cause_mask;
+ uint32_t rsrvd_1;
+ /* [0x10] */
+ uint32_t pmus_cause_mask;
+ uint32_t rsrvd_2[3];
+ /* [0x20] Specifies the state of the CPU with reference to po ... */
+ uint32_t power_ctrl;
+ /* [0x24] */
+ uint32_t power_status;
+ /* [0x28] */
+ uint32_t resume_addr_l;
+ /* [0x2c] */
+ uint32_t resume_addr_h;
+ uint32_t rsrvd[52];
+};
+struct al_nb_mc_pmu {
+ /* [0x0] PMU Global Control Register */
+ uint32_t pmu_control;
+ /* [0x4] PMU Global Control Register */
+ uint32_t overflow;
+ uint32_t rsrvd[62];
+};
+struct al_nb_mc_pmu_counters {
+ /* [0x0] Counter Configuration Register */
+ uint32_t cfg;
+ /* [0x4] Counter Control Register */
+ uint32_t cntl;
+ /* [0x8] Counter Control Register */
+ uint32_t low;
+ /* [0xc] Counter Control Register */
+ uint32_t high;
+ uint32_t rsrvd[4];
+};
+struct al_nb_nb_version {
+ /* [0x0] Northbridge Revision */
+ uint32_t version;
+ uint32_t rsrvd;
+};
+struct al_nb_sriov {
+ /* [0x0] */
+ uint32_t cpu_vmid[4];
+ uint32_t rsrvd[4];
+};
+union al_nb_pcie_logging {
+ struct {
+ /* [0x0] */
+ uint32_t control;
+ uint32_t rsrvd_0[3];
+ /* [0x10] */
+ uint32_t wr_window_low;
+ /* [0x14] */
+ uint32_t wr_window_high;
+ /* [0x18] */
+ uint32_t wr_window_size;
+ uint32_t rsrvd_1;
+ /* [0x20] */
+ uint32_t fifo_base;
+ /* [0x24] */
+ uint32_t fifo_size;
+ /* [0x28] */
+ uint32_t fifo_head;
+ /* [0x2c] */
+ uint32_t fifo_tail;
+ /* [0x30] */
+ uint32_t wr_window_low_1;
+ /* [0x34] */
+ uint32_t wr_window_high_1;
+ /* [0x38] */
+ uint32_t wr_window_size_1;
+ uint32_t rsrvd_2;
+ /* [0x40] */
+ uint32_t fifo_base_1;
+ /* [0x44] */
+ uint32_t fifo_size_1;
+ /* [0x48] */
+ uint32_t fifo_head_1;
+ /* [0x4c] */
+ uint32_t fifo_tail_1;
+ /* [0x50] */
+ uint32_t rd_window_low;
+ /* [0x54] */
+ uint32_t rd_window_high;
+ /* [0x58] */
+ uint32_t rd_window_size;
+ /* [0x5c] */
+ uint32_t read_latch;
+ /* [0x60] */
+ uint32_t rd_window_low_1;
+ /* [0x64] */
+ uint32_t rd_window_high_1;
+ /* [0x68] */
+ uint32_t rd_window_size_1;
+ /* [0x6c] */
+ uint32_t read_latch_1;
+ /* [0x70] */
+ uint32_t read_latch_timeout;
+ uint32_t rsrvd[35];
+ } a0;
+ struct {
+ uint32_t control;
+ uint32_t read_latch;
+ uint32_t window_low;
+ uint32_t rsrvd_0;
+ uint32_t window_high;
+ uint32_t fifo_base;
+ uint32_t fifo_size;
+ uint32_t fifo_head; /* */
+ uint32_t fifo_tail;
+ uint32_t rsrvd[55];
+ } m0;
+};
+
+struct al_nb_regs {
+ struct al_nb_global global; /* [0x0] */
+ struct al_nb_system_counter system_counter; /* [0x1000] */
+ struct al_nb_rams_control_misc rams_control_misc; /* [0x1100] */
+ struct al_nb_ca15_rams_control ca15_rams_control[5]; /* [0x1200] */
+ uint32_t rsrvd_0[108];
+ struct al_nb_semaphores semaphores[64]; /* [0x1400] */
+ uint32_t rsrvd_1[320];
+ struct al_nb_debug debug; /* [0x1a00] */
+ uint32_t rsrvd_2[256];
+ struct al_nb_cpun_config_status cpun_config_status[4]; /* [0x2000] */
+ uint32_t rsrvd_3[1792];
+ struct al_nb_mc_pmu mc_pmu; /* [0x4000] */
+ struct al_nb_mc_pmu_counters mc_pmu_counters[4]; /* [0x4100] */
+ uint32_t rsrvd_4[160];
+ struct al_nb_nb_version nb_version; /* [0x4400] */
+ uint32_t rsrvd_5[126];
+ struct al_nb_sriov sriov; /* [0x4600] */
+ uint32_t rsrvd_6[632];
+ union al_nb_pcie_logging pcie_logging; /* [0x5000] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** CPUs_Config register ****/
+/* Disable broadcast of barrier onto system bus */
+#define NB_GLOBAL_CPUS_CONFIG_SYSBARDISABLE (1 << 0)
+/* Enable broadcast of inner shareable transactions from CPUs */
+#define NB_GLOBAL_CPUS_CONFIG_BROADCASTINNER (1 << 1)
+/* Disable broadcast of cache maintenance system bus */
+#define NB_GLOBAL_CPUS_CONFIG_BROADCASTCACHEMAINT (1 << 2)
+/* Enable broadcast of outer shareable transactions from CPUs */
+#define NB_GLOBAL_CPUS_CONFIG_BROADCASTOUTER (1 << 3)
+/* Defines the internal CPU GIC operating frequency ratio with t ... */
+#define NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_MASK 0x00000030
+#define NB_GLOBAL_CPUS_CONFIG_PERIPHCLKEN_SHIFT 4
+
+/**** CPUs_Secure register ****/
+/* dbgen
+Write once. */
+#define NB_GLOBAL_CPUS_SECURE_DBGEN (1 << 0)
+/* niden
+Write once. */
+#define NB_GLOBAL_CPUS_SECURE_NIDEN (1 << 1)
+/* spiden
+Write once. */
+#define NB_GLOBAL_CPUS_SECURE_SPIDEN (1 << 2)
+/* spniden
+Write once. */
+#define NB_GLOBAL_CPUS_SECURE_SPNIDEN (1 << 3)
+/* Disable write access to some secure GIC registers */
+#define NB_GLOBAL_CPUS_SECURE_CFGSDISABLE (1 << 4)
+
+/**** CPUs_Init_Control register ****/
+/* CPU Init DoneSpecifies which CPUs' inits are done and can exi ... */
+#define NB_GLOBAL_CPUS_INIT_CONTROL_CPUS_INITDONE_MASK 0x0000000F
+#define NB_GLOBAL_CPUS_INIT_CONTROL_CPUS_INITDONE_SHIFT 0
+/* DBGPWRDNREQ MaskWhen CPU does not exist, its dbgpwrdnreq must ... */
+#define NB_GLOBAL_CPUS_INIT_CONTROL_DBGPWRDNREQ_MASK_MASK 0x000000F0
+#define NB_GLOBAL_CPUS_INIT_CONTROL_DBGPWRDNREQ_MASK_SHIFT 4
+/* Force CPU init power-on-reset exit.
+For debug purposes only. */
+#define NB_GLOBAL_CPUS_INIT_CONTROL_FORCE_CPUPOR_MASK 0x00000F00
+#define NB_GLOBAL_CPUS_INIT_CONTROL_FORCE_CPUPOR_SHIFT 8
+
+/**** CPUs_Init_Status register ****/
+/* Specifies which CPUs are enabled in the device configurations ... */
+#define NB_GLOBAL_CPUS_INIT_STATUS_CPUS_EXIST_MASK 0x0000000F
+#define NB_GLOBAL_CPUS_INIT_STATUS_CPUS_EXIST_SHIFT 0
+
+/**** NB_Int_Cause register ****/
+/*
+ * Each bit corresponds to an IRQ.
+ * value is 1 for level irq, 0 for trigger irq
+ * Level IRQ indices: 12-13, 23, 24, 26-29
+ */
+#define NB_GLOBAL_NB_INT_CAUSE_LEVEL_IRQ_MASK 0x3D803000
+/* Cross trigger interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_NCTIIRQ_MASK 0x0000000F
+#define NB_GLOBAL_NB_INT_CAUSE_NCTIIRQ_SHIFT 0
+/* Communications channel receive */
+#define NB_GLOBAL_NB_INT_CAUSE_COMMRX_MASK 0x000000F0
+#define NB_GLOBAL_NB_INT_CAUSE_COMMRX_SHIFT 4
+/* Communication channel transmit */
+#define NB_GLOBAL_NB_INT_CAUSE_COMMTX_MASK 0x00000F00
+#define NB_GLOBAL_NB_INT_CAUSE_COMMTX_SHIFT 8
+/* Emulation write fifo log has valid entry */
+#define NB_GLOBAL_NB_INT_CAUSE_PCIE_LOG_FIFO_VALID_0 (1 << 12)
+/* Write logging FIFO wrap occurred */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_M0 (1 << 13)
+/* Emulation write fifo log has valid entry */
+#define NB_GLOBAL_NB_INT_CAUSE_PCIE_LOG_FIFO_VALID_1_A0 (1 << 13)
+/* Write logging FIFO is full */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_M0 (1 << 14)
+/* Reserved, read undefined must write as zeros. */
+#define NB_GLOBAL_NB_INT_CAUSE_RESERVED_15_15 (1 << 15)
+/* Error indicator for AXI write transactions with a BRESP error ... */
+#define NB_GLOBAL_NB_INT_CAUSE_CPU_AXIERRIRQ (1 << 16)
+/* Error indicator for: L2 RAM double-bit ECC error, illegal wri ... */
+#define NB_GLOBAL_NB_INT_CAUSE_CPU_INTERRIRQ (1 << 17)
+/* Coherent fabric error summary interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_ACF_ERRORIRQ (1 << 18)
+/* DDR Controller ECC Correctable error summary interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_CORR_ERR (1 << 19)
+/* DDR Controller ECC Uncorrectable error summary interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_MCTL_ECC_UNCORR_ERR (1 << 20)
+/* DRAM parity error interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_MCTL_PARITY_ERR (1 << 21)
+/* Reserved, not functional */
+#define NB_GLOBAL_NB_INT_CAUSE_MCTL_WDATARAM_PAR (1 << 22)
+/* Error cause summary interrupt */
+#define NB_GLOBAL_NB_INT_CAUSE_ERR_CAUSE_SUM_A0 (1 << 23)
+/* SB PoS error */
+#define NB_GLOBAL_NB_INT_CAUSE_SB_POS_ERR (1 << 24)
+/* Received msix is not mapped to local GIC or IO-GIC spin */
+#define NB_GLOBAL_NB_INT_CAUSE_MSIX_ERR_INT_M0 (1 << 25)
+/* Coresight timestamp overflow */
+#define NB_GLOBAL_NB_INT_CAUSE_CORESIGHT_TS_OVERFLOW_M0 (1 << 26)
+/* Emulation write fifo log is wrapped */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_A0 (1 << 26)
+/* Write data parity error from SB channel 0. */
+#define NB_GLOBAL_NB_INT_CAUSE_SB0_WRDATA_PERR_M0 (1 << 27)
+/* Emulation write fifo log is full (new pushes might corrupt da ... */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_A0 (1 << 27)
+/* Write data parity error from SB channel 1. */
+#define NB_GLOBAL_NB_INT_CAUSE_SB1_WRDATA_PERR_M0 (1 << 28)
+/* Emulation write fifo log is wrapped */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_WRAP_1_A0 (1 << 28)
+/* Read data parity error from SB slaves. */
+#define NB_GLOBAL_NB_INT_CAUSE_SB_SLV_RDATA_PERR_M0 (1 << 29)
+/* Emulation write fifo log is full (new pushes might corrupt da ... */
+#define NB_GLOBAL_NB_INT_CAUSE_WR_LOG_FIFO_FULL_1_A0 (1 << 29)
+/* PCIe read latched */
+#define NB_GLOBAL_NB_INT_CAUSE_RD_LOG_SET_0 (1 << 30)
+/* PCIe read latched */
+#define NB_GLOBAL_NB_INT_CAUSE_RD_LOG_SET_1_A0 (1 << 31)
+
+/**** SEV_Int_Cause register ****/
+/* SMMU 0/1 global non-secure fault interrupt */
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_GBL_FLT_IRPT_NS_MASK 0x00000003
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_GBL_FLT_IRPT_NS_SHIFT 0
+/* SMMU 0/1 non-secure context interrupt */
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CXT_IRPT_NS_MASK 0x0000000C
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CXT_IRPT_NS_SHIFT 2
+/* SMMU0/1 Non-secure configuration access fault interrupt */
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CFG_FLT_IRPT_S_MASK 0x00000030
+#define NB_GLOBAL_SEV_INT_CAUSE_SMMU_CFG_FLT_IRPT_S_SHIFT 4
+/* Reserved. Read undefined; must write as zeros. */
+#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_11_6_MASK 0x00000FC0
+#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_11_6_SHIFT 6
+/* PCIe emulation: inbound writes fifo has valid entry */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_VALID_0 (1 << 12)
+/* PCIe emulation: inbound writes fifo has being wrapped (tail p ... */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_WRAP_0 (1 << 13)
+/* PCIe emulation: inbound writes fifo is full */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_FULL_0 (1 << 14)
+/* PCIe emulation: inbound writes fifo has valid entry */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_VALID_1 (1 << 15)
+/* PCIe emulation: inbound writes fifo has being wrapped (tail p ... */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_WRAP_1 (1 << 16)
+/* PCIe emulation: inbound writes fifo is full */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_LOG_FIFO_FULL_1 (1 << 17)
+/* PCIe emulation: inbound pcie read is latched */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_RD_LOG_SET_0 (1 << 18)
+/* PCIe emulation: inbound pcie read is latched */
+#define NB_GLOBAL_SEV_INT_CAUSE_PCIE_RD_LOG_SET_1 (1 << 19)
+/* Reserved. Read undefined; must write as zeros. */
+#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_31_20_MASK 0xFFF00000
+#define NB_GLOBAL_SEV_INT_CAUSE_RESERVED_31_20_SHIFT 20
+
+/**** PMUs_Int_Cause register ****/
+/* CPUs PMU Overflow interrupt */
+#define NB_GLOBAL_PMUS_INT_CAUSE_CPUS_OVFL_MASK 0x0000000F
+#define NB_GLOBAL_PMUS_INT_CAUSE_CPUS_OVFL_SHIFT 0
+/* Northbridge PMU overflow */
+#define NB_GLOBAL_PMUS_INT_CAUSE_NB_OVFL (1 << 4)
+/* Memory Controller PMU overflow */
+#define NB_GLOBAL_PMUS_INT_CAUSE_MCTL_OVFL (1 << 5)
+/* Coherency Interconnect PMU overflow */
+#define NB_GLOBAL_PMUS_INT_CAUSE_CCI_OVFL_MASK 0x000007C0
+#define NB_GLOBAL_PMUS_INT_CAUSE_CCI_OVFL_SHIFT 6
+/* Coherency Interconnect PMU overflow */
+#define NB_GLOBAL_PMUS_INT_CAUSE_SMMU_OVFL_MASK 0x00001800
+#define NB_GLOBAL_PMUS_INT_CAUSE_SMMU_OVFL_SHIFT 11
+/* Reserved. Read undefined; must write as zeros. */
+#define NB_GLOBAL_PMUS_INT_CAUSE_RESERVED_23_13_MASK 0x00FFE000
+#define NB_GLOBAL_PMUS_INT_CAUSE_RESERVED_23_13_SHIFT 13
+/* Southbridge PMUs overflow */
+#define NB_GLOBAL_PMUS_INT_CAUSE_SB_PMUS_OVFL_MASK 0xFF000000
+#define NB_GLOBAL_PMUS_INT_CAUSE_SB_PMUS_OVFL_SHIFT 24
+
+/**** CPUs_Hold_Reset register ****/
+/* Shared L2 memory system, interrupt controller and timer logic ... */
+#define NB_GLOBAL_CPUS_HOLD_RESET_L2RESET (1 << 0)
+/* Shared debug domain reset */
+#define NB_GLOBAL_CPUS_HOLD_RESET_PRESETDBG (1 << 1)
+/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... */
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_DBGRESET_MASK 0x000000F0
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_DBGRESET_SHIFT 4
+/* Individual CPU core and VFP/NEON logic reset */
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_CORERESET_MASK 0x00000F00
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_CORERESET_SHIFT 8
+/* Individual CPU por-on-reset */
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_PORESET_MASK 0x0000F000
+#define NB_GLOBAL_CPUS_HOLD_RESET_CPU_PORESET_SHIFT 12
+/* Wait for interrupt mask */
+#define NB_GLOBAL_CPUS_HOLD_RESET_WFI_MASK_MASK 0x000F0000
+#define NB_GLOBAL_CPUS_HOLD_RESET_WFI_MASK_SHIFT 16
+
+/**** CPUs_Software_Reset register ****/
+/* Write 1. Apply the software reset. */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_SWRESET_REQ (1 << 0)
+/* Defines the level of software reset. */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_MASK 0x0000000E
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT 1
+/* Individual CPU core reset. */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_CORE \
+ (0x0 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* Individual CPU power-on-reset. */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_PORESET \
+ (0x1 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* Individual CPU debug reset. */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CPU_DBG \
+ (0x2 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* A Cluster reset puts each core into core reset (no dbg) and a ... */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER_NO_DBG \
+ (0x3 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* A Cluster reset puts each core into power-on-reset and also r ... */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER \
+ (0x4 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* A Cluster power-on-reset puts each core into power-on-reset a ... */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_CLUSTER_PORESET \
+ (0x5 << NB_GLOBAL_CPUS_SOFTWARE_RESET_LEVEL_SHIFT)
+/* Defines which cores to reset when no cluster_poreset is reque ... */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_CORES_MASK 0x000000F0
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_CORES_SHIFT 4
+/* CPUn wait for interrupt enable */
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_WFI_MASK_MASK 0x000F0000
+#define NB_GLOBAL_CPUS_SOFTWARE_RESET_WFI_MASK_SHIFT 16
+
+/**** WD_Timer0_Reset register ****/
+/* Shared L2 memory system, interrupt controller and timer logic ... */
+#define NB_GLOBAL_WD_TIMER0_RESET_L2RESET (1 << 0)
+/* Shared debug domain reset */
+#define NB_GLOBAL_WD_TIMER0_RESET_PRESETDBG (1 << 1)
+/* Individual CPU debug PTM, watchpoint and breakpoint logic res ... */
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_DBGRESET_MASK 0x000000F0
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_DBGRESET_SHIFT 4
+/* Individual CPU core and VFP/NEON logic reset */
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_CORERESET_MASK 0x00000F00
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_CORERESET_SHIFT 8
+/* Individual CPU por-on-reset */
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_PORESET_MASK 0x0000F000
+#define NB_GLOBAL_WD_TIMER0_RESET_CPU_PORESET_SHIFT 12
+
+/**** WD_Timer1_Reset register ****/
+/* Shared L2 memory system, interrupt controller and timer logic ... */
+#define NB_GLOBAL_WD_TIMER1_RESET_L2RESET (1 << 0)
+/* Shared debug domain reset */
+#define NB_GLOBAL_WD_TIMER1_RESET_PRESETDBG (1 << 1)
+/* Individual CPU debug PTM, watchpoint and breakpoint logic res ... */
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_DBGRESET_MASK 0x000000F0
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_DBGRESET_SHIFT 4
+/* Individual CPU core and VFP/NEON logic reset */
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_CORERESET_MASK 0x00000F00
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_CORERESET_SHIFT 8
+/* Individual CPU power-on-reset */
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_PORESET_MASK 0x0000F000
+#define NB_GLOBAL_WD_TIMER1_RESET_CPU_PORESET_SHIFT 12
+
+/**** WD_Timer2_Reset register ****/
+/* Shared L2 memory system, interrupt controller and timer logic ... */
+#define NB_GLOBAL_WD_TIMER2_RESET_L2RESET (1 << 0)
+/* Shared debug domain reset */
+#define NB_GLOBAL_WD_TIMER2_RESET_PRESETDBG (1 << 1)
+/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... */
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_DBGRESET_MASK 0x000000F0
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_DBGRESET_SHIFT 4
+/* Individual CPU core and VFP/NEON logic reset */
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_CORERESET_MASK 0x00000F00
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_CORERESET_SHIFT 8
+/* Individual CPU power-on-reset */
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_PORESET_MASK 0x0000F000
+#define NB_GLOBAL_WD_TIMER2_RESET_CPU_PORESET_SHIFT 12
+
+/**** WD_Timer3_Reset register ****/
+/* Shared L2 memory system, interrupt controller and timer logic ... */
+#define NB_GLOBAL_WD_TIMER3_RESET_L2RESET (1 << 0)
+/* Shared debug domain reset */
+#define NB_GLOBAL_WD_TIMER3_RESET_PRESETDBG (1 << 1)
+/* Individual CPU debug, PTM, watchpoint and breakpoint logic re ... */
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_DBGRESET_MASK 0x000000F0
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_DBGRESET_SHIFT 4
+/* Individual CPU core and VFP/NEON logic reset */
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_CORERESET_MASK 0x00000F00
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_CORERESET_SHIFT 8
+/* Individual CPU power-on-reset */
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_PORESET_MASK 0x0000F000
+#define NB_GLOBAL_WD_TIMER3_RESET_CPU_PORESET_SHIFT 12
+
+/**** DDRC_Hold_Reset register ****/
+/* DDR Control and PHY memory mapped registers reset control0 - ... */
+#define NB_GLOBAL_DDRC_HOLD_RESET_APB_SYNC_RESET (1 << 0)
+/* DDR Control Core reset control0 - Reset is deasserted */
+#define NB_GLOBAL_DDRC_HOLD_RESET_CORE_SYNC_RESET (1 << 1)
+/* DDR Control AXI Interface reset control0 - Reset is deasserte ... */
+#define NB_GLOBAL_DDRC_HOLD_RESET_AXI_SYNC_RESET (1 << 2)
+/* DDR PUB Controller reset control0 - Reset is deasserted */
+#define NB_GLOBAL_DDRC_HOLD_RESET_PUB_CTL_SYNC_RESET (1 << 3)
+/* DDR PUB SDR Controller reset control0 - Reset is deasserted */
+#define NB_GLOBAL_DDRC_HOLD_RESET_PUB_SDR_SYNC_RESET (1 << 4)
+/* DDR PHY reset control0 - Reset is deasserted */
+#define NB_GLOBAL_DDRC_HOLD_RESET_PHY_SYNC_RESET (1 << 5)
+/* Memory initialization input to DDR SRAM for parity check supp ... */
+#define NB_GLOBAL_DDRC_HOLD_RESET_DDR_UNIT_MEM_INIT (1 << 6)
+
+/**** Fabric_Software_Reset register ****/
+/* Write 1 apply the software reset. */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_SWRESET_REQ (1 << 0)
+/* Defines the level of software reset. */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_MASK 0x0000000E
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT 1
+/* Fabric reset */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_FABRIC \
+ (0x0 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT)
+/* GIC reset */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_GIC \
+ (0x1 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT)
+/* SMMU reset */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SMMU \
+ (0x2 << NB_GLOBAL_FABRIC_SOFTWARE_RESET_LEVEL_SHIFT)
+/* CPUn waiting for interrupt enable */
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_WFI_MASK_MASK 0x000F0000
+#define NB_GLOBAL_FABRIC_SOFTWARE_RESET_WFI_MASK_SHIFT 16
+
+/**** CPUs_Power_Ctrl register ****/
+/* L2 WFI enableWhen all the processors are in WFI mode or power ... */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2WFI_EN (1 << 0)
+/* L2 WFI status */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2WFI_STATUS (1 << 1)
+/* L2 RAMs Power DownPower down the L2 RAMs */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_EN (1 << 2)
+/* L2 RAMs power down status */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_STATUS (1 << 3)
+/* CPU state condition to enable L2 RAM power down0 - Power down ... */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_CPUS_STATE_MASK 0x000000F0
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_CPUS_STATE_SHIFT 4
+/* Enable external debugger over power-down */
+#define NB_GLOBAL_CPUS_POWER_CTRL_EXT_DEBUGGER_OVER_PD_EN (1 << 8)
+/* force wakeup the CPU in L2RAM powedwnINTERNAL DEBUG PURPOSE O ... */
+#define NB_GLOBAL_CPUS_POWER_CTRL_FORCE_CPUS_OK_PWRUP (1 << 27)
+/* L2 RAMs power down SM status */
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_SM_STATUS_MASK 0xF0000000
+#define NB_GLOBAL_CPUS_POWER_CTRL_L2RAMS_PWRDN_SM_STATUS_SHIFT 28
+
+/**** ACF_Base_High register ****/
+/* Coherency Fabric registers base [39:32]. */
+#define NB_GLOBAL_ACF_BASE_HIGH_BASE_39_32_MASK 0x000000FF
+#define NB_GLOBAL_ACF_BASE_HIGH_BASE_39_32_SHIFT 0
+/* Coherency Fabric registers base [31:15] */
+#define NB_GLOBAL_ACF_BASE_LOW_BASED_31_15_MASK 0xFFFF8000
+#define NB_GLOBAL_ACF_BASE_LOW_BASED_31_15_SHIFT 15
+
+/**** ACF_Control_Override register ****/
+/* Override the AWCACHE[0] and ARCACHE[0] outputs to benon-buffe ... */
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_BUFFOVRD_MASK 0x00000007
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_BUFFOVRD_SHIFT 0
+/* Overrides the ARQOS and AWQOS input signals */
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_QOSOVRD_MASK 0x000000F8
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_QOSOVRD_SHIFT 3
+/* If LOW, then AC requests are never issued on the correspondin ... */
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_ACE_CH_EN_MASK 0x00001F00
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_ACE_CH_EN_SHIFT 8
+/* Internal register:Enables 4k hazard of post-barrier vs pre-ba ... */
+#define NB_GLOBAL_ACF_CONTROL_OVERRIDE_DMB_4K_HAZARD_EN (1 << 13)
+
+/**** LGIC_Base_High register ****/
+/* GIC registers base [39:32] */
+#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_MASK 0x000000FF
+#define NB_GLOBAL_LGIC_BASE_HIGH_BASE_39_32_SHIFT 0
+/* GIC registers base [31:15] */
+#define NB_GLOBAL_LGIC_BASE_LOW_BASED_31_15_MASK 0xFFFF8000
+#define NB_GLOBAL_LGIC_BASE_LOW_BASED_31_15_SHIFT 15
+
+/**** IOGIC_Base_High register ****/
+/* IOGIC registers base [39:32] */
+#define NB_GLOBAL_IOGIC_BASE_HIGH_BASE_39_32_MASK 0x000000FF
+#define NB_GLOBAL_IOGIC_BASE_HIGH_BASE_39_32_SHIFT 0
+/* IOGIC registers base [31:15] */
+#define NB_GLOBAL_IOGIC_BASE_LOW_BASED_31_15_MASK 0xFFFF8000
+#define NB_GLOBAL_IOGIC_BASE_LOW_BASED_31_15_SHIFT 15
+
+/**** IO_Wr_Split_Control register ****/
+/* Write splitters bypass */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_BYPASS_MASK 0x00000003
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_BYPASS_SHIFT 0
+/* Write splitters store and forward */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_ST_FW_MASK 0x0000000C
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_ST_FW_SHIFT 2
+/* Write splitters unmodify snoop type */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNMODIFY_SNP_MASK 0x00000030
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNMODIFY_SNP_SHIFT 4
+/* Write splitters unsplit non-coherent access */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNSPLIT_NOSNP_MASK 0x000000C0
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_UNSPLIT_NOSNP_SHIFT 6
+/* Write splitter rate limit. */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR0_SPLT_RATE_LIMIT_MASK 0x00001F00
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR0_SPLT_RATE_LIMIT_SHIFT 8
+/* Write splitter rate limit */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR1_SPLT_RATE_LIMIT_MASK 0x0003E000
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR1_SPLT_RATE_LIMIT_SHIFT 13
+/* Clear is not supported */
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_CLEAR_MASK 0xC0000000
+#define NB_GLOBAL_IO_WR_SPLIT_CONTROL_WR_SPLT_CLEAR_SHIFT 30
+
+/**** IO_Rd_ROB_Control register ****/
+/* Read ROB Bypass[0] Rd ROB 0 bypass enable */
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_BYPASS_MASK 0x00000003
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_BYPASS_SHIFT 0
+/* Read ROB in order */
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_INORDER_MASK 0x0000000C
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD_ROB_INORDER_SHIFT 2
+/* Read splitter rate limit */
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD0_ROB_RATE_LIMIT_MASK 0x00001F00
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD0_ROB_RATE_LIMIT_SHIFT 8
+/* Read splitter rate limit */
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD1_ROB_RATE_LIMIT_MASK 0x0003E000
+#define NB_GLOBAL_IO_RD_ROB_CONTROL_RD1_ROB_RATE_LIMIT_SHIFT 13
+
+/**** SB_PoS_Error_Log_1 register ****/
+/* Error Log 1[7:0] address_high[16:8] request id[18:17] bresp ... */
+#define NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_MASK 0x7FFFFFFF
+#define NB_GLOBAL_SB_POS_ERROR_LOG_1_ERR_LOG_SHIFT 0
+/* Valid logged errorSet on SB PoS error occurance on capturing ... */
+#define NB_GLOBAL_SB_POS_ERROR_LOG_1_VALID (1 << 31)
+
+/**** MSIx_Error_Log register ****/
+/* Error Log
+Corresponds to MSIx address message [30:0]. */
+#define NB_GLOBAL_MSIX_ERROR_LOG_ERR_LOG_MASK 0x7FFFFFFF
+#define NB_GLOBAL_MSIX_ERROR_LOG_ERR_LOG_SHIFT 0
+/* Valid logged error */
+#define NB_GLOBAL_MSIX_ERROR_LOG_VALID (1 << 31)
+
+/**** Error_Cause register ****/
+/* PCIe emulation: inbound pcie read latch timeout */
+#define NB_GLOBAL_ERROR_CAUSE_PCIE_RD_LOG_0_TIMEOUT (1 << 0)
+/* PCIe emulation: inbound pcie read latch timeout */
+#define NB_GLOBAL_ERROR_CAUSE_PCIE_RD_LOG_1_TIMEOUT (1 << 1)
+/* Received msix is not mapped to local GIC or IO-GIC spin */
+#define NB_GLOBAL_ERROR_CAUSE_MSIX_ERR_INT (1 << 2)
+/* Coresight timestamp overflow */
+#define NB_GLOBAL_ERROR_CAUSE_CORESIGHT_TS_OVERFLOW (1 << 3)
+/* Write data parity error from SB channel 0. */
+#define NB_GLOBAL_ERROR_CAUSE_SB0_WRDATA_PERR (1 << 4)
+/* Write data parity error from SB channel 1. */
+#define NB_GLOBAL_ERROR_CAUSE_SB1_WRDATA_PERR (1 << 5)
+/* Read data parity error from SB slaves. */
+#define NB_GLOBAL_ERROR_CAUSE_SB_SLV_RDATA_PERR (1 << 6)
+/* Reserved. Read undefined; must write as zeros. */
+#define NB_GLOBAL_ERROR_CAUSE_RESERVED_31_7_MASK 0xFFFFFF80
+#define NB_GLOBAL_ERROR_CAUSE_RESERVED_31_7_SHIFT 7
+
+/**** QoS_Peak_Control register ****/
+/* Peak Read Low ThresholdWhen the number of outstanding read tr ... */
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_L_THRESHOLD_MASK 0x0000007F
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_L_THRESHOLD_SHIFT 0
+/* Peak Read High ThresholdWhen the number of outstanding read t ... */
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_H_THRESHOLD_MASK 0x00007F00
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_RD_H_THRESHOLD_SHIFT 8
+/* Peak Write Low ThresholdWhen the number of outstanding write ... */
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_L_THRESHOLD_MASK 0x007F0000
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_L_THRESHOLD_SHIFT 16
+/* Peak Write High ThresholdWhen the number of outstanding write ... */
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_H_THRESHOLD_MASK 0x7F000000
+#define NB_GLOBAL_QOS_PEAK_CONTROL_PEAK_WR_H_THRESHOLD_SHIFT 24
+
+/**** QoS_Set_Control register ****/
+/* CPU Low priority Read QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_ARQOS_MASK 0x0000000F
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_ARQOS_SHIFT 0
+/* CPU High priority Read QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_ARQOS_MASK 0x000000F0
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_ARQOS_SHIFT 4
+/* CPU Low priority Write QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_AWQOS_MASK 0x00000F00
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_LP_AWQOS_SHIFT 8
+/* CPU High priority Write QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_AWQOS_MASK 0x0000F000
+#define NB_GLOBAL_QOS_SET_CONTROL_CPU_HP_AWQOS_SHIFT 12
+/* SB Low priority Read QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_ARQOS_MASK 0x000F0000
+#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_ARQOS_SHIFT 16
+/* SB Low-priority Write QoS */
+#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_AWQOS_MASK 0x00F00000
+#define NB_GLOBAL_QOS_SET_CONTROL_SB_LP_AWQOS_SHIFT 20
+
+/**** DDR_QoS register ****/
+/* High Priority Read ThresholdLimits the number of outstanding ... */
+#define NB_GLOBAL_DDR_QOS_HIGH_PRIO_THRESHOLD_MASK 0x0000007F
+#define NB_GLOBAL_DDR_QOS_HIGH_PRIO_THRESHOLD_SHIFT 0
+/* DDR Low Priority QoSFabric priority below this value is mappe ... */
+#define NB_GLOBAL_DDR_QOS_LP_QOS_MASK 0x00000F00
+#define NB_GLOBAL_DDR_QOS_LP_QOS_SHIFT 8
+
+/**** ACF_Misc register ****/
+/* Disable DDR Write ChopPerformance optimitation feature to cho ... */
+#define NB_GLOBAL_ACF_MISC_DDR_WR_CHOP_DIS (1 << 0)
+/* Disable SB-2-SB path through NB fabric. */
+#define NB_GLOBAL_ACF_MISC_SB2SB_PATH_DIS (1 << 1)
+/* Disable ETR tracing to non-DDR. */
+#define NB_GLOBAL_ACF_MISC_ETR2SB_PATH_DIS (1 << 2)
+/* Disable ETR tracing to non-DDR. */
+#define NB_GLOBAL_ACF_MISC_CPU2MSIX_DIS (1 << 3)
+/* Disable CPU generation of MSIx By default, the CPU can set an ... */
+#define NB_GLOBAL_ACF_MISC_MSIX_TERMINATE_DIS (1 << 4)
+/* Disable snoop override for MSIxBy default, an MSIx transactio ... */
+#define NB_GLOBAL_ACF_MISC_MSIX_SNOOPOVRD_DIS (1 << 5)
+/* POS bypass */
+#define NB_GLOBAL_ACF_MISC_POS_BYPASS (1 << 6)
+/* PoS ReadStronglyOrdered enableSO read forces flushing of all ... */
+#define NB_GLOBAL_ACF_MISC_POS_RSO_EN (1 << 7)
+/* WRAP to INC transfer enable */
+#define NB_GLOBAL_ACF_MISC_POS_WRAP2INC (1 << 8)
+/* PoS DSB flush DisableOn DSB from CPU, PoS blocks the progress ... */
+#define NB_GLOBAL_ACF_MISC_POS_DSB_FLUSH_DIS (1 << 9)
+/* PoS DMB Flush DisableOn DMB from CPU, the PoS blocks the prog ... */
+#define NB_GLOBAL_ACF_MISC_POS_DMB_FLUSH_DIS (1 << 10)
+/* change DMB functionality to DSB (block and drain) */
+#define NB_GLOBAL_ACF_MISC_POS_DMB_TO_DSB_EN (1 << 11)
+/* Disable write after read stall when accessing IO fabric slave ... */
+#define NB_GLOBAL_ACF_MISC_M0_WAR_STALL_DIS (1 << 12)
+/* Disable write after read stall when accessing DDR */
+#define NB_GLOBAL_ACF_MISC_M1_WAR_STALL_DIS (1 << 13)
+/* spare configuration bits[14]: disable pos change to disable/e ... */
+#define NB_GLOBAL_ACF_MISC_CONFIG_SPARE_MASK 0x1FFFC000
+#define NB_GLOBAL_ACF_MISC_CONFIG_SPARE_SHIFT 14
+/* Enable CPU WriteUnique to WriteNoSnoop trasform */
+#define NB_GLOBAL_ACF_MISC_CPU_WU2WNS_EN (1 << 29)
+/* Disable device after device check */
+#define NB_GLOBAL_ACF_MISC_WR_POS_DEV_AFTER_DEV_DIS (1 << 30)
+/* Disable wrap to inc on write */
+#define NB_GLOBAL_ACF_MISC_WR_INC2WRAP_EN (1 << 31)
+
+/**** Config_Bus_Control register ****/
+/* Write slave error enable */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_WR_SLV_ERR_EN (1 << 0)
+/* Write decode error enable */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_WR_DEC_ERR_EN (1 << 1)
+/* Read slave error enable */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_RD_SLV_ERR_EN (1 << 2)
+/* Read decode error enable */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_RD_DEC_ERR_EN (1 << 3)
+/* Ignore Write ID */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_IGNORE_WR_ID (1 << 4)
+/* Timeout limit before terminating configuration bus access wit ... */
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_MASK 0xFFFFFF00
+#define NB_GLOBAL_CONFIG_BUS_CONTROL_TIMEOUT_LIMIT_SHIFT 8
+
+/**** Coresight_PD register ****/
+/* ETF0 RAM force power down */
+#define NB_GLOBAL_CORESIGHT_PD_ETF0_RAM_FORCE_PD (1 << 0)
+/* ETF1 RAM force power down */
+#define NB_GLOBAL_CORESIGHT_PD_ETF1_RAM_FORCE_PD (1 << 1)
+/* ETF0 RAM force clock gate */
+#define NB_GLOBAL_CORESIGHT_PD_ETF0_RAM_FORCE_CG (1 << 2)
+/* ETF1 RAM force clock gate */
+#define NB_GLOBAL_CORESIGHT_PD_ETF1_RAM_FORCE_CG (1 << 3)
+/* APBIC clock enable */
+#define NB_GLOBAL_CORESIGHT_PD_APBICLKEN (1 << 4)
+/* DAP system clock enable */
+#define NB_GLOBAL_CORESIGHT_PD_DAP_SYS_CLKEN (1 << 5)
+
+/**** Coresight_INTERNAL_0 register ****/
+
+#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CTIAPBSBYPASS (1 << 0)
+/* CA15 CTM and Coresight CTI operate at same clock, bypass mode ... */
+#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CISBYPASS (1 << 1)
+/* CA15 CTM and Coresight CTI operate according to the same cloc ... */
+#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CIHSBYPASS_MASK 0x0000003C
+#define NB_GLOBAL_CORESIGHT_INTERNAL_0_CIHSBYPASS_SHIFT 2
+
+/**** Coresight_DBGROMADDR register ****/
+/* Valid signal for DBGROMADDR.
+Connected to DBGROMADDRV */
+#define NB_GLOBAL_CORESIGHT_DBGROMADDR_VALID (1 << 0)
+/* Specifies bits [39:12] of the ROM table physical address. */
+#define NB_GLOBAL_CORESIGHT_DBGROMADDR_ADDR_39_12_MASK 0x3FFFFFFC
+#define NB_GLOBAL_CORESIGHT_DBGROMADDR_ADDR_39_12_SHIFT 2
+
+/**** Coresight_DBGSELFADDR register ****/
+/* Valid signal for DBGROMADDR.
+Connected to DBGROMADDRV */
+#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_VALID (1 << 0)
+/* Specifies bits [18:17] of the two’s complement signed offset ... */
+#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_18_17_MASK 0x00000180
+#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_18_17_SHIFT 7
+/* Specifies bits [39:19] of the two’s complement signed offset ... */
+#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_39_19_MASK 0x3FFFFE00
+#define NB_GLOBAL_CORESIGHT_DBGSELFADDR_ADDR_39_19_SHIFT 9
+
+/**** Cnt_Control register ****/
+/* System counter enable
+Counter is enabled after reset. */
+#define NB_SYSTEM_COUNTER_CNT_CONTROL_EN (1 << 0)
+/* System counter restartInitial value is reloaded from Counter_ ... */
+#define NB_SYSTEM_COUNTER_CNT_CONTROL_RESTART (1 << 1)
+/* System counter tickSpecifies the counter tick rate relative t ... */
+#define NB_SYSTEM_COUNTER_CNT_CONTROL_SCALE_MASK 0x0000FF00
+#define NB_SYSTEM_COUNTER_CNT_CONTROL_SCALE_SHIFT 8
+
+/**** CA15_RF_Misc register ****/
+
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_NONECPU_RF_MISC_MASK 0x0000000F
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_NONECPU_RF_MISC_SHIFT 0
+
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_CPU_RF_MISC_MASK 0x00FFFF00
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_CPU_RF_MISC_SHIFT 8
+/* Pause for CPUs from the time all power is up to the time the ... */
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_PWR_UP_PAUSE_MASK 0xF8000000
+#define NB_RAMS_CONTROL_MISC_CA15_RF_MISC_PWR_UP_PAUSE_SHIFT 27
+
+/**** NB_RF_Misc register ****/
+/* SMMU TLB RAMs force power down */
+#define NB_RAMS_CONTROL_MISC_NB_RF_MISC_SMMU_RAM_FORCE_PD (1 << 0)
+
+/**** Lockn register ****/
+/* Semaphore LockCPU reads it:If current value ==0, return 0 to ... */
+#define NB_SEMAPHORES_LOCKN_LOCK (1 << 0)
+
+/**** CA15_outputs_1 register ****/
+
+#define NB_DEBUG_CA15_OUTPUTS_1_STANDBYWFI_MASK 0x0000000F
+#define NB_DEBUG_CA15_OUTPUTS_1_STANDBYWFI_SHIFT 0
+
+#define NB_DEBUG_CA15_OUTPUTS_1_CPU_PWR_DN_ACK_MASK 0x000000F0
+#define NB_DEBUG_CA15_OUTPUTS_1_CPU_PWR_DN_ACK_SHIFT 4
+
+#define NB_DEBUG_CA15_OUTPUTS_1_IRQOUT_N_MASK 0x00000F00
+#define NB_DEBUG_CA15_OUTPUTS_1_IRQOUT_N_SHIFT 8
+
+#define NB_DEBUG_CA15_OUTPUTS_1_FIQOUT_N_MASK 0x0000F000
+#define NB_DEBUG_CA15_OUTPUTS_1_FIQOUT_N_SHIFT 12
+
+#define NB_DEBUG_CA15_OUTPUTS_1_CNTHPIRQ_N_MASK 0x000F0000
+#define NB_DEBUG_CA15_OUTPUTS_1_CNTHPIRQ_N_SHIFT 16
+
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPNSIRQ_N_MASK 0x00F00000
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPNSIRQ_N_SHIFT 20
+
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPSIRQ_N_MASK 0x0F000000
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTPSIRQ_N_SHIFT 24
+
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTVIRQ_N_MASK 0xF0000000
+#define NB_DEBUG_CA15_OUTPUTS_1_NCNTVIRQ_N_SHIFT 28
+
+/**** CA15_outputs_2 register ****/
+
+#define NB_DEBUG_CA15_OUTPUTS_2_STANDBYWFIL2 (1 << 0)
+
+#define NB_DEBUG_CA15_OUTPUTS_2_L2RAM_PWR_DN_ACK (1 << 1)
+
+/**** cpu_msg register ****/
+/* status/ascii code */
+#define NB_DEBUG_CPU_MSG_STATUS_MASK 0x000000FF
+#define NB_DEBUG_CPU_MSG_STATUS_SHIFT 0
+/* toggle with each ascii write */
+#define NB_DEBUG_CPU_MSG_ASCII_TOGGLE (1 << 8)
+/* signals ascii */
+#define NB_DEBUG_CPU_MSG_ASCII (1 << 9)
+
+#define NB_DEBUG_CPU_MSG_RESERVED_11_10_MASK 0x00000C00
+#define NB_DEBUG_CPU_MSG_RESERVED_11_10_SHIFT 10
+/* Signals new section started in S/W */
+#define NB_DEBUG_CPU_MSG_SECTION_START (1 << 12)
+
+#define NB_DEBUG_CPU_MSG_RESERVED_13 (1 << 13)
+/* Signals a single CPU is done. */
+#define NB_DEBUG_CPU_MSG_CPU_DONE (1 << 14)
+/* Signals test is done */
+#define NB_DEBUG_CPU_MSG_TEST_DONE (1 << 15)
+
+/**** ddrc register ****/
+/* External DLL calibration request */
+#define NB_DEBUG_DDRC_DLL_CALIB_EXT_REQ (1 << 0)
+/* External request to perform short (long isperformed during in ... */
+#define NB_DEBUG_DDRC_ZQ_SHORT_CALIB_EXT_REQ (1 << 1)
+/* External request to perform a refresh command to a specific b ... */
+#define NB_DEBUG_DDRC_RANK_REFRESH_EXT_REQ_MASK 0x0000003C
+#define NB_DEBUG_DDRC_RANK_REFRESH_EXT_REQ_SHIFT 2
+
+/**** ddrc_phy_smode_control register ****/
+/* DDR PHY special mode */
+#define NB_DEBUG_DDRC_PHY_SMODE_CONTROL_CTL_MASK 0x0000FFFF
+#define NB_DEBUG_DDRC_PHY_SMODE_CONTROL_CTL_SHIFT 0
+
+/**** ddrc_phy_smode_status register ****/
+/* DDR PHY special mode */
+#define NB_DEBUG_DDRC_PHY_SMODE_STATUS_STT_MASK 0x0000FFFF
+#define NB_DEBUG_DDRC_PHY_SMODE_STATUS_STT_SHIFT 0
+
+/**** pmc register ****/
+/* Enable system control on NB DRO */
+#define NB_DEBUG_PMC_SYS_EN (1 << 0)
+/* NB PMC HVT35 counter value */
+#define NB_DEBUG_PMC_HVT35_VAL_14_0_MASK 0x0000FFFE
+#define NB_DEBUG_PMC_HVT35_VAL_14_0_SHIFT 1
+/* NB PMC SVT31 counter value */
+#define NB_DEBUG_PMC_SVT31_VAL_14_0_MASK 0x7FFF0000
+#define NB_DEBUG_PMC_SVT31_VAL_14_0_SHIFT 16
+
+/**** cpus_int_out register ****/
+/* Defines which CPUs' IRQ will be triggered out through the cpu ... */
+#define NB_DEBUG_CPUS_INT_OUT_FIQ_EN_MASK 0x0000000F
+#define NB_DEBUG_CPUS_INT_OUT_FIQ_EN_SHIFT 0
+/* Defines which CPUs' FIQ will be triggered out through the cpu ... */
+#define NB_DEBUG_CPUS_INT_OUT_IRQ_EN_MASK 0x000000F0
+#define NB_DEBUG_CPUS_INT_OUT_IRQ_EN_SHIFT 4
+
+/**** track_dump_ctrl register ****/
+/* [24:16]: queue entry pointer[2] target queue: 1'b0: HazardTr ... */
+#define NB_DEBUG_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF
+#define NB_DEBUG_TRACK_DUMP_CTRL_PTR_SHIFT 0
+/* Track Dump RequestIf set, queue entry info is latched on trac ... */
+#define NB_DEBUG_TRACK_DUMP_CTRL_REQ (1 << 31)
+
+/**** track_dump_rdata_0 register ****/
+/* valid */
+#define NB_DEBUG_TRACK_DUMP_RDATA_0_VALID (1 << 0)
+/* low data */
+#define NB_DEBUG_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE
+#define NB_DEBUG_TRACK_DUMP_RDATA_0_DATA_SHIFT 1
+
+/**** pos_track_dump_ctrl register ****/
+/* [24:16]: queue entry pointer */
+#define NB_DEBUG_POS_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF
+#define NB_DEBUG_POS_TRACK_DUMP_CTRL_PTR_SHIFT 0
+/* Track Dump RequestIf set, queue entry info is latched on trac ... */
+#define NB_DEBUG_POS_TRACK_DUMP_CTRL_REQ (1 << 31)
+
+/**** pos_track_dump_rdata_0 register ****/
+/* valid */
+#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_VALID (1 << 0)
+/* low data */
+#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE
+#define NB_DEBUG_POS_TRACK_DUMP_RDATA_0_DATA_SHIFT 1
+
+/**** c2swb_track_dump_ctrl register ****/
+/* [24:16]: queue entry pointer */
+#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_PTR_MASK 0x7FFFFFFF
+#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_PTR_SHIFT 0
+/* Track Dump RequestIf set, queue entry info is latched on trac ... */
+#define NB_DEBUG_C2SWB_TRACK_DUMP_CTRL_REQ (1 << 31)
+
+/**** c2swb_track_dump_rdata_0 register ****/
+/* valid */
+#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_VALID (1 << 0)
+/* low data */
+#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_DATA_MASK 0xFFFFFFFE
+#define NB_DEBUG_C2SWB_TRACK_DUMP_RDATA_0_DATA_SHIFT 1
+
+/**** c2swb_bar_ovrd_high register ****/
+/* Read barrier is progress downstream when not terminated in th ... */
+#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_RD_ADDR_OVRD_EN (1 << 0)
+/* address bits 39:32 */
+#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_ADDR_39_32_MASK 0x00FF0000
+#define NB_DEBUG_C2SWB_BAR_OVRD_HIGH_ADDR_39_32_SHIFT 16
+
+/**** Config register ****/
+/* Individual processor control of the endianness configuration ... */
+#define NB_CPUN_CONFIG_STATUS_CONFIG_ENDIAN (1 << 0)
+/* Individual processor control of the default exception handlin ... */
+#define NB_CPUN_CONFIG_STATUS_CONFIG_TE (1 << 1)
+/* Individual processor control of the location of the exception ... */
+#define NB_CPUN_CONFIG_STATUS_CONFIG_VINITHI (1 << 2)
+/* Individual processor control to disable write access to some ... */
+#define NB_CPUN_CONFIG_STATUS_CONFIG_CP15DISABLE (1 << 3)
+
+/**** Power_Ctrl register ****/
+/* Individual CPU power mode transition requestIf requested to e ... */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_MASK 0x00000003
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT 0
+/* Normal power mode state */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_NORMAL \
+ (0x0 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT)
+/* Dormant power mode state */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_DEEP_IDLE \
+ (0x2 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT)
+/* Powered-off power mode */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_POWEREDOFF \
+ (0x3 << NB_CPUN_CONFIG_STATUS_POWER_CTRL_PM_REQ_SHIFT)
+/* Power down regret disableWhen power down regret is enabled, t ... */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PWRDN_RGRT_DIS (1 << 16)
+/* Power down emulation enableIf set, the entire power down sequ ... */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_PWRDN_EMULATE (1 << 17)
+/* Disable wakeup from Local--GIC FIQ. */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_LGIC_FIQ_DIS (1 << 18)
+/* Disable wakeup from Local-GIC IRQ. */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_LGIC_IRQ_DIS (1 << 19)
+/* Disable wakeup from IO-GIC FIQ. */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_IOGIC_FIQ_DIS (1 << 20)
+/* Disable wakeup from IO-GIC IRQ. */
+#define NB_CPUN_CONFIG_STATUS_POWER_CTRL_WU_IOGIC_IRQ_DIS (1 << 21)
+
+/**** Power_Status register ****/
+/* Read-only bits that reflect the individual CPU power mode sta ... */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_MASK 0x00000003
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT 0
+/* Normal power mode state */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_NORMAL \
+ (0x0 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT)
+/* Idle power mode state (WFI) */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_IDLE \
+ (0x1 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT)
+/* Dormant power mode state */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_DEEP_IDLE \
+ (0x2 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT)
+/* Powered-off power mode */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_POWEREDOFF \
+ (0x3 << NB_CPUN_CONFIG_STATUS_POWER_STATUS_CPU_PM_SHIFT)
+/* WFI status */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_WFI (1 << 2)
+/* WFE status */
+#define NB_CPUN_CONFIG_STATUS_POWER_STATUS_WFE (1 << 3)
+
+/**** PMU_Control register ****/
+/* Disable all countersWhen this bit is clear, counter state is ... */
+#define NB_MC_PMU_PMU_CONTROL_DISABLE_ALL (1 << 0)
+/* Pause all counters */
+#define NB_MC_PMU_PMU_CONTROL_PAUSE_ALL (1 << 1)
+/* Overflow interrupt enable. */
+#define NB_MC_PMU_PMU_CONTROL_OVRF_INTR_EN (1 << 2)
+/* Number of monitored events supported by the PMU */
+#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_MASK 0x00F80000
+#define NB_MC_PMU_PMU_CONTROL_NUM_OF_EVENTS_SHIFT 19
+/* Number of counters implemented by PMU. */
+#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_MASK 0x0F000000
+#define NB_MC_PMU_PMU_CONTROL_NUM_OF_CNTS_SHIFT 24
+
+/**** Cfg register ****/
+/* Event select */
+#define NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_MASK 0x0000003F
+#define NB_MC_PMU_COUNTERS_CFG_EVENT_SEL_SHIFT 0
+/* Enable setting of counter low overflow status bit. */
+#define NB_MC_PMU_COUNTERS_CFG_OVRF_LOW_STT_EN (1 << 6)
+/* Enable setting of counter high overflow status bit. */
+#define NB_MC_PMU_COUNTERS_CFG_OVRF_HIGH_STT_EN (1 << 7)
+/* Enable pause on trigger in assertion. */
+#define NB_MC_PMU_COUNTERS_CFG_TRIGIN_PAUSE_EN (1 << 8)
+/* Enable increment trigger out for trace */
+#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_EN (1 << 9)
+/* Trigger out granule valueSpecifies the number of events count ... */
+#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_GRANULA_MASK 0x00007C00
+#define NB_MC_PMU_COUNTERS_CFG_TRIGOUT_GRANULA_SHIFT 10
+/* Pause on overflow bitmaskIf set for counter , current coun ... */
+#define NB_MC_PMU_COUNTERS_CFG_PAUSE_ON_OVRF_BITMASK_MASK 0x000F0000
+#define NB_MC_PMU_COUNTERS_CFG_PAUSE_ON_OVRF_BITMASK_SHIFT 16
+
+/**** Cntl register ****/
+/* Set the counter state to disable, enable, or pause. */
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_MASK 0x00000003
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT 0
+/* Disable counter. */
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_DISABLE \
+ (0x0 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT)
+/* Enable counter. */
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_ENABLE \
+ (0x1 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT)
+/* Pause counter. */
+#define NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_PAUSE \
+ (0x3 << NB_MC_PMU_COUNTERS_CNTL_CNT_STATE_SHIFT)
+
+/**** High register ****/
+/* Counter high value */
+#define NB_MC_PMU_COUNTERS_HIGH_COUNTER_MASK 0x0000FFFF
+#define NB_MC_PMU_COUNTERS_HIGH_COUNTER_SHIFT 0
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define NB_NB_VERSION_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* Date of release */
+#define NB_NB_VERSION_VERSION_DATE_DAY_MASK 0x001F0000
+#define NB_NB_VERSION_VERSION_DATE_DAY_SHIFT 16
+/* Month of release */
+#define NB_NB_VERSION_VERSION_DATA_MONTH_MASK 0x01E00000
+#define NB_NB_VERSION_VERSION_DATA_MONTH_SHIFT 21
+/* Year of release (starting from 2000) */
+#define NB_NB_VERSION_VERSION_DATE_YEAR_MASK 0x3E000000
+#define NB_NB_VERSION_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define NB_NB_VERSION_VERSION_RESERVED_MASK 0xC0000000
+#define NB_NB_VERSION_VERSION_RESERVED_SHIFT 30
+
+/**** cpu_vmid register ****/
+/* target VMID */
+#define NB_SRIOV_CPU_VMID_VAL_MASK 0x000000FF
+#define NB_SRIOV_CPU_VMID_VAL_SHIFT 0
+
+/**** control register ****/
+/* Enable write accesses logging to FIFO instance 0When an inbou ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_EN (1 << 0)
+/* Enable read accesses loggingWhen an inbound read from PCIe hi ... */
+#define NB_PCIE_LOGGING_CONTROL_RD_EN (1 << 1)
+/* Enable write accesses logging to FIFO instance 1 When an inbo ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_EN_1 (1 << 2)
+/* Enable read accesses logging to PCIe 1 and 2When an inbound r ... */
+#define NB_PCIE_LOGGING_CONTROL_RD_EN_1 (1 << 3)
+/* Enable logging the original transaction strobes */
+#define NB_PCIE_LOGGING_CONTROL_STRB_EN (1 << 4)
+/* When this bit is set, read will always progress forward (will ... */
+#define NB_PCIE_LOGGING_CONTROL_FREE_RD_ON_WR_EMPTY_EN (1 << 5)
+/* Free stalled read whenever write fifo head pointer bit[31] is ... */
+#define NB_PCIE_LOGGING_CONTROL_FREE_RD_ON_WR_FIFO_PTR_UPD_EN (1 << 6)
+/* Push pended latched read notification to the current pushed w ... */
+#define NB_PCIE_LOGGING_CONTROL_WR_FIFO_PUSH_LATCH_RD_STATUS_EN (1 << 7)
+/* Read latch timeout enable */
+#define NB_PCIE_LOGGING_CONTROL_RD_TIMEOUT_EN (1 << 8)
+/* Logging window low */
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_LOW_SHIFT 6
+
+/**** Wr_Window_High register ****/
+/* Window high address bits
+Supports 40-bits memory addressing */
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_HIGH_SHIFT 0
+/* Size maskCorresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_SIZE_MASK_SHIFT 6
+/* FIFO base address.
+Must be aligned to 4KB */
+#define NB_PCIE_LOGGING_FIFO_BASE_ADDR_MASK 0xFFFFF000
+#define NB_PCIE_LOGGING_FIFO_BASE_ADDR_SHIFT 12
+
+/**** FIFO_Size register ****/
+/* FIFO size maskCorresponds to FIFO base address bits 19:12 */
+#define NB_PCIE_LOGGING_FIFO_SIZE_MASK_19_12_MASK 0x000000FF
+#define NB_PCIE_LOGGING_FIFO_SIZE_MASK_19_12_SHIFT 0
+/* Logging window low */
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_1_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_LOW_1_LOW_SHIFT 6
+
+/**** Wr_Window_High_1 register ****/
+/* Window high address bits
+Supports 40-bits memory addressing */
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_1_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_WR_WINDOW_HIGH_1_HIGH_SHIFT 0
+/* Size maskCorresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_1_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_WR_WINDOW_SIZE_1_SIZE_MASK_SHIFT 6
+/* FIFO base address.
+Must be aligned to 4KB */
+#define NB_PCIE_LOGGING_FIFO_BASE_1_ADDR_MASK 0xFFFFF000
+#define NB_PCIE_LOGGING_FIFO_BASE_1_ADDR_SHIFT 12
+
+/**** FIFO_Size_1 register ****/
+/* FIFO size maskCorresponds to FIFO base address bits 19:12 */
+#define NB_PCIE_LOGGING_FIFO_SIZE_1_MASK_19_12_MASK 0x000000FF
+#define NB_PCIE_LOGGING_FIFO_SIZE_1_MASK_19_12_SHIFT 0
+/* Logging window low */
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_LOW_SHIFT 6
+
+/**** Rd_Window_High register ****/
+/* Window high address bits
+Supports 40-bits memory addressing */
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_HIGH_SHIFT 0
+/* Size maskCorresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_SIZE_MASK_SHIFT 6
+
+/**** Read_Latch register ****/
+/* Set by hardware when the read address is stalled and latched ... */
+#define NB_PCIE_LOGGING_READ_LATCH_VALID (1 << 0)
+/* Latched read address [30:0] */
+#define NB_PCIE_LOGGING_READ_LATCH_ADDR_MASK 0xFFFFFFFE
+#define NB_PCIE_LOGGING_READ_LATCH_ADDR_SHIFT 1
+/* Logging window low */
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_1_LOW_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_LOW_1_LOW_SHIFT 6
+
+/**** Rd_Window_High_1 register ****/
+/* Window high address bits
+Supports 40-bits memory addressing */
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_1_HIGH_MASK 0x000000FF
+#define NB_PCIE_LOGGING_RD_WINDOW_HIGH_1_HIGH_SHIFT 0
+/* Size maskCorresponds to window low bits 31:6 */
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_1_SIZE_MASK_MASK 0xFFFFFFC0
+#define NB_PCIE_LOGGING_RD_WINDOW_SIZE_1_SIZE_MASK_SHIFT 6
+
+/**** Read_Latch_1 register ****/
+/* Set by hardware when the read address is stalled and latched ... */
+#define NB_PCIE_LOGGING_READ_LATCH_1_VALID (1 << 0)
+/* Latched read address [30:0] */
+#define NB_PCIE_LOGGING_READ_LATCH_1_ADDR_MASK 0xFFFFFFFE
+#define NB_PCIE_LOGGING_READ_LATCH_1_ADDR_SHIFT 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_NB_REG_H */
+
+/** @} end of ... group */
+
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_pbs_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_pbs_regs.h
new file mode 100644
index 0000000..de83719
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_pbs_regs.h
@@ -0,0 +1,963 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_PBS_REG_H
+#define __AL_PBS_REG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct al_pbs_unit {
+ uint32_t conf_bus; /* conf_bus, configuration of ... */
+ uint32_t dram_0_nb_bar_high; /* PASW high */
+ uint32_t dram_0_nb_bar_low; /* PASW low */
+ uint32_t dram_1_nb_bar_high; /* PASW high */
+ uint32_t dram_1_nb_bar_low; /* PASW low */
+ uint32_t dram_2_nb_bar_high; /* PASW high */
+ uint32_t dram_2_nb_bar_low; /* PASW low */
+ uint32_t dram_3_nb_bar_high; /* PASW high */
+ uint32_t dram_3_nb_bar_low; /* PASW low */
+ uint32_t msix_nb_bar_high; /* PASW high */
+ uint32_t msix_nb_bar_low; /* PASW low */
+ uint32_t dram_0_sb_bar_high; /* PASW high */
+ uint32_t dram_0_sb_bar_low; /* PASW low */
+ uint32_t dram_1_sb_bar_high; /* PASW high */
+ uint32_t dram_1_sb_bar_low; /* PASW low */
+ uint32_t dram_2_sb_bar_high; /* PASW high */
+ uint32_t dram_2_sb_bar_low; /* PASW low */
+ uint32_t dram_3_sb_bar_high; /* PASW high */
+ uint32_t dram_3_sb_bar_low; /* PASW low */
+ uint32_t msix_sb_bar_high; /* PASW high */
+ uint32_t msix_sb_bar_low; /* PASW low */
+ uint32_t pcie_mem0_bar_high; /* PASW high */
+ uint32_t pcie_mem0_bar_low; /* PASW low */
+ uint32_t pcie_mem1_bar_high; /* PASW high */
+ uint32_t pcie_mem1_bar_low; /* PASW low */
+ uint32_t pcie_mem2_bar_high; /* PASW high */
+ uint32_t pcie_mem2_bar_low; /* PASW low */
+ uint32_t pcie_ext_ecam0_bar_high; /* PASW high */
+ uint32_t pcie_ext_ecam0_bar_low; /* PASW low */
+ uint32_t pcie_ext_ecam1_bar_high; /* PASW high */
+ uint32_t pcie_ext_ecam1_bar_low; /* PASW low */
+ uint32_t pcie_ext_ecam2_bar_high; /* PASW high */
+ uint32_t pcie_ext_ecam2_bar_low; /* PASW low */
+ uint32_t pbs_nor_bar_high; /* PASW high */
+ uint32_t pbs_nor_bar_low; /* PASW low */
+ uint32_t pbs_spi_bar_high; /* PASW high */
+ uint32_t pbs_spi_bar_low; /* PASW low */
+ uint32_t rsrvd_0[3];
+ uint32_t pbs_nand_bar_high; /* PASW high */
+ uint32_t pbs_nand_bar_low; /* PASW low */
+ uint32_t pbs_int_mem_bar_high; /* PASW high */
+ uint32_t pbs_int_mem_bar_low; /* PASW low */
+ uint32_t pbs_boot_bar_high; /* PASW high */
+ uint32_t pbs_boot_bar_low; /* PASW low */
+ uint32_t nb_int_bar_high; /* PASW high */
+ uint32_t nb_int_bar_low; /* PASW low */
+ uint32_t nb_stm_bar_high; /* PASW high */
+ uint32_t nb_stm_bar_low; /* PASW low */
+ uint32_t pcie_ecam_int_bar_high; /* PASW high */
+ uint32_t pcie_ecam_int_bar_low; /* PASW low */
+ uint32_t pcie_mem_int_bar_high; /* PASW high */
+ uint32_t pcie_mem_int_bar_low; /* PASW low */
+ uint32_t winit_cntl; /* control */
+ uint32_t latch_bars; /* control */
+ uint32_t pcie_conf_0; /* control */
+ uint32_t pcie_conf_1; /* control */
+ uint32_t serdes_mux_pipe; /* control */
+ uint32_t dma_io_master_map; /* control */
+ uint32_t i2c_pld_status_high; /* status */
+ uint32_t i2c_pld_status_low; /* status */
+ uint32_t spi_dbg_status_high; /* status */
+ uint32_t spi_dbg_status_low; /* status */
+ uint32_t spi_mst_status_high; /* status */
+ uint32_t spi_mst_status_low; /* status */
+ uint32_t mem_pbs_parity_err_high; /* log */
+ uint32_t mem_pbs_parity_err_low; /* log */
+ uint32_t boot_strap; /* log */
+ uint32_t cfg_axi_conf_0; /* conf */
+ uint32_t cfg_axi_conf_1; /* conf */
+ uint32_t cfg_axi_conf_2; /* conf */
+ uint32_t cfg_axi_conf_3; /* conf */
+ uint32_t spi_mst_conf_0; /* conf */
+ uint32_t spi_mst_conf_1; /* conf */
+ uint32_t spi_slv_conf_0; /* conf */
+ uint32_t apb_mem_conf_int; /* conf */
+ uint32_t sb2nb_cfg_dram_remap; /* PASW remap register */
+ uint32_t pbs_mux_sel_0; /* control */
+ uint32_t pbs_mux_sel_1; /* control */
+ uint32_t pbs_mux_sel_2; /* control */
+ uint32_t pbs_mux_conf; /* control */
+ uint32_t sb_int_bar_high; /* PASW high */
+ uint32_t sb_int_bar_low; /* PASW low */
+ uint32_t ufc_pbs_parity_err_high; /* log */
+ uint32_t ufc_pbs_parity_err_low; /* log */
+ uint32_t gen_conf; /* cntl */
+ uint32_t cpu_debug; /* cntl */
+ uint32_t uart0_debug; /* status */
+ uint32_t uart1_debug; /* status */
+ uint32_t uart2_debug; /* status */
+ uint32_t uart3_debug; /* status */
+ uint32_t uart0_conf_status; /* cntl */
+ uint32_t uart1_conf_status; /* cntl */
+ uint32_t uart2_conf_status; /* cntl */
+ uint32_t uart3_conf_status; /* cntl */
+ uint32_t gpio0_conf_status; /* cntl */
+ uint32_t gpio1_conf_status; /* cntl */
+ uint32_t gpio2_conf_status; /* cntl */
+ uint32_t gpio3_conf_status; /* cntl */
+ uint32_t gpio4_conf_status; /* cntl */
+ uint32_t i2c_gen_conf_status; /* cntl */
+ uint32_t i2c_gen_debug; /* cntl */
+ uint32_t watch_dog_reset_out; /* cntl */
+ uint32_t otp_magic_num; /* cntl */
+ uint32_t otp_cntl; /* cntl */
+ uint32_t otp_cfg_0; /* cfg */
+ uint32_t otp_cfg_1; /* cfg */
+ uint32_t otp_cfg_3; /* cfg */
+ uint32_t cfg_nand_0; /* cfg */
+ uint32_t cfg_nand_1; /* cfg */
+ uint32_t cfg_nand_2; /* cfg */
+ uint32_t cfg_nand_3; /* cfg */
+ uint32_t nb_nic_regs_bar_high; /* PASW high */
+ uint32_t nb_nic_regs_bar_low; /* PASW low */
+ uint32_t sb_nic_regs_bar_high; /* PASW high */
+ uint32_t sb_nic_regs_bar_low; /* PASW low */
+ uint32_t serdes_mux_multi_0; /* control */
+ uint32_t serdes_mux_multi_1; /* control */
+ uint32_t pbs_ulpi_mux_conf; /* control */
+ uint32_t wr_once_dbg_dis_ovrd_reg; /* cntl */
+ uint32_t gpio5_conf_status; /* cntl */
+ uint32_t rsrvd[6];
+};
+
+struct al_pbs_regs {
+ struct al_pbs_unit unit;
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** conf_bus register ****/
+/* read slave error enable */
+#define PBS_UNIT_CONF_BUS_RD_SLVERR_EN (1 << 0)
+/* write slave error enable */
+#define PBS_UNIT_CONF_BUS_WR_SLVERR_EN (1 << 1)
+/* read decode error enable */
+#define PBS_UNIT_CONF_BUS_RD_DECERR_EN (1 << 2)
+/* write decode error enable */
+#define PBS_UNIT_CONF_BUS_WR_DECERR_EN (1 << 3)
+/* for debug clear the apb SM */
+#define PBS_UNIT_CONF_BUS_CLR_APB_FSM (1 << 4)
+/* for debug clear the WFIFO */
+#define PBS_UNIT_CONF_BUS_CLR_WFIFO_CLEAR (1 << 5)
+/* Arbiter between read and write channel */
+#define PBS_UNIT_CONF_BUS_WRR_CNT_MASK 0x000001C0
+#define PBS_UNIT_CONF_BUS_WRR_CNT_SHIFT 6
+
+/**** dram_0_nb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_0_NB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_1_nb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_1_NB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_2_nb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_2_NB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_3_nb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_3_NB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** msix_nb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_MSIX_NB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_MSIX_NB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_MSIX_NB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_MSIX_NB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_MSIX_NB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_MSIX_NB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_0_sb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_0_SB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_1_sb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_1_SB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_2_sb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_2_SB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** dram_3_sb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_DRAM_3_SB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** msix_sb_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_MSIX_SB_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_MSIX_SB_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_MSIX_SB_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_MSIX_SB_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_MSIX_SB_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_MSIX_SB_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_mem0_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_MEM0_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_mem1_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_MEM1_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_mem2_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_MEM2_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_ext_ecam0_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_EXT_ECAM0_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_ext_ecam1_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_EXT_ECAM1_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_ext_ecam2_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_EXT_ECAM2_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pbs_nor_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PBS_NOR_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PBS_NOR_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PBS_NOR_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PBS_NOR_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PBS_NOR_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_NOR_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pbs_spi_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PBS_SPI_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PBS_SPI_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PBS_SPI_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PBS_SPI_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PBS_SPI_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_SPI_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pbs_nand_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PBS_NAND_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PBS_NAND_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PBS_NAND_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PBS_NAND_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PBS_NAND_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_NAND_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pbs_int_mem_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_INT_MEM_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pbs_boot_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_BOOT_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** nb_int_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_NB_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_NB_INT_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_NB_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_NB_INT_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_NB_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_NB_INT_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** nb_stm_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_NB_STM_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_NB_STM_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_NB_STM_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_NB_STM_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_NB_STM_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_NB_STM_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_ecam_int_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_ECAM_INT_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** pcie_mem_int_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_PCIE_MEM_INT_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** winit_cntl register ****/
+/* When set, enables access to winit regs in normal mode. */
+#define PBS_UNIT_WINIT_CNTL_ENABLE_WINIT_REGS_ACCESS (1 << 0)
+/* Rsrvd */
+#define PBS_UNIT_WINIT_CNTL_RSRVD_MASK 0xFFFFFFFE
+#define PBS_UNIT_WINIT_CNTL_RSRVD_SHIFT 1
+
+/**** latch_bars register ****/
+/* The SW should clear this bit before any bar update, and reset ... */
+#define PBS_UNIT_LATCH_BARS_ENABLE (1 << 0)
+/* Rsrvd */
+#define PBS_UNIT_LATCH_BARS_RSRVD_MASK 0xFFFFFFFE
+#define PBS_UNIT_LATCH_BARS_RSRVD_SHIFT 1
+
+/**** pcie_conf_0 register ****/
+/* NOT_use, config internal inside each PCIe core */
+#define PBS_UNIT_PCIE_CONF_0_DEVS_TYPE_MASK 0x00000FFF
+#define PBS_UNIT_PCIE_CONF_0_DEVS_TYPE_SHIFT 0
+/* sys_aux_det value */
+#define PBS_UNIT_PCIE_CONF_0_SYS_AUX_PWR_DET_VEC_MASK 0x00007000
+#define PBS_UNIT_PCIE_CONF_0_SYS_AUX_PWR_DET_VEC_SHIFT 12
+/* Rsrvd */
+#define PBS_UNIT_PCIE_CONF_0_RSRVD_MASK 0xFFFF8000
+#define PBS_UNIT_PCIE_CONF_0_RSRVD_SHIFT 15
+
+/**** pcie_conf_1 register ****/
+/* which pcie exist, the PCIe device will be under reset until ... */
+#define PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK 0x00000007
+#define PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT 0
+/* Rsrvd */
+#define PBS_UNIT_PCIE_CONF_1_RSRVD_MASK 0xFFFFFFF8
+#define PBS_UNIT_PCIE_CONF_1_RSRVD_SHIFT 3
+
+/**** serdes_mux_pipe register ****/
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_2_MASK 0x00000007
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_2_SHIFT 0
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_3 (1 << 3)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_3_MASK 0x00000070
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_SERDES_3_SHIFT 4
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_7 (1 << 7)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_0_MASK 0x00000300
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_0_SHIFT 8
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_1_MASK 0x00000C00
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_B_1_SHIFT 10
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_0_MASK 0x00003000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_0_SHIFT 12
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_1_MASK 0x0000C000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_PCI_C_1_SHIFT 14
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_A_0_MASK 0x00030000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_A_0_SHIFT 16
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_B_0_MASK 0x000C0000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_USB_B_0_SHIFT 18
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_2_MASK 0x00300000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_2_SHIFT 20
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_23_22_MASK 0x00C00000
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_23_22_SHIFT 22
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_3_MASK 0x07000000
+#define PBS_UNIT_SERDES_MUX_PIPE_SELECT_OH_CLKI_SER_3_SHIFT 24
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_MASK 0xF8000000
+#define PBS_UNIT_SERDES_MUX_PIPE_RSRVD_SHIFT 27
+
+/**** dma_io_master_map register ****/
+/* when set map all the dma_io transaction to the dram, regardle ... */
+#define PBS_UNIT_DMA_IO_MASTER_MAP_CNTL (1 << 0)
+/* Rsrvd */
+#define PBS_UNIT_DMA_IO_MASTER_MAP_RSRVD_MASK 0xFFFFFFFE
+#define PBS_UNIT_DMA_IO_MASTER_MAP_RSRVD_SHIFT 1
+
+/**** i2c_pld_status_high register ****/
+/* i2c pre load status */
+#define PBS_UNIT_I2C_PLD_STATUS_HIGH_STATUS_MASK 0x000000FF
+#define PBS_UNIT_I2C_PLD_STATUS_HIGH_STATUS_SHIFT 0
+
+/**** spi_dbg_status_high register ****/
+/* spi dbg load status */
+#define PBS_UNIT_SPI_DBG_STATUS_HIGH_STATUS_MASK 0x000000FF
+#define PBS_UNIT_SPI_DBG_STATUS_HIGH_STATUS_SHIFT 0
+
+/**** spi_mst_status_high register ****/
+/* spi mst load status */
+#define PBS_UNIT_SPI_MST_STATUS_HIGH_STATUS_MASK 0x000000FF
+#define PBS_UNIT_SPI_MST_STATUS_HIGH_STATUS_SHIFT 0
+
+/**** mem_pbs_parity_err_high register ****/
+/* address latch in case of error */
+#define PBS_UNIT_MEM_PBS_PARITY_ERR_HIGH_ADDR_MASK 0x000000FF
+#define PBS_UNIT_MEM_PBS_PARITY_ERR_HIGH_ADDR_SHIFT 0
+
+/**** cfg_axi_conf_0 register ****/
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_RD_ID_MASK 0x0000007F
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_RD_ID_SHIFT 0
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_WR_ID_MASK 0x00003F80
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_WR_ID_SHIFT 7
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_PLD_WR_ID_MASK 0x001FC000
+#define PBS_UNIT_CFG_AXI_CONF_0_PLD_WR_ID_SHIFT 14
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AWCACHE_MASK 0x01E00000
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AWCACHE_SHIFT 21
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_ARCACHE_MASK 0x1E000000
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_ARCACHE_SHIFT 25
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AXPROT_MASK 0xE0000000
+#define PBS_UNIT_CFG_AXI_CONF_0_DBG_AXPROT_SHIFT 29
+
+/**** cfg_axi_conf_1 register ****/
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARUSER_MASK 0x03FFFFFF
+#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARUSER_SHIFT 0
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARQOS_MASK 0x3C000000
+#define PBS_UNIT_CFG_AXI_CONF_1_DBG_ARQOS_SHIFT 26
+
+/**** cfg_axi_conf_2 register ****/
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWUSER_MASK 0x03FFFFFF
+#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWUSER_SHIFT 0
+/* value */
+#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_MASK 0x3C000000
+#define PBS_UNIT_CFG_AXI_CONF_2_DBG_AWQOS_SHIFT 26
+
+/**** spi_mst_conf_0 register ****/
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SRL (1 << 0)
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SCPOL (1 << 1)
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SCPH (1 << 2)
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SER_MASK 0x00000078
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_SER_SHIFT 3
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_BAUD_MASK 0x007FFF80
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_BAUD_SHIFT 7
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_RD_CMD_MASK 0x7F800000
+#define PBS_UNIT_SPI_MST_CONF_0_CFG_SPI_MST_RD_CMD_SHIFT 23
+
+/**** spi_mst_conf_1 register ****/
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_WR_CMD_MASK 0x000000FF
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_WR_CMD_SHIFT 0
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_ADDR_BYTES_NUM_MASK 0x00000700
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_ADDR_BYTES_NUM_SHIFT 8
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_TMODE_MASK 0x00001800
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_TMODE_SHIFT 11
+/* value */
+#define PBS_UNIT_SPI_MST_CONF_1_CFG_SPI_MST_FAST_RD (1 << 13)
+
+/**** spi_slv_conf_0 register ****/
+/* value */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_BAUD_MASK 0x0000FFFF
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_BAUD_SHIFT 0
+/* value. The reset value is according to boot strap */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SCPOL (1 << 16)
+/* value. The reset value is according to boot strap */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SCPH (1 << 17)
+/* value */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SER_MASK 0x03FC0000
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SER_SHIFT 18
+/* value */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_SRL (1 << 26)
+/* value */
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_TMODE_MASK 0x18000000
+#define PBS_UNIT_SPI_SLV_CONF_0_CFG_SPI_SLV_TMODE_SHIFT 27
+
+/**** apb_mem_conf_int register ****/
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_WRR_CNT_MASK 0x00000007
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_WRR_CNT_SHIFT 0
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_I2C_PLD_APB_MIX_ARB (1 << 3)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_DBG_APB_MIX_ARB (1 << 4)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_MST_APB_MIX_ARB (1 << 5)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_I2C_PLD_CLEAR_FSM (1 << 6)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_DBG_CLEAR_FSM (1 << 7)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_SPI_MST_CLEAR_FSM (1 << 8)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_AXI_FSM_CLEAR (1 << 9)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_PBS_AXI_FIFOS_CLEAR (1 << 10)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_BOOTROM_PARITY_EN (1 << 11)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_RD_SLV_ERR_EN (1 << 12)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_RD_DEC_ERR_EN (1 << 13)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_WR_SLV_ERR_EN (1 << 14)
+/* value */
+#define PBS_UNIT_APB_MEM_CONF_INT_CFG_WR_DEC_ERR_EN (1 << 15)
+
+/**** sb_int_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_SB_INT_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_SB_INT_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_SB_INT_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_SB_INT_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_SB_INT_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_SB_INT_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** ufc_pbs_parity_err_high register ****/
+/* address latch in case of error */
+#define PBS_UNIT_UFC_PBS_PARITY_ERR_HIGH_ADDR_MASK 0x000000FF
+#define PBS_UNIT_UFC_PBS_PARITY_ERR_HIGH_ADDR_SHIFT 0
+
+/**** uart0_conf_status register ****/
+/* Conf:// [0] -- DSR_N RW bit// [1] -- DCD_N RW bit// [2] -- RI ... */
+#define PBS_UNIT_UART0_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_UART0_CONF_STATUS_CONF_SHIFT 0
+/* Status:// [16] -- dtr_n RO bit// [17] -- OUT1_N RO bit// [18] ... */
+#define PBS_UNIT_UART0_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_UART0_CONF_STATUS_STATUS_SHIFT 16
+
+/**** uart1_conf_status register ****/
+/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */
+#define PBS_UNIT_UART1_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_UART1_CONF_STATUS_CONF_SHIFT 0
+/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */
+#define PBS_UNIT_UART1_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_UART1_CONF_STATUS_STATUS_SHIFT 16
+
+/**** uart2_conf_status register ****/
+/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */
+#define PBS_UNIT_UART2_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_UART2_CONF_STATUS_CONF_SHIFT 0
+/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */
+#define PBS_UNIT_UART2_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_UART2_CONF_STATUS_STATUS_SHIFT 16
+
+/**** uart3_conf_status register ****/
+/* Conf: // [0] -- DSR_N RW bit // [1] -- DCD_N RW bit // [2] -- ... */
+#define PBS_UNIT_UART3_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_UART3_CONF_STATUS_CONF_SHIFT 0
+/* Status: // [16] -- dtr_n RO bit // [17] -- OUT1_N RO bit // [ ... */
+#define PBS_UNIT_UART3_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_UART3_CONF_STATUS_STATUS_SHIFT 16
+
+/**** gpio0_conf_status register ****/
+/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */
+#define PBS_UNIT_GPIO0_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO0_CONF_STATUS_CONF_SHIFT 0
+/* status:
+// [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO0_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO0_CONF_STATUS_STATUS_SHIFT 16
+
+/**** gpio1_conf_status register ****/
+/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */
+#define PBS_UNIT_GPIO1_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO1_CONF_STATUS_CONF_SHIFT 0
+/* status:
+// [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO1_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO1_CONF_STATUS_STATUS_SHIFT 16
+
+/**** gpio2_conf_status register ****/
+/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */
+#define PBS_UNIT_GPIO2_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO2_CONF_STATUS_CONF_SHIFT 0
+/* status:
+// [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO2_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO2_CONF_STATUS_STATUS_SHIFT 16
+
+/**** gpio3_conf_status register ****/
+/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */
+#define PBS_UNIT_GPIO3_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO3_CONF_STATUS_CONF_SHIFT 0
+/* status:
+// [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO3_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO3_CONF_STATUS_STATUS_SHIFT 16
+
+/**** gpio4_conf_status register ****/
+/* Cntl:// [7:0] nGPAFEN; // from regfile// [15:8 ... */
+#define PBS_UNIT_GPIO4_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO4_CONF_STATUS_CONF_SHIFT 0
+/* status:
+// [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO4_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO4_CONF_STATUS_STATUS_SHIFT 16
+
+/**** i2c_gen_conf_status register ****/
+/* cntl
+// [0] -- dma_tx_ack
+// [1] -- dma_rx_ack */
+#define PBS_UNIT_I2C_GEN_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_I2C_GEN_CONF_STATUS_CONF_SHIFT 0
+/* Status// [16] -- dma_tx_req RO bit// [17] -- dma_tx_single RO ... */
+#define PBS_UNIT_I2C_GEN_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_I2C_GEN_CONF_STATUS_STATUS_SHIFT 16
+
+/**** watch_dog_reset_out register ****/
+/* [0] if set to 1'b1, WD0 can not generate reset_out_n[1] if se ... */
+#define PBS_UNIT_WATCH_DOG_RESET_OUT_DISABLE_MASK 0x0000000F
+#define PBS_UNIT_WATCH_DOG_RESET_OUT_DISABLE_SHIFT 0
+
+/**** otp_cntl register ****/
+/* from reg file Config To bypass the copy from OTPW to OTPR */
+#define PBS_UNIT_OTP_CNTL_IGNORE_OTPW (1 << 0)
+/* Not use comes from bond. */
+#define PBS_UNIT_OTP_CNTL_IGNORE_PRELOAD (1 << 1)
+/* margin read from the fuse box */
+#define PBS_UNIT_OTP_CNTL_OTPW_MARGIN_READ (1 << 2)
+/* Indicate when OTP busy */
+#define PBS_UNIT_OTP_CNTL_OTP_BUSY (1 << 3)
+
+/**** otp_cfg_0 register ****/
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_PWRDN_CNT_MASK 0x0000FFFF
+#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_PWRDN_CNT_SHIFT 0
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_READ_CNT_MASK 0xFFFF0000
+#define PBS_UNIT_OTP_CFG_0_CFG_OTPW_READ_CNT_SHIFT 16
+
+/**** otp_cfg_1 register ****/
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PGM_CNT_MASK 0x0000FFFF
+#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PGM_CNT_SHIFT 0
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PREP_CNT_MASK 0xFFFF0000
+#define PBS_UNIT_OTP_CFG_1_CFG_OTPW_PREP_CNT_SHIFT 16
+
+/**** otp_cfg_3 register ****/
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PS18_CNT_MASK 0x0000FFFF
+#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PS18_CNT_SHIFT 0
+/* cfg to OTP cntl. */
+#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PWRUP_CNT_MASK 0xFFFF0000
+#define PBS_UNIT_OTP_CFG_3_CFG_OTPW_PWRUP_CNT_SHIFT 16
+
+/**** nb_nic_regs_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_NB_NIC_REGS_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** sb_nic_regs_bar_low register ****/
+/* window size = 2 ^ (15 + win_size), zero value disable the win ... */
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_WIN_SIZE_MASK 0x0000003F
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_WIN_SIZE_SHIFT 0
+/* reserved fields */
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_RSRVD_MASK 0x0000FFC0
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_RSRVD_SHIFT 6
+/* Rsrvd */
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_ADDR_HIGH_MASK 0xFFFF0000
+#define PBS_UNIT_SB_NIC_REGS_BAR_LOW_ADDR_HIGH_SHIFT 16
+
+/**** serdes_mux_multi_0 register ****/
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_8_MASK 0x00000007
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_8_SHIFT 0
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_3 (1 << 3)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_9_MASK 0x00000070
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_9_SHIFT 4
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_7 (1 << 7)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_10_MASK 0x00000700
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_10_SHIFT 8
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_11 (1 << 11)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_11_MASK 0x00007000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_11_SHIFT 12
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_15 (1 << 15)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_12_MASK 0x00030000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_12_SHIFT 16
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_13_MASK 0x000C0000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_13_SHIFT 18
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_14_MASK 0x00300000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_14_SHIFT 20
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_15_MASK 0x00C00000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_SELECT_OH_SERDES_15_SHIFT 22
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_MASK 0xFF000000
+#define PBS_UNIT_SERDES_MUX_MULTI_0_RSRVD_SHIFT 24
+
+/**** serdes_mux_multi_1 register ****/
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_A_0_MASK 0x00000003
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_A_0_SHIFT 0
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_3_2_MASK 0x0000000C
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_3_2_SHIFT 2
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_B_0_MASK 0x00000070
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_B_0_SHIFT 4
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_7 (1 << 7)
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_C_0_MASK 0x00000300
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_C_0_SHIFT 8
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_11_10_MASK 0x00000C00
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_11_10_SHIFT 10
+/* serdes one hot mux control. */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_D_0_MASK 0x00007000
+#define PBS_UNIT_SERDES_MUX_MULTI_1_SELECT_OH_ETH_D_0_SHIFT 12
+/* Rsrvd */
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_MASK 0xFFFF8000
+#define PBS_UNIT_SERDES_MUX_MULTI_1_RSRVD_SHIFT 15
+
+/**** pbs_ulpi_mux_conf register ****/
+/* Value 0 -- select dedicated pins for the USB-1 inputs */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_SEL_UPLI_IN_PBSMUX_MASK 0x000007FF
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_SEL_UPLI_IN_PBSMUX_SHIFT 0
+/* [3] - force to zero[2] == 1 - force register selection [1 : ... */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_REG_MDIO_BYPASS_SEL_MASK 0x0000F000
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_REG_MDIO_BYPASS_SEL_SHIFT 12
+/* [0] set the clk_ulpi OE for USB0, 1'b0 set to input 1'b1 set ... */
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_RSRVD_MASK 0xFFFF0000
+#define PBS_UNIT_PBS_ULPI_MUX_CONF_RSRVD_SHIFT 16
+
+/**** wr_once_dbg_dis_ovrd_reg register ****/
+/* This register can be written only once. */
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_WR_ONCE_DBG_DIS_OVRD (1 << 0)
+
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_RSRVD_MASK 0xFFFFFFFE
+#define PBS_UNIT_WR_ONCE_DBG_DIS_OVRD_REG_RSRVD_SHIFT 1
+
+/**** gpio5_conf_status register ****/
+/* Cntl: // [7:0] nGPAFEN; // from regfile // [15 ... */
+#define PBS_UNIT_GPIO5_CONF_STATUS_CONF_MASK 0x0000FFFF
+#define PBS_UNIT_GPIO5_CONF_STATUS_CONF_SHIFT 0
+/* status: // [24:16] GPAFIN; // to regfile */
+#define PBS_UNIT_GPIO5_CONF_STATUS_STATUS_MASK 0xFFFF0000
+#define PBS_UNIT_GPIO5_CONF_STATUS_STATUS_SHIFT 16
+
+/**** pbs_sb2nb_cfg_dram_remap register ****/
+#define PBS_UNIT_SB2NB_REMAP_BASE_ADDR_SHIFT 5
+#define PBS_UNIT_SB2NB_REMAP_BASE_ADDR_MASK 0x0000FFE0
+#define PBS_UNIT_SB2NB_REMAP_TRANSL_BASE_ADDR_SHIFT 21
+#define PBS_UNIT_SB2NB_REMAP_TRANSL_BASE_ADDR_MASK 0xFFE00000
+
+/* For remapping are used bits [39 - 29] of DRAM 40bit Physical address */
+#define PBS_UNIT_DRAM_SRC_REMAP_BASE_ADDR_SHIFT 29
+#define PBS_UNIT_DRAM_DST_REMAP_BASE_ADDR_SHIFT 29
+#define PBS_UNIT_DRAM_REMAP_BASE_ADDR_MASK 0xFFE0000000
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_PBS_REG_H */
+
+
+
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_pcie.h b/arch/arm/mach-alpine/include/al_hal/al_hal_pcie.h
new file mode 100644
index 0000000..cedb1d0
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_pcie.h
@@ -0,0 +1,827 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup grouppcie PCI Express Controller
+ * @{
+ * @section overview Overview
+ * This header file provide API for the HAL driver of the pcie port, the driver
+ * provides the following functionalities:
+ * - Port initialization
+ * - Link operation
+ * - Interrupts transactions generation (Endpoint mode).
+ * - Configuration Access management functions
+ * - Internal Translation Unit programming
+ *
+ * This API does not provide the following:
+ * - PCIe transactions generation and reception (except interrupts as mentioned
+ * above) as this functionality is done by the port without need for sw
+ * intervention.
+ * - Configuration Access: those transactions are generated automatically by
+ * the port (ECAM or ATU mode) when the CPU issues memory transaction
+ * through the fabric toward the PCIe port. This API provides management
+ * function for controlling the Configuration Access type and bus destination
+ * - Interrupt Handling.
+ * - Message Generation: common used messages are automatically generated, also,
+ * the ATU generic mechanism for generating various kind of messages.
+ * - PCIe Port Management: both link and port power management features can be
+ * managed using the PCI/PCIe standard power management and PCIe capabilities
+ * registers.
+ * - PCIe link and protocol error handling: the feature can be managed using
+ * the Advanced Error Handling PCIe capability registers.
+ *
+ * @section flows Software Flows
+ * @subsection init Initialization
+ * - allocate and zero an al_pcie_port structure handle
+ * - call al_pcie_handle_init() with pointer to the allocated al_pcie_port handle,
+ * address of the port internal registers space, and port id.
+ * - set the port mode, End-Point or Root-Compex (default).
+ * - set number of lanes connected to the controller.
+ * - enable the controller using the al_pcie_port_enable(). note that this function
+ * expect the virtual address of the PBS registers as first parameter.
+ * - wait for 2000 Southbridge cycles.
+ * - prepare al_pcie_config_params structure depending on chip, board and system
+ * configuration.
+ * for example, when using the port as root complex, the function_mode field
+ * should be set to AL_PCIE_FUNCTION_MODE_RC. In this example we prepare the
+ * following configuration:
+ * - Root Complex mode
+ * - Set the Max Link Speed to Gen2
+ * - Set the max lanes width to 2 (x2)
+ * - Disable reversal mode
+ * - Enable Snoops to support I/O Hardware cache coherency
+ * - Enable pcie core RAM parity
+ * - Enable pcie core AXI parity
+ * - Keep transaction layer default credits
+ * so the structures we prepare:
+ * @code
+ * - struct al_pcie_link_params link_params = { AL_PCIE_LINK_SPEED_GEN2,
+ * AL_FALSE}; // disable reversal mode
+ *
+ * - struct al_pcie_config_params config_params = { AL_PCIE_FUNCTION_MODE_RC,
+ * &link_params,
+ * AL_TRUE, // enable Snoop for inbound memory transactions
+ * AL_TRUE, // enable pcie port RAM parity
+ * AL_TRUE, // enable pcie port AXI parity
+ * NULL, // use default latency/replay timers
+ * NULL, // use default gen2 pipe params
+ * NULL, // gen3_params not needed when max speed set to Gen2
+ * NULL, // don't change TL credits
+ * NULL, // end point params not needed
+ * AL_FALSE, //no fast link
+ * AL_FALSE //return 0xFFFFFFFF for read transactions with pci target error
+ * }
+ * @endcode
+ * - now call al_pcie_port_config() with the handle and the config_params structure.
+ * @subsection linkinit Link Initialization
+ * - once the port configured, we can start PCIe link:
+ * - call al_pcie_link_start()
+ * - call al_pcie_link_up_wait()
+ * - allocate al_pcie_link_status struct and call al_pcie_link_status() and
+ * check the link is established.
+ *
+ * @subsection cap Configuration Access Preparation
+ * - Once the link is established, we can prepare the port for pci
+ * configuration access, this stage requires system knowledge about the PCI
+ * buses enumeration. For example, if 5 buses were discovered on previously
+ * scanned root complex port, then we should start enumeration from bus 5 (PCI
+ * secondary bus), the subordinate bus will be temporarily set to maximum
+ * value (255) until the scan process under this bus is finished, then it will
+ * be updated to the maximum bus value found. So we use the following sequence:
+ * - call al_pcie_secondary_bus_set() with secbus = 5
+ * - call al_pcie_subordinary_bus_set() with subbus = 255
+ *
+ * @subsection cfg Configuration (Cfg) Access Generation
+ * - we assume using ECAM method, in this method, the software issues pcie Cfg
+ * access by accessing the ECAM memory space of the pcie port. For example, to
+ * issue 4 byte Cfg Read from bus B, Device D, Function F and register R, the
+ * software issues 4 byte read access to the following physical address
+ * ECAM base address of the port + (B << 20) + (D << 15) + (F << 12) + R.
+ * But, as the default size of the ECAM address space is less than
+ * needed full range (256MB), we modify the target_bus value prior to Cfg
+ * access in order to make the port generate Cfg access with bus value set to the
+ * value of the target_bus rather than bits 27:20 of the physical address.
+ * - call al_pcie_target_bus_set() with target_bus set to the required bus of
+ * the next Cfg access to be issued, mask_target_bus will be set to 0xff.
+ * no need to call that function if the next Cfg access bus equals to the last
+ * value set to target_bus.
+ *
+ * @file al_hal_pcie.h
+ * @brief HAL Driver Header for the Annapurna Labs PCI Express port.
+ */
+
+#ifndef _AL_HAL_PCIE_H_
+#define _AL_HAL_PCIE_H_
+
+#include "al_hal_common.h"
+
+/****************************** Constants ***********************************/
+/** Inbound header credits sum */
+#define AL_PCIE_IB_HCRD_SUM 97
+
+/**
+ * Minimal ratio between outstanding header completions and the number of
+ * outstanding outbound reads
+ * (max request size / cache line) + 1 = 256/64+1
+ */
+#define AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO 5
+
+/** Maximal value for outstanding headers reads and header writes */
+#define AL_PCIE_NOF_P_NP_HDR_MAX 24
+
+/*********************** Data Structures and Types **************************/
+
+/**
+ * al_pcie_port: data structure used by the HAL to handle a specific pcie port.
+ * this structure is allocated and set to zeros by the upper layer, then it is
+ * initialized by the al_pcie_handle_init() that should be called before any
+ * other function of this API. Later, this handle is passed to the API functions.
+ */
+struct al_pcie_port {
+ struct al_pcie_regs __iomem *regs;
+
+ /* Revision ID - valid after calling 'al_pcie_port_config' */
+ uint8_t rev_id;
+
+ unsigned int port_id;
+ al_bool write_to_read_only_enabled;
+ uint8_t max_lanes;
+
+ /**
+ * Determine whether configuring 'nof_np_hdr' and 'nof_p_hdr' is
+ * required in the core
+ */
+ al_bool ib_hcrd_config_required;
+
+ /* Internally used - see 'al_pcie_ib_hcrd_os_ob_reads_config' */
+ unsigned int nof_np_hdr;
+
+ /* Internally used - see 'al_pcie_ib_hcrd_os_ob_reads_config' */
+ unsigned int nof_p_hdr;
+
+ /* Internally used - see al_hal_pcie_interrupts.c */
+ uint32_t __iomem *app_int_grp_a_base;
+ uint32_t __iomem *app_int_grp_b_base;
+ uint32_t __iomem *axi_int_grp_a_base;
+};
+
+
+/** Function mode (endpoint, root complex) */
+enum al_pcie_function_mode {
+ AL_PCIE_FUNCTION_MODE_EP,
+ AL_PCIE_FUNCTION_MODE_RC,
+ AL_PCIE_FUNCTION_MODE_UNKNOWN
+};
+
+/* The maximum link speed, measured in GT/s (Giga transfers / second)
+ * DEFAULT: do not change the current speed
+ * GEN1: 2.5 GT/s
+ * GEN2: 5 GT/s
+ * GEN3: 8GT/s
+ */
+enum al_pcie_link_speed {
+ AL_PCIE_LINK_SPEED_DEFAULT,
+ AL_PCIE_LINK_SPEED_GEN1 = 1,
+ AL_PCIE_LINK_SPEED_GEN2 = 2,
+ AL_PCIE_LINK_SPEED_GEN3 = 3
+};
+
+/** PCIe capabilities that supported by a specific port */
+struct al_pcie_max_capability {
+ al_bool end_point_mode_supported;
+ al_bool root_complex_mode_supported;
+ enum al_pcie_link_speed max_speed;
+ uint8_t max_lanes;
+ al_bool reversal_supported;
+ uint8_t atu_regions_num;
+ uint32_t atu_min_size;
+};
+
+
+/** PCIe link related parameters */
+struct al_pcie_link_params {
+ enum al_pcie_link_speed max_speed;
+ al_bool enable_reversal;
+};
+
+/** PCIe gen2 link parameters */
+struct al_pcie_gen2_params {
+ al_bool tx_swing_low; /* set tx swing low when true, and tx swing full when false */
+ al_bool tx_compliance_receive_enable;
+ al_bool set_deemphasis;
+};
+
+/** PCIe gen 3 standard per lane equalization parameters */
+struct al_pcie_gen3_lane_eq_params {
+ uint8_t downstream_port_transmitter_preset;
+ uint8_t downstream_port_receiver_preset_hint;
+ uint8_t upstream_port_transmitter_preset;
+ uint8_t upstream_port_receiver_preset_hint;
+};
+
+/** PCIe gen 3 equalization parameters */
+struct al_pcie_gen3_params {
+ al_bool perform_eq;
+ al_bool interrupt_enable_on_link_eq_request;
+ struct al_pcie_gen3_lane_eq_params *eq_params; /* array of lanes params */
+ int eq_params_elements; /* number of elements in the eq_params array */
+
+ al_bool eq_disable; /* disables the equalization feature */
+ al_bool eq_phase2_3_disable; /* Equalization Phase 2 and Phase 3 */
+ /* Disable (RC mode only) */
+ uint8_t local_lf; /* Low Frequency (LF) Value for Gen3 Transmit Equalization */
+ /* Value Range: 12 through 63 (decimal).*/
+
+ uint8_t local_fs; /* Full Swing (FS) Value for Gen3 Transmit Equalization */
+};
+
+/** Transport Layer credits parameters */
+struct al_pcie_tl_credits_params {
+};
+
+/** BAR register configuration parameters (Endpoint Mode only) */
+struct al_pcie_ep_bar_params {
+ al_bool enable;
+ al_bool memory_space; /**< memory or io */
+ al_bool memory_64_bit; /**< is memory space is 64 bit */
+ al_bool memory_is_prefetchable;
+ uint64_t size; /* the bar size in bytes */
+};
+
+/** BARs configuration parameters (Endpoint Mode only) */
+struct al_pcie_ep_params {
+ al_bool cap_d1_d3hot_dis;
+ al_bool cap_flr_dis;
+ al_bool cap_aspm_dis;
+ al_bool relaxed_pcie_ordering;
+ al_bool bar_params_valid;
+ struct al_pcie_ep_bar_params bar_params[6];
+ struct al_pcie_ep_bar_params exp_bar_params;/* expansion ROM BAR*/
+};
+
+/** Various configuration features */
+struct al_pcie_features {
+ /**
+ * Enable MSI fix from the SATA to the PCIe EP
+ * Only valid for port 0, when enabled as EP
+ */
+ al_bool sata_ep_msi_fix;
+};
+
+/**
+ * Inbound posted/non-posted header credits and outstanding outbound reads
+ * completion header configuration
+ *
+ * Constraints:
+ * - nof_cpl_hdr + nof_np_hdr + nof_p_hdr == AL_PCIE_IB_HCRD_SUM
+ * - (nof_outstanding_ob_reads x AL_PCIE_NOF_CPL_HDR_NOF_OS_OB_READS_MIN_RATIO)
+ * <= nof_cpl_hdr
+ * - nof_p_hdr <= AL_PCIE_NOF_P_NP_HDR_MAX
+ * - nof_np_hdr <= AL_PCIE_NOF_P_NP_HDR_MAX
+ * - nof_cpl_hdr > 0
+ * - nof_p_hdr > 0
+ * - nof_np_hdr > 0
+ */
+struct al_pcie_ib_hcrd_os_ob_reads_config {
+ /** Max number of outstanding outbound reads */
+ uint8_t nof_outstanding_ob_reads;
+
+ /**
+ * This value sets the possible outstanding headers CMPLs, the core
+ * can get (the core always advertise infinite credits for CMPLs).
+ */
+ unsigned int nof_cpl_hdr;
+
+ /**
+ * This value sets the possible outstanding headers reads (non-posted
+ * transactions), the core can get (it set the value in the init FC
+ * process).
+ */
+ unsigned int nof_np_hdr;
+
+ /**
+ * This value sets the possible outstanding headers writes (posted
+ * transactions), the core can get (it set the value in the init FC
+ * process).
+ */
+ unsigned int nof_p_hdr;
+};
+
+/** I/O Virtualization support in EP configuration */
+struct al_pcie_ep_iov_params {
+ /**
+ * Enable multiple Virtual Functions support by propagating VMID to
+ * outbound requests
+ */
+
+ al_bool sriov_vfunc_en;
+
+ /**
+ * Fix client1 FMT bits after cutting address 63:56, fix address format
+ * to 32-bits if original request is 32-bit address.
+ */
+ al_bool support_32b_address_in_iov;
+};
+
+/** PCIe Ack/Nak Latency and Replay timers */
+struct al_pcie_latency_replay_timers {
+ uint16_t round_trip_lat_limit;
+ uint16_t replay_timer_limit;
+};
+
+/** PCIe port configuration parameters
+ * This structure includes the parameters that the HAL should apply to the port
+ * (by al_pcie_port_config()).
+ * The fields that are pointers (e.g. link_params) can be set to NULL, in that
+ * case, the al_pcie_port_config() will keep the current HW settings.
+ */
+struct al_pcie_config_params {
+ enum al_pcie_function_mode function_mode; /**< indicates at which mode the controller operates */
+ struct al_pcie_link_params *link_params;
+ al_bool enable_axi_snoop;
+ al_bool enable_ram_parity_int;
+ al_bool enable_axi_parity_int;
+ struct al_pcie_latency_replay_timers *lat_rply_timers;
+ struct al_pcie_gen2_params *gen2_params;
+ struct al_pcie_gen3_params *gen3_params;
+ struct al_pcie_tl_credits_params *tl_credits;
+ struct al_pcie_ep_params *ep_params;
+ struct al_pcie_features *features;
+ struct al_pcie_ep_iov_params *ep_iov_params;
+ al_bool fast_link_mode; /* Sets all internal timers to Fast Mode for speeding up simulation.*/
+ al_bool enable_axi_slave_err_resp; /**< when true, the PCI unit will return Slave Error/Decoding Error to the master unit in case of error. when false, the value 0xFFFFFFFF will be returned without error indication. */
+};
+
+/** PCIe link status */
+struct al_pcie_link_status {
+ al_bool link_up;
+ enum al_pcie_link_speed speed;
+ uint8_t lanes;
+ uint8_t ltssm_state;
+};
+
+/** PCIe MSIX capability configuration parameters */
+struct al_pcie_msix_params {
+ uint16_t table_size;
+ uint16_t table_offset;
+ uint8_t table_bar;
+ uint16_t pba_offset;
+ uint16_t pba_bar;
+};
+
+/*********************** PCIe Port Initialization API **************/
+/** Enable PCIe unit (deassert reset)
+ *
+ * @param pcie_port pcie port handle
+ * @param pbs_reg_base the virtual base address of the pbs registers
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_port_enable(
+ struct al_pcie_port *pcie_port,
+ void __iomem *pbs_reg_base);
+
+/** Disable PCIe unit (assert reset)
+ *
+ * @param pcie_port pcie port handle
+ * @param pbs_reg_base the virtual base address of the pbs registers
+ */
+void al_pcie_port_disable(
+ struct al_pcie_port *pcie_port,
+ void __iomem *pbs_reg_base);
+
+/**
+ * Initializes a PCIe handle structure.
+ *
+ * @param pcie_port an allocated, non-initialized instance.
+ * @param pcie_reg_base the virtual base address of the port internal registers
+ * @param port_id the port id (used mainly for debug messages)
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_handle_init(struct al_pcie_port *pcie_port,
+ void __iomem *pcie_reg_base,
+ unsigned int port_id);
+
+/**
+ * Configure number of lanes connected to this port.
+ * This function can be called only before enabling the controller using al_pcie_port_enable().
+ *
+ * @param pcie_port pcie port handle
+ * @param lanes number of lanes
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes);
+
+/**
+ * Port memory shutdown/up
+ * This function can be called only when the controller is disabled
+ *
+ * @param pcie_port pcie port handle
+ * @param enable memory shutdown enable or disable
+ *
+ */
+void al_pcie_port_memory_shutdown_set(
+ struct al_pcie_port *pcie_port,
+ al_bool enable);
+
+/**
+ * @brief set current function mode (root complex or endpoint)
+ * This function can be called only before enabling the controller using al_pcie_port_enable().
+ *
+ * @param pcie_port pcie port handle
+ * @param mode pcie port mode
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_port_func_mode_config(struct al_pcie_port *pcie_port,
+ enum al_pcie_function_mode mode);
+
+/**
+ * @brief Inbound posted/non-posted header credits and outstanding outbound
+ * reads completion header configuration
+ *
+ * @param pcie_port pcie port handle
+ * @param ib_hcrd_os_ob_reads_config
+ * Inbound header credits and outstanding outbound reads
+ * configuration
+ */
+void al_pcie_port_ib_hcrd_os_ob_reads_config(
+ struct al_pcie_port *pcie_port,
+ struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config);
+
+/**
+ * @brief return current function mode (root complex or endpoint)
+ *
+ * @param pcie_port pcie port handle
+ *
+ * @return pcie port current mode.
+ */
+enum al_pcie_function_mode
+al_pcie_function_type_get(struct al_pcie_port *pcie_port);
+
+
+/**
+ * @brief configure pcie port (mode, link params, etc..)
+ * this function must be called before initializing the link
+ *
+ * @param pcie_port pcie port handle
+ * @param params configuration structure.
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_port_config(struct al_pcie_port *pcie_port,
+ struct al_pcie_config_params *params);
+
+/**
+ * @brief Enable/disable deferring incoming configuration requests until
+ * initialization is complete. When enabled, the core completes incoming
+ * configuration requests with a Configuration Request Retry Status.
+ * Other incoming Requests complete with Unsupported Request status.
+ *
+ * @param pcie_port pcie port handle
+ * @param en enable/disable
+ */
+void al_pcie_app_req_retry_set(
+ struct al_pcie_port *pcie_port,
+ al_bool en);
+
+/**
+ * @brief configure pcie port axi snoop
+ *
+ * @param pcie_port pcie port handle
+ * @param enable_axi_snoop enable snoop.
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_port_snoop_config(struct al_pcie_port *pcie_port,
+ al_bool enable_axi_snoop);
+
+/********************** PCIE Link Operations API ********************/
+/**
+ * @brief start pcie link
+ *
+ * @param pcie_port pcie port handle
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_link_start(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief stop pcie link
+ *
+ * @param pcie_port pcie port handle
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_link_stop(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief wait for link up indication
+ * this function waits for link up indication, it polls LTSSM state until link is ready
+ *
+ * @param pcie_port pcie port handle
+ * @param timeout_ms maximum timeout in milli-seconds to wait for link up
+ *
+ * @return 0 if link up indication detected
+ * -ETIME if not.
+ */
+int al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms);
+
+/**
+ * @brief get link status
+ *
+ * @param pcie_port pcie port handle
+ * @param status structure for link status
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_link_status(struct al_pcie_port *pcie_port, struct al_pcie_link_status *status);
+
+/**
+ * @brief trigger hot reset
+ * this function triggers hot-reset, it doesn't wait for link re-establishment
+ *
+ * @param pcie_port pcie port handle
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port);
+
+/* TODO: check if this function needed */
+int al_pcie_link_change_speed(struct al_pcie_port *pcie_port, enum al_pcie_link_speed new_speed);
+
+/* TODO: check if this function needed */
+int al_pcie_link_change_width(struct al_pcie_port *pcie_port, uint8_t width);
+
+
+/* Configuration Space Access Through PCI-E_ECAM_Ext PASW (RC mode only) */
+
+/**
+ * @brief set target_bus and mask_target_bus
+ * @param pcie_port pcie port handle
+ * @param target_bus
+ * @param mask_target_bus
+ * @return 0 if no error found
+ */
+int al_pcie_target_bus_set(struct al_pcie_port *pcie_port,
+ uint8_t target_bus,
+ uint8_t mask_target_bus);
+
+/**
+ * @brief get target_bus and mask_target_bus
+ * @param pcie_port pcie port handle
+ * @param target_bus
+ * @param mask_target_bus
+ * @return 0 if no error found
+ */
+int al_pcie_target_bus_get(struct al_pcie_port *pcie_port,
+ uint8_t *target_bus,
+ uint8_t *mask_target_bus);
+
+/**
+ * Set secondary bus number
+ *
+ * @param pcie_port pcie port handle
+ * @param secbus pci secondary bus number
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus);
+
+/**
+ * Set subordinary bus number
+ *
+ * @param pcie_port pcie port handle
+ * @param subbus the highest bus number of all of the buses that can be reached
+ * downstream of the PCIE instance.
+ *
+ * @return 0 if no error found.
+ */
+int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port,uint8_t subbus);
+
+
+/**
+ * @brief get base address of pci configuration space header
+ * @param pcie_port pcie port handle
+ * @param addr pointer for returned address;
+ * @return 0 if no error found
+ */
+int al_pcie_config_space_get(struct al_pcie_port *pcie_port,
+ uint8_t __iomem **addr);
+
+/**
+ * Read data from the local configuration space
+ *
+ * @param pcie_port
+ * PCIe port handle
+ * @param reg_offset
+ * Configuration space register offset
+ *
+ * @return Read data
+ */
+uint32_t al_pcie_cfg_emul_local_cfg_space_read(
+ struct al_pcie_port *pcie_port,
+ unsigned int reg_offset);
+
+/**
+ * Write data to the local configuration space
+ *
+ * @param pcie_port
+ * PCIe port handle
+ * @param reg_offset
+ * Configuration space register offset
+ * @param data
+ * Data to write
+ * @param ro
+ * Is a read-only register according to PCIe specification
+ *
+ */
+void al_pcie_cfg_emul_local_cfg_space_write(
+ struct al_pcie_port *pcie_port,
+ unsigned int reg_offset,
+ uint32_t data,
+ al_bool ro);
+
+/******************* Internal Address Translation Unit (ATU) *************/
+enum al_pcie_atu_dir {
+ al_pcie_atu_dir_outbound = 0,
+ al_pcie_atu_dir_inbound = 1,
+};
+
+enum al_pcie_atu_tlp {
+ AL_PCIE_TLP_TYPE_MEM = 0,
+ AL_PCIE_TLP_TYPE_IO = 2,
+ AL_PCIE_TLP_TYPE_CFG0 = 4,
+ AL_PCIE_TLP_TYPE_CFG1 = 5,
+ AL_PCIE_TLP_TYPE_MSG = 0x10,
+ AL_PCIE_TLP_TYPE_RESERVED = 0x1f
+};
+
+struct al_pcie_atu_region {
+ al_bool enable;
+ enum al_pcie_atu_dir direction; /* outbound or inbound */
+ uint8_t index; /* region index */
+ uint64_t base_addr;
+ uint64_t limit; /* only bits [39:0] are valid given the Alpine PoC maximum physical address space */
+ uint64_t target_addr; /* the address that matches will be translated to this address + offset */
+ al_bool invert_matching;
+ enum al_pcie_atu_tlp tlp_type; /* pcie tlp type*/
+ uint8_t attr; /* pcie frame header attr field*/
+ /* outbound specific params */
+ uint8_t msg_code; /* pcie message code */
+ al_bool cfg_shift_mode;
+ /* inbound specific params*/
+ uint8_t bar_number;
+ uint8_t match_mode; /* BAR match mode, used in EP for MEM and IO tlps*/
+ al_bool enable_attr_match_mode;
+ al_bool enable_msg_match_mode;
+};
+
+/**
+ * @brief program internal ATU region entry
+ * @param pcie_port pcie port handle
+ * @param atu_region data structure that contains the region index and the translation parameters
+ * @return
+ */
+int al_pcie_atu_region_set(struct al_pcie_port *pcie_port, struct al_pcie_atu_region *atu_region);
+
+/**
+ * @brief Configure axi io bar. every hit to this bar will override size to 4 bytes.
+ * @param pcie_port pcie port handle
+ * @param start the first address of the memory
+ * @param end the last address of the memory
+ * @return
+ */
+void al_pcie_axi_io_config(struct al_pcie_port *pcie_port, al_phys_addr_t start, al_phys_addr_t end);
+
+/********************** Interrupt generation (Endpoint mode Only) ************/
+
+enum al_pcie_legacy_int_type{
+ AL_PCIE_LEGACY_INTA = 0,
+ AL_PCIE_LEGACY_INTB,
+ AL_PCIE_LEGACY_INTC,
+ AL_PCIE_LEGACY_INTD
+};
+
+/**
+ * @brief generate INTx Assert/DeAssert Message
+ * @param pcie_port pcie port handle
+ * @param assert when true, Assert Message is sent.
+ * @param type type of message (INTA, INTB, etc)
+ * @return 0 if no error found
+ */
+int al_pcie_legacy_int_gen(struct al_pcie_port *pcie_port, al_bool assert,
+ enum al_pcie_legacy_int_type type /*A,B,..*/);
+
+/**
+ * @brief generate MSI interrupt
+ * @param pcie_port pcie port handle
+ * @param vector the vector index to send interrupt for.
+ * @return 0 if no error found
+ */
+int al_pcie_msi_int_gen(struct al_pcie_port *pcie_port, uint8_t vector);
+
+/**
+ * @brief configure MSIX capability
+ * @param pcie_port pcie port handle
+ * @param msix_params MSIX capability configuration parameters
+ * @return 0 if no error found
+ */
+int al_pcie_msix_config(
+ struct al_pcie_port *pcie_port,
+ struct al_pcie_msix_params *msix_params);
+
+/**
+ * @brief check whether MSIX capability is enabled
+ * @param pcie_port pcie port handle
+ * @return AL_TRUE if MSIX capability is enabled, AL_FALSE otherwise
+ */
+al_bool al_pcie_msix_enabled(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief check whether MSIX capability is masked
+ * @param pcie_port pcie port handle
+ * @return AL_TRUE if MSIX capability is masked, AL_FALSE otherwise
+ */
+al_bool al_pcie_msix_masked(struct al_pcie_port *pcie_port);
+
+/********************** Loopback mode (RC and Endpoint modes) ************/
+
+/**
+ * @brief enter local pipe loopback mode
+ * This mode will connect the pipe RX signals to TX.
+ * no need to start link when using this mode.
+ * Gen3 equalization must be disabled before enabling this mode
+ * The caller must make sure the port is ready to accept the TLPs it sends to
+ * itself. for example, BARs should be initialized before sending memory TLPs.
+ *
+ * @param pcie_port pcie port handle
+ * @return 0 if no error found
+ */
+int al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief exit local pipe loopback mode
+ *
+ * @param pcie_port pcie port handle
+ * @return 0 if no error found
+ */
+int al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief enter master remote loopback mode
+ * No need to configure the link partner to enter slave remote loopback mode
+ * as this should be done as response to special training sequence directives
+ * when master works in remote loopback mode.
+ * The caller must make sure the port is ready to accept the TLPs it sends to
+ * itself. for example, BARs should be initialized before sending memory TLPs.
+ *
+ * @param pcie_port pcie port handle
+ * @return 0 if no error found
+ */
+int al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port);
+
+/**
+ * @brief exit remote loopback mode
+ *
+ * @param pcie_port pcie port handle
+ * @return 0 if no error found
+ */
+int al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port);
+
+#endif
+/** @} end of grouppcie group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_pcie_interrupts.h b/arch/arm/mach-alpine/include/al_hal/al_hal_pcie_interrupts.h
new file mode 100644
index 0000000..1e987a9
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_pcie_interrupts.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef _AL_HAL_PCIE_INTERRUPTS_H_
+#define _AL_HAL_PCIE_INTERRUPTS_H_
+
+#include "al_hal_common.h"
+#include "al_hal_pcie.h"
+#include "al_hal_iofic.h"
+
+/**
+ * @defgroup group_pcie_interrupts PCIe interrupts
+ * @ingroup grouppcie
+ * @{
+ * The PCIe interrupts HAL can be used to control PCIe unit interrupts.
+ * There are 3 groups of interrupts: app group A, app group B and AXI.
+ *
+ * @file al_hal_pcie_interrupts.h
+ *
+ */
+
+/** App group A interrupts mask - don't change */
+enum al_pcie_app_int_grp_a {
+ AL_PCIE_APP_INT_DEASSERT_INTD = AL_BIT(0),
+ AL_PCIE_APP_INT_DEASSERT_INTC = AL_BIT(1),
+ AL_PCIE_APP_INT_DEASSERT_INTB = AL_BIT(2),
+ AL_PCIE_APP_INT_DEASSERT_INTA = AL_BIT(3),
+ AL_PCIE_APP_INT_ASSERT_INTD = AL_BIT(4),
+ AL_PCIE_APP_INT_ASSERT_INTC = AL_BIT(5),
+ AL_PCIE_APP_INT_ASSERT_INTB = AL_BIT(6),
+ AL_PCIE_APP_INT_ASSERT_INTA = AL_BIT(7),
+ AL_PCIE_APP_INT_MSI_CNTR_RCV_INT = AL_BIT(8),
+ AL_PCIE_APP_INT_MSI_TRNS_GNT = AL_BIT(9),
+ AL_PCIE_APP_INT_SYS_ERR_RC = AL_BIT(10),
+ AL_PCIE_APP_INT_FLR_PF_ACTIVE = AL_BIT(11),
+ AL_PCIE_APP_INT_AER_RC_ERR = AL_BIT(12),
+ AL_PCIE_APP_INT_AER_RC_ERR_MSI = AL_BIT(13),
+ AL_PCIE_APP_INT_WAKE = AL_BIT(14),
+ AL_PCIE_APP_INT_PME_INT = AL_BIT(15),
+ AL_PCIE_APP_INT_PME_MSI = AL_BIT(16),
+ AL_PCIE_APP_INT_HP_PME = AL_BIT(17),
+ AL_PCIE_APP_INT_HP_INT = AL_BIT(18),
+ AL_PCIE_APP_INT_HP_MSI = AL_BIT(19),
+ AL_PCIE_APP_INT_VPD_INT = AL_BIT(20),
+ AL_PCIE_APP_INT_LINK_DOWN = AL_BIT(21),
+ AL_PCIE_APP_INT_PM_XTLH_BLOCK_TLP = AL_BIT(22),
+ AL_PCIE_APP_INT_XMLH_LINK_UP = AL_BIT(23),
+ AL_PCIE_APP_INT_RDLH_LINK_UP = AL_BIT(24),
+ AL_PCIE_APP_INT_LTSSM_RCVRY_STATE = AL_BIT(25),
+ AL_PCIE_APP_INT_CFG_WR = AL_BIT(26),
+ AL_PCIE_APP_INT_CFG_EMUL = AL_BIT(31),
+};
+
+/**
+ * @brief Initialize and configure PCIe controller interrupts
+ *
+ * @param pcie_port pcie port handle
+ *
+ * @return 0 if no error found
+ */
+int al_pcie_ints_config(struct al_pcie_port *pcie_port);
+
+/**
+ * Unmask PCIe app group a interrupts
+ *
+ * @param pcie_port pcie port handle
+ * @param int_mask interrupt mask.
+ */
+void al_pcie_app_int_grp_a_unmask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask);
+
+/**
+ * Mask PCIe app group a interrupts
+ *
+ * @param pcie_port pcie port handle
+ * @param int_mask interrupt mask.
+ */
+void al_pcie_app_int_grp_a_mask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask);
+
+/**
+ * Unmask PCIe app group b interrupts
+ *
+ * @param pcie_port pcie port handle
+ * @param int_mask interrupt mask.
+ */
+void al_pcie_app_int_grp_b_unmask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask);
+
+/**
+ * Mask PCIe app group b interrupts
+ *
+ * @param pcie_port pcie port handle
+ * @param int_mask interrupt mask.
+ */
+void al_pcie_app_int_grp_b_mask(struct al_pcie_port *pcie_port,
+ uint32_t int_mask);
+
+/**
+ * Clear the PCIe app group a interrupt cause
+ *
+ * @param pcie_port pcie port handle
+ * @param int_cause interrupt cause register bits to clear
+ */
+static INLINE void al_pcie_app_int_grp_a_cause_clear(
+ struct al_pcie_port *pcie_port,
+ uint32_t int_cause)
+{
+ al_iofic_clear_cause(pcie_port->app_int_grp_a_base, 0, int_cause);
+}
+
+/**
+ * Read PCIe app group a interrupt cause
+ *
+ * @param pcie_port pcie port handle
+ * @return interrupt cause mask
+ */
+static INLINE uint32_t al_pcie_app_int_grp_a_cause_read(
+ struct al_pcie_port *pcie_port)
+{
+ return al_iofic_read_cause(pcie_port->app_int_grp_a_base, 0);
+}
+
+#endif
+/** @} end of group_pcie_interrupts group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_plat_services.h b/arch/arm/mach-alpine/include/al_hal/al_hal_plat_services.h
new file mode 100644
index 0000000..1a9815a
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_plat_services.h
@@ -0,0 +1,203 @@
+/*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_services Platform Services API
+ * Platform Services API
+ * @{
+ * @file al_hal_plat_services.h
+ *
+ * @brief API for Platform services provided to HAL drivers
+ *
+ *
+ */
+
+#ifndef __PLAT_SERVICES_H__
+#define __PLAT_SERVICES_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <asm/io.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define al_reg_read8(l) readb(l)
+#define al_reg_read16(l) readw(l)
+#define al_reg_read32(l) readl(l)
+#define al_reg_read64(l)
+
+#define al_reg_write8(l,v) writeb(v,l)
+#define al_reg_write16(l,v) writew(v,l)
+#define al_reg_write32(l,v) writel(v,l)
+
+#ifdef CONFIG_ARM
+/*
+ * Relaxed register read/write functions don't involve cpu instructions that
+ * force synchronization, nor ordering between the register access and memory
+ * data access.
+ * These instructions are used in performance critical code to avoid the
+ * overhead of the synchronization instructions.
+ */
+#define al_reg_read32_relaxed(l) readl_relaxed(l)
+#define al_reg_write32_relaxed(l,v) writel_relaxed(v,l)
+#else
+#define al_reg_read32_relaxed(l) readl(l)
+#define al_reg_write32_relaxed(l,v) writel(v,l)
+#endif
+
+/**
+ * print message
+ *
+ * @param type of message
+ * @param format
+ */
+#define al_print(type, fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
+
+/**
+ * print error message
+ *
+ * @param format
+ */
+#define al_err(...) pr_err(__VA_ARGS__)
+
+/**
+ * print warning message
+ *
+ * @param format
+ */
+#define al_warn(...) pr_info(__VA_ARGS__)
+
+/**
+ * print info message
+ *
+ * @param format
+ */
+#define al_info(...) pr_info(__VA_ARGS__)
+
+/**
+ * print debug message
+ *
+ * @param format
+ */
+#define al_dbg(...) pr_debug(__VA_ARGS__)
+
+/**
+ * Assertion
+ *
+ * @param condition
+ */
+#define al_assert(COND) BUG_ON(!(COND))
+
+/**
+ * Make sure data will be visible by DMA masters. usually this is achieved by
+ * the ARM DMB instruction.
+ */
+
+static inline void al_data_memory_barrier(void)
+{
+ mb();
+}
+
+/**
+ * Make sure data will be visible in order by other cpus masters.
+ */
+static inline void al_smp_data_memory_barrier(void)
+{
+ smp_mb();
+}
+
+static inline void al_local_data_memory_barrier(void)
+{
+ mb();
+}
+
+/**
+ * al_udelay - micro sec delay
+ * @param u micro seconds to delay
+ */
+#define al_udelay(u) udelay(u)
+
+#define al_msleep(m) msleep(m)
+
+#define swap16_to_le(x) cpu_to_le16(x)
+#define swap32_to_le(x) cpu_to_le32(x)
+#define swap64_to_le(x) cpu_to_le64(x)
+#define swap16_from_le(x) le16_to_cpu(x)
+#define swap32_from_le(x) le32_to_cpu(x)
+#define swap64_from_le(x) le64_to_cpu(x)
+
+/**
+ * Memory set
+ *
+ * @param memory pointer
+ * @param value for setting
+ * @param number of bytes to set
+ */
+#define al_memset(p, val, cnt) memset(p, val, cnt)
+
+/**
+ * memory compare
+ *
+ * @param p1 memory pointer
+ * @param p2 memory pointer
+ * @param cnt number of bytes to compare
+ *
+ * @return 0 if equal, else otherwise
+ */
+#define al_memcmp(p1, p2, cnt) memcmp(p1, p2, cnt)
+
+/**
+ * memory copy
+ *
+ * @param dest memory pointer to destination
+ * @param src memory pointer to source
+ * @param cnt number of bytes to copy
+ */
+#define al_memcpy(dest, src, cnt) memcpy(dest, src, cnt)
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Platform Services API group */
+#endif /* __PLAT_SERVICES_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_plat_types.h b/arch/arm/mach-alpine/include/al_hal/al_hal_plat_types.h
new file mode 100644
index 0000000..1bb7544
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_plat_types.h
@@ -0,0 +1,76 @@
+/*******************************************************************************
+Copyright (C) 2015 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_services Platform Services API
+ * Platform Services API
+ * @{
+ * @file al_hal_plat_types.h
+ *
+ * @brief platform dependent data types
+ *
+ *
+ */
+
+#ifndef __PLAT_TYPES_H__
+#define __PLAT_TYPES_H__
+
+#include <linux/types.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* Basic data types */
+typedef int al_bool; /*! boolean */
+#define AL_TRUE 1
+#define AL_FALSE 0
+
+/*! in LPAE mode, the physical address is 40 bit; we extend it to 64 bit */
+typedef dma_addr_t al_phys_addr_t;
+
+/*! this defines the cpu endianness. */
+#define PLAT_ARCH_IS_LITTLE() AL_TRUE
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Platform Services API group */
+
+#endif /* __PLAT_TYPES_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_reg_utils.h b/arch/arm/mach-alpine/include/al_hal/al_hal_reg_utils.h
new file mode 100644
index 0000000..4065c7e
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_reg_utils.h
@@ -0,0 +1,181 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_common HAL Common Layer
+ * @{
+ * @file al_hal_reg_utils.h
+ *
+ * @brief Register utilities used by HALs and platform layer
+ *
+ *
+ */
+
+#ifndef __AL_HAL_REG_UTILS_H__
+#define __AL_HAL_REG_UTILS_H__
+
+#include "al_hal_plat_types.h"
+#include "al_hal_plat_services.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define AL_BIT(b) (1UL << (b))
+
+#define AL_ADDR_LOW(x) ((uint32_t)((al_phys_addr_t)(x)))
+#define AL_ADDR_HIGH(x) ((uint32_t)((((al_phys_addr_t)(x)) >> 16) >> 16))
+
+/** get field out of 32 bit register */
+#define AL_REG_FIELD_GET(reg, mask, shift) (((reg) & (mask)) >> (shift))
+
+/** set field of 32 bit register */
+#define AL_REG_FIELD_SET(reg, mask, shift, val) \
+ (reg) = \
+ (((reg) & (~(mask))) | \
+ ((((unsigned)(val)) << (shift)) & (mask)))
+
+/** get single bit out of 32 bit register */
+#define AL_REG_BIT_GET(reg, shift) \
+ AL_REG_FIELD_GET(reg, AL_BIT(shift), shift)
+
+#define AL_REG_BITS_FIELD(shift, val) \
+ (((unsigned)(val)) << (shift))
+
+/** set single bit field of 32 bit register to a given value */
+#define AL_REG_BIT_VAL_SET(reg, shift, val) \
+ AL_REG_FIELD_SET(reg, AL_BIT(shift), shift, val)
+
+/** set single bit of 32 bit register to 1 */
+#define AL_REG_BIT_SET(reg, shift) \
+ AL_REG_BIT_VAL_SET(reg, shift, 1)
+
+/** clear single bit of 32 bit register */
+#define AL_REG_BIT_CLEAR(reg, shift) \
+ AL_REG_BIT_VAL_SET(reg, shift, 0)
+
+
+#define AL_BIT_MASK(n) \
+ (AL_BIT(n) - 1)
+
+#define AL_FIELD_MASK(msb, lsb) \
+ (AL_BIT(msb) + AL_BIT_MASK(msb) - AL_BIT_MASK(lsb))
+
+/** clear bits specified by clear_mask */
+#define AL_REG_MASK_CLEAR(reg, clear_mask) \
+ ((reg) = (((reg) & (~(clear_mask)))))
+
+/** set bits specified by clear_mask */
+#define AL_REG_MASK_SET(reg, clear_mask) \
+ ((reg) = (((reg) | (clear_mask))))
+
+
+/** clear bits specified by clear_mask, and set bits specified by set_mask */
+#define AL_REG_CLEAR_AND_SET(reg, clear_mask, set_mask) \
+ (reg) = (((reg) & (~(clear_mask))) | (set_mask))
+
+#define AL_ALIGN_UP(val, size) \
+ ((size) * (((val) + (size) - 1) / (size)))
+
+/** take bits selected by mask from one data, the rest from background */
+#define AL_MASK_VAL(mask, data, background) \
+ (((mask) & (data)) | ((~mask) & (background)))
+
+/**
+ * 8 bits register masked write
+ *
+ * @param reg
+ * register address
+ * @param mask
+ * bits not selected (1) by mask will be left unchanged
+ * @param data
+ * data to write. bits not selected by mask ignored.
+ */
+static inline void al_reg_write8_masked(uint8_t __iomem *reg, uint8_t mask
+ , uint8_t data)
+{
+ uint8_t temp;
+ temp = al_reg_read8(reg);
+ al_reg_write8(reg, AL_MASK_VAL(mask, data, temp));
+}
+
+
+/**
+ * 16 bits register masked write
+ *
+ * @param reg
+ * register address
+ * @param mask
+ * bits not selected (1) by mask will be left unchanged
+ * @param data
+ * data to write. bits not selected by mask ignored.
+ */
+static inline void al_reg_write16_masked(uint16_t __iomem *reg, uint16_t mask
+ , uint16_t data)
+{
+ uint16_t temp;
+ temp = al_reg_read16(reg);
+ al_reg_write16(reg, AL_MASK_VAL(mask, data, temp));
+}
+
+
+/**
+ * 32 bits register masked write
+ *
+ * @param reg
+ * register address
+ * @param mask
+ * bits not selected (1) by mask will be left unchanged
+ * @param data
+ * data to write. bits not selected by mask ignored.
+ */
+static inline void al_reg_write32_masked(uint32_t __iomem *reg, uint32_t mask
+ , uint32_t data)
+{
+ uint32_t temp;
+ temp = al_reg_read32(reg);
+ al_reg_write32(reg, AL_MASK_VAL(mask, data, temp));
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Common group */
+#endif
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_serdes.h b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes.h
new file mode 100644
index 0000000..9b8fa20
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes.h
@@ -0,0 +1,948 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_serdes_api API
+ * SerDes HAL driver API
+ * @ingroup group_serdes SerDes
+ * @{
+ *
+ * @file al_hal_serdes.h
+ *
+ * @brief Header file for the SerDes HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_SERDES_H__
+#define __AL_HAL_SERDES_H__
+
+#include "al_hal_common.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+struct al_serdes_obj;
+
+enum al_serdes_group {
+ AL_SRDS_GRP_A = 0,
+ AL_SRDS_GRP_B,
+ AL_SRDS_GRP_C,
+ AL_SRDS_GRP_D,
+
+ AL_SRDS_NUM_GROUPS
+};
+
+struct al_serdes_group_info {
+ /*
+ * Group parent object - filled automatically by al_serdes_handle_init
+ */
+ struct al_serdes_obj *pobj;
+
+ /*
+ * Group specific register base - filled automatically by
+ * al_serdes_handle_init
+ */
+ struct al_serdes_regs __iomem *regs_base;
+};
+
+struct al_serdes_obj {
+ struct al_serdes_group_info grp_info[AL_SRDS_NUM_GROUPS];
+};
+
+enum al_serdes_reg_page {
+ AL_SRDS_REG_PAGE_0_LANE_0 = 0,
+ AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_REG_PAGE_3_LANE_3,
+ AL_SRDS_REG_PAGE_4_COMMON,
+ AL_SRDS_REG_PAGE_0123_LANES_0123 = 7,
+};
+
+enum al_serdes_reg_type {
+ AL_SRDS_REG_TYPE_PMA = 0,
+ AL_SRDS_REG_TYPE_PCS,
+};
+
+enum al_serdes_lane {
+ AL_SRDS_LANE_0 = AL_SRDS_REG_PAGE_0_LANE_0,
+ AL_SRDS_LANE_1 = AL_SRDS_REG_PAGE_1_LANE_1,
+ AL_SRDS_LANE_2 = AL_SRDS_REG_PAGE_2_LANE_2,
+ AL_SRDS_LANE_3 = AL_SRDS_REG_PAGE_3_LANE_3,
+
+ AL_SRDS_NUM_LANES,
+ AL_SRDS_LANES_0123 = AL_SRDS_REG_PAGE_0123_LANES_0123,
+};
+
+/** Serdes loopback mode */
+enum al_serdes_lb_mode {
+ /** No loopback */
+ AL_SRDS_LB_MODE_OFF,
+
+ /**
+ * Transmits the untimed, partial equalized RX signal out the transmit
+ * IO pins.
+ * No clock used (untimed)
+ */
+ AL_SRDS_LB_MODE_PMA_IO_UN_TIMED_RX_TO_TX,
+
+ /**
+ * Loops back the TX serializer output into the CDR.
+ * CDR recovered bit clock used (without attenuation)
+ */
+ AL_SRDS_LB_MODE_PMA_INTERNALLY_BUFFERED_SERIAL_TX_TO_RX,
+
+ /**
+ * Loops back the TX driver IO signal to the RX IO pins
+ * CDR recovered bit clock used (only through IO)
+ */
+ AL_SRDS_LB_MODE_PMA_SERIAL_TX_IO_TO_RX_IO,
+
+ /**
+ * Parallel loopback from the PMA receive lane data ports, to the
+ * transmit lane data ports
+ * CDR recovered bit clock used
+ */
+ AL_SRDS_LB_MODE_PMA_PARALLEL_RX_TO_TX,
+
+ /** Loops received data after elastic buffer to transmit path */
+ AL_SRDS_LB_MODE_PCS_PIPE,
+
+ /** Loops TX data (to PMA) to RX path (instead of PMA data) */
+ AL_SRDS_LB_MODE_PCS_NEAR_END,
+
+ /** Loops receive data prior to interface block to transmit path */
+ AL_SRDS_LB_MODE_PCS_FAR_END,
+};
+
+/** Serdes BIST pattern */
+enum al_serdes_bist_pattern {
+ AL_SRDS_BIST_PATTERN_USER,
+ AL_SRDS_BIST_PATTERN_PRBS7,
+ AL_SRDS_BIST_PATTERN_PRBS23,
+ AL_SRDS_BIST_PATTERN_PRBS31,
+ AL_SRDS_BIST_PATTERN_CLK1010,
+};
+
+/** SerDes group rate */
+enum al_serdes_rate {
+ AL_SRDS_RATE_1_8,
+ AL_SRDS_RATE_1_4,
+ AL_SRDS_RATE_1_2,
+ AL_SRDS_RATE_FULL,
+};
+
+/** SerDes power mode */
+enum al_serdes_pm {
+ AL_SRDS_PM_PD,
+ AL_SRDS_PM_P2,
+ AL_SRDS_PM_P1,
+ AL_SRDS_PM_P0S,
+ AL_SRDS_PM_P0,
+};
+
+/**
+ * Initializes a SERDES object
+ *
+ * @param serdes_regs_base
+ * The SERDES register file base pointer
+ *
+ * @param obj
+ * An allocated, non initialized object context
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_handle_init(
+ void __iomem *serdes_regs_base,
+ struct al_serdes_obj *obj);
+
+/**
+ * SERDES register read
+ *
+ * Reads a SERDES register
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param page
+ * The SERDES register page within the group
+ *
+ * @param type
+ * The SERDES register type (PMA /PCS)
+ *
+ * @param offset
+ * The SERDES register offset (0 - 4095)
+ *
+ * @param data
+ * The read data
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_reg_read(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t *data);
+
+/**
+ * SERDES register write
+ *
+ * Writes a SERDES register
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param page
+ * The SERDES register page within the group
+ *
+ * @param type
+ * The SERDES register type (PMA /PCS)
+ *
+ * @param offset
+ * The SERDES register offset (0 - 4095)
+ *
+ * @param data
+ * The data to write
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_reg_write(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_reg_page page,
+ enum al_serdes_reg_type type,
+ uint16_t offset,
+ uint8_t data);
+
+/**
+ * Enable BIST required overrides
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param rate
+ * The required speed rate
+ */
+void al_serdes_bist_overrides_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_rate rate);
+
+/**
+ * SERDES group power mode control
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param pm
+ * The required power mode
+ */
+void al_serdes_group_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_pm pm);
+
+/**
+ * SERDES lane power mode control
+ *
+ * @param obj
+ * The object context
+ * @param grp
+ * The SERDES group
+ * @param lane
+ * The SERDES lane within the group
+ * @param rx_pm
+ * The required RX power mode
+ * @param tx_pm
+ * The required TX power mode
+ */
+void al_serdes_lane_pm_set(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_pm rx_pm,
+ enum al_serdes_pm tx_pm);
+
+/**
+ * SERDES group PMA hard reset
+ *
+ * Controls Serdes group PMA hard reset
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param enable
+ * Enable/disable hard reset
+ */
+void al_serdes_pma_hard_reset_group(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ al_bool enable);
+
+/**
+ * SERDES lane PMA hard reset
+ *
+ * Controls Serdes lane PMA hard reset
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ * Enable/disable hard reset
+ */
+void al_serdes_pma_hard_reset_lane(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES loopback control
+ *
+ * Controls the loopback
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param mode
+ * The requested loopback mode
+ *
+ */
+void al_serdes_loopback_control(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_lb_mode mode);
+
+/**
+ * SERDES BIST pattern selection
+ *
+ * Selects the BIST pattern to be used
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param pattern
+ * The pattern to set
+ *
+ * @param user_data
+ * The pattern user data (when pattern == AL_SRDS_BIST_PATTERN_USER)
+ * 80 bits (8 bytes array)
+ *
+ */
+void al_serdes_bist_pattern_select(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_bist_pattern pattern,
+ uint8_t *user_data);
+
+/**
+ * SERDES BIST TX Enable
+ *
+ * Enables/disables TX BIST per lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ * Enable or disable TX BIST
+ */
+void al_serdes_bist_tx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES BIST TX single bit error injection
+ *
+ * Injects single bit error during a TX BIST
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ */
+void al_serdes_bist_tx_err_inject(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * SERDES BIST RX Enable
+ *
+ * Enables/disables RX BIST per lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param enable
+ * Enable or disable RX BIST
+ */
+void al_serdes_bist_rx_enable(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool enable);
+
+/**
+ * SERDES BIST RX status
+ *
+ * Checks the RX BIST status for a specific SERDES lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param is_locked
+ * An indication whether RX BIST is locked
+ *
+ * @param err_cnt_overflow
+ * An indication whether error count overflow occurred
+ *
+ * @param err_cnt
+ * Current bit error count
+ */
+void al_serdes_bist_rx_status(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ al_bool *is_locked,
+ al_bool *err_cnt_overflow,
+ uint16_t *err_cnt);
+
+/**
+ * SERDES Digital Test Bus
+ *
+ * Samples the digital test bus of a specific SERDES lane
+ *
+ * @param obj
+ * The object context
+ *
+ * @param grp
+ * The SERDES group
+ *
+ * @param lane
+ * The SERDES lane within the group
+ *
+ * @param sel
+ * The selected sampling group (0 - 31)
+ *
+ * @param sampled_data
+ * The sampled data (5 bytes array)
+ *
+ *
+ * @return 0 if no error found.
+ *
+ */
+int al_serdes_digital_test_bus(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint8_t sel,
+ uint8_t *sampled_data);
+
+
+/* KR link training */
+/**
+ * Set the tx de-emphasis to preset values
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ */
+void al_serdes_tx_deemph_preset(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * Tx de-emphasis parameters
+ */
+enum al_serdes_tx_deemph_param {
+ AL_SERDES_TX_DEEMP_C_ZERO, /*< c(0) */
+ AL_SERDES_TX_DEEMP_C_PLUS, /*< c(1) */
+ AL_SERDES_TX_DEEMP_C_MINUS, /*< c(-1) */
+};
+
+/**
+ * Increase tx de-emphasis param.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param param which tx de-emphasis to change
+ *
+ * @return false in case max is reached. true otherwise.
+ */
+al_bool al_serdes_tx_deemph_inc(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param);
+
+/**
+ * Decrease tx de-emphasis param.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param param which tx de-emphasis to change
+ *
+ * @return false in case min is reached. true otherwise.
+ */
+al_bool al_serdes_tx_deemph_dec(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ enum al_serdes_tx_deemph_param param);
+
+/**
+ * run Rx eye measurement.
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param timeout timeout in uSec
+ *
+ * @param value Rx eye measurement value
+ * (0 - completely closed eye, 0xffff - completely open eye).
+ *
+ * @return 0 if no error found.
+ */
+int al_serdes_eye_measure_run(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ uint32_t timeout,
+ unsigned int *value);
+
+/**
+ * Eye diagram single sampling
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param x Sampling X position (0 - 63 --> -1.00 UI ... 1.00 UI)
+ *
+ * @param y Sampling Y position (0 - 62 --> 500mV ... -500mV)
+ *
+ * @param timeout timeout in uSec
+ *
+ * @param value Eye diagram sample value (BER - 0x0000 - 0xffff)
+ *
+ * @return 0 if no error found.
+ */
+int al_serdes_eye_diag_sample(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ unsigned int x,
+ int y,
+ unsigned int timeout,
+ unsigned int *value);
+
+/**
+ * Check if signal is detected
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @return true if signal is detected. false otherwise.
+ */
+al_bool al_serdes_signal_is_detected(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+
+struct al_serdes_adv_tx_params {
+ /*
+ * Selects the input values' location.
+ * When set to true, the values are taken from the internal registers,
+ * which will be overridden with the following parameters.
+ * When set to false, the values are taken from external pins (the
+ * other parameters are not needed in this case).
+ */
+ al_bool override;
+ /*
+ * Transmit Amplitude control signal. Used to define the full-scale
+ * maximum swing of the driver.
+ * 000 - Not Supported
+ * 001 - 952mVdiff-pkpk
+ * 010 - 1024mVdiff-pkpk
+ * 011 - 1094mVdiff-pkpk
+ * 100 - 1163mVdiff-pkpk
+ * 101 - 1227mVdiff-pkpk
+ * 110 - 1283mVdiff-pkpk
+ * 111 - 1331mVdiff-pkpk
+ */
+ uint8_t amp;
+ /* Defines the total number of driver units allocated in the driver */
+ uint8_t total_driver_units;
+ /* Defines the total number of driver units allocated to the
+ * first post-cursor (C+1) tap. */
+ uint8_t c_plus_1;
+ /* Defines the total number of driver units allocated to the
+ * second post-cursor (C+2) tap. */
+ uint8_t c_plus_2;
+ /* Defines the total number of driver units allocated to the
+ * first pre-cursor (C-1) tap. */
+ uint8_t c_minus_1;
+ /* TX driver Slew Rate control:
+ * 00 - 31ps
+ * 01 - 33ps
+ * 10 - 68ps
+ * 11 - 170ps
+ */
+ uint8_t slew_rate;
+};
+
+struct al_serdes_adv_rx_params {
+ /*
+ * Selects the input values' location.
+ * When set to true, the values are taken from the internal registers,
+ * which will be overridden with the following parameters.
+ * When set to false, the values are based on the equalization
+ * results (the other parameters are not needed in this case).
+ */
+ al_bool override;
+ /* RX agc high frequency dc gain:
+ * -3'b000: -3dB
+ * -3'b001: -2.5dB
+ * -3'b010: -2dB
+ * -3'b011: -1.5dB
+ * -3'b100: -1dB
+ * -3'b101: -0.5dB
+ * -3'b110: -0dB
+ * -3'b111: 0.5dB
+ */
+ uint8_t dcgain;
+ /* DFE post-shaping tap 3dB frequency
+ * -3'b000: 684MHz
+ * -3'b001: 576MHz
+ * -3'b010: 514MHz
+ * -3'b011: 435MHz
+ * -3'b100: 354MHz
+ * -3'b101: 281MHz
+ * -3'b110: 199MHz
+ * -3'b111: 125MHz
+ */
+ uint8_t dfe_3db_freq;
+ /* DFE post-shaping tap gain
+ * 0: no pulse shaping tap
+ * 1: -24mVpeak
+ * 2: -45mVpeak
+ * 3: -64mVpeak
+ * 4: -80mVpeak
+ * 5: -93mVpeak
+ * 6: -101mVpeak
+ * 7: -105mVpeak
+ */
+ uint8_t dfe_gain;
+ /* DFE first tap gain control
+ * -4'b0000: +1mVpeak
+ * -4'b0001: +10mVpeak
+ * ....
+ * -4'b0110: +55mVpeak
+ * -4'b0111: +64mVpeak
+ * -4'b1000: -1mVpeak
+ * -4'b1001: -10mVpeak
+ * ....
+ * -4'b1110: -55mVpeak
+ * -4'b1111: -64mVpeak
+ */
+ uint8_t dfe_first_tap_ctrl;
+ /* DFE second tap gain control
+ * -4'b0000: +0mVpeak
+ * -4'b0001: +9mVpeak
+ * ....
+ * -4'b0110: +46mVpeak
+ * -4'b0111: +53mVpeak
+ * -4'b1000: -0mVpeak
+ * -4'b1001: -9mVpeak
+ * ....
+ * -4'b1110: -46mVpeak
+ * -4'b1111: -53mVpeak
+ */
+ uint8_t dfe_secound_tap_ctrl;
+ /* DFE third tap gain control
+ * -4'b0000: +0mVpeak
+ * -4'b0001: +7mVpeak
+ * ....
+ * -4'b0110: +38mVpeak
+ * -4'b0111: +44mVpeak
+ * -4'b1000: -0mVpeak
+ * -4'b1001: -7mVpeak
+ * ....
+ * -4'b1110: -38mVpeak
+ * -4'b1111: -44mVpeak
+ */
+ uint8_t dfe_third_tap_ctrl;
+ /* DFE fourth tap gain control
+ * -4'b0000: +0mVpeak
+ * -4'b0001: +6mVpeak
+ * ....
+ * -4'b0110: +29mVpeak
+ * -4'b0111: +33mVpeak
+ * -4'b1000: -0mVpeak
+ * -4'b1001: -6mVpeak
+ * ....
+ * -4'b1110: -29mVpeak
+ * -4'b1111: -33mVpeak
+ */
+ uint8_t dfe_fourth_tap_ctrl;
+ /* Low frequency agc gain (att) select
+ * -3'b000: Disconnected
+ * -3'b001: -18.5dB
+ * -3'b010: -12.5dB
+ * -3'b011: -9dB
+ * -3'b100: -6.5dB
+ * -3'b101: -4.5dB
+ * -3'b110: -2.9dB
+ * -3'b111: -1.6dB
+ */
+ uint8_t low_freq_agc_gain;
+ /* Provides a RX Equalizer pre-hint, prior to beginning
+ * adaptive equalization */
+ uint8_t precal_code_sel;
+ /* High frequency agc boost control
+ * Min d0: Boost ~4dB
+ * Max d31: Boost ~20dB
+ */
+ uint8_t high_freq_agc_boost;
+};
+
+/**
+ * configure tx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the tx parameters
+ */
+void al_serdes_tx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *params);
+
+/**
+ * read tx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the tx parameters
+ */
+void al_serdes_tx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_tx_params *params);
+
+/**
+ * configure rx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the rx parameters
+ */
+void al_serdes_rx_advanced_params_set(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params *params);
+
+/**
+ * read rx advanced parameters
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param params pointer to the rx parameters
+ */
+void al_serdes_rx_advanced_params_get(struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ struct al_serdes_adv_rx_params* params);
+
+/**
+ * Switch entire SerDes group to SGMII mode based on 156.25 MHz reference clock
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ */
+void al_serdes_mode_set_sgmii(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * Switch entire SerDes group to KR mode based on 156.25 MHz reference clock
+ *
+ * @param obj The object context
+ *
+ * @param grp The SERDES group
+ */
+void al_serdes_mode_set_kr(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp);
+
+/**
+ * performs SerDes HW equalization test and update equalization parameters
+ *
+ * @param obj the object context
+ *
+ * @param grp the SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ */
+int al_serdes_rx_equalization(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane);
+
+/**
+ * performs Rx equalization and compute the width and height of the eye
+ *
+ * @param obj the object context
+ *
+ * @param grp the SERDES group
+ *
+ * @param lane The SERDES lane within the group
+ *
+ * @param width the output width of the eye
+ *
+ * @param height the output height of the eye
+ */
+int al_serdes_calc_eye_size(
+ struct al_serdes_obj *obj,
+ enum al_serdes_group grp,
+ enum al_serdes_lane lane,
+ int* width,
+ int* height);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+
+/* *INDENT-ON* */
+#endif /* __AL_SRDS__ */
+
+/** @} end of SERDES group */
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_internal_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_internal_regs.h
new file mode 100644
index 0000000..ff9bef6
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_internal_regs.h
@@ -0,0 +1,652 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __AL_SERDES_INTERNAL_REGS_H__
+#define __AL_SERDES_INTERNAL_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+ * Per lane register fields
+ ******************************************************************************/
+/*
+ * RX and TX lane hard reset
+ * 0 - Hard reset is asserted
+ * 1 - Hard reset is de-asserted
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_MASK 0x01
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_ASSERT 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_VAL_DEASSERT 0x01
+
+/*
+ * RX and TX lane hard reset control
+ * 0 - Hard reset is taken from the interface pins
+ * 1 - Hard reset is taken from registers
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_MASK 0x02
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_IFACE 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_VAL_REGS 0x02
+
+/* RX lane power state control */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_REG_NUM 3
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_MASK 0x1f
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_PD 0x01
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P2 0x02
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P1 0x04
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0S 0x08
+#define SERDES_IREG_FLD_LANEPCSPSTATE_RX_VAL_P0 0x10
+
+/* TX lane power state control */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_REG_NUM 4
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_MASK 0x1f
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_PD 0x01
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P2 0x02
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P1 0x04
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0S 0x08
+#define SERDES_IREG_FLD_LANEPCSPSTATE_TX_VAL_P0 0x10
+
+/* RX lane word width */
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_REG_NUM 5
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_MASK 0x07
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_8 0x00
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_10 0x01
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_16 0x02
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_20 0x03
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_32 0x04
+#define SERDES_IREG_FLD_PCSRX_DATAWIDTH_VAL_40 0x05
+
+/* TX lane word width */
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_REG_NUM 5
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_MASK 0x70
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_8 0x00
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_10 0x10
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_16 0x20
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_20 0x30
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_32 0x40
+#define SERDES_IREG_FLD_PCSTX_DATAWIDTH_VAL_40 0x50
+
+/* RX lane rate select */
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_REG_NUM 6
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_MASK 0x07
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_8 0x00
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_4 0x01
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_2 0x02
+#define SERDES_IREG_FLD_PCSRX_DIVRATE_VAL_1_1 0x03
+
+/* TX lane rate select */
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_REG_NUM 6
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_MASK 0x70
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_8 0x00
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_4 0x10
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_2 0x20
+#define SERDES_IREG_FLD_PCSTX_DIVRATE_VAL_1_1 0x30
+
+/*
+ * PMA serial RX-to-TX loop-back enable (from AGC to IO Driver). Serial receive
+ * to transmit loopback: 0 - Disables loopback 1 - Transmits the untimed,
+ * partial equalized RX signal out the transmit IO pins
+ */
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_RX2TXUNTIMEDEN 0x10
+
+/*
+ * PMA TX-to-RX buffered serial loop-back enable (bypasses IO Driver). Serial
+ * transmit to receive buffered loopback: 0 - Disables loopback 1 - Loops back
+ * the TX serializer output into the CDR
+ */
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXBUFTIMEDEN 0x20
+
+/*
+ * PMA TX-to-RX I/O serial loop-back enable (loop back done directly from TX to
+ * RX pads). Serial IO loopback from the transmit lane IO pins to the receive
+ * lane IO pins: 0 - Disables loopback 1 - Loops back the driver IO signal to
+ * the RX IO pins
+ */
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_TX2RXIOTIMEDEN 0x40
+
+/*
+ * PMA Parallel RX-to-TX loop-back enable. Parallel loopback from the PMA
+ * receive lane 20-bit data ports, to the transmit lane 20-bit data ports 0 -
+ * Disables loopback 1 - Loops back the 20-bit receive data port to the
+ * transmitter
+ */
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_PARRX2TXTIMEDEN 0x80
+
+/*
+ * PMA CDR recovered-clock loopback enable; asserted when PARRX2TXTIMEDEN is 1.
+ * Transmit bit clock select: 0 - Selects synthesizer bit clock for transmit 1
+ * - Selects CDR clock for transmit
+ */
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN_REG_NUM 7
+#define SERDES_IREG_FLD_LB_CDRCLK2TXEN 0x01
+
+/* Receive lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSRXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSRXBIST_EN 0x01
+
+/* TX lane BIST enable. Active High */
+#define SERDES_IREG_FLD_PCSTXBIST_EN_REG_NUM 8
+#define SERDES_IREG_FLD_PCSTXBIST_EN 0x02
+
+/*
+ * RX BIST completion signal 0 - Indicates test is not completed 1 - Indicates
+ * the test has completed, and will remain high until a new test is initiated
+ */
+#define SERDES_IREG_FLD_RXBIST_DONE_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_DONE 0x04
+
+/*
+ * RX BIST error count overflow indicator. Indicates an overflow in the number
+ * of byte errors identified during the course of the test. This word is stable
+ * to sample when *_DONE_* signal has asserted
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_OVERFLOW 0x08
+
+/*
+ * RX BIST locked indicator 0 - Indicates BIST is not word locked and error
+ * comparisons have not begun yet 1 - Indicates BIST is word locked and error
+ * comparisons have begun
+ */
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED_REG_NUM 8
+#define SERDES_IREG_FLD_RXBIST_RXLOCKED 0x10
+
+/*
+ * RX BIST error count word. Indicates the number of byte errors identified
+ * during the course of the test. This word is stable to sample when *_DONE_*
+ * signal has asserted
+ */
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_MSB_REG_NUM 9
+#define SERDES_IREG_FLD_RXBIST_ERRCOUNT_LSB_REG_NUM 10
+
+/* Tx params */
+#define SERDES_IREG_TX_DRV_1_REG_NUM 21
+#define SERDES_IREG_TX_DRV_1_HLEV_MASK 0x7
+#define SERDES_IREG_TX_DRV_1_HLEV_SHIFT 0
+#define SERDES_IREG_TX_DRV_1_LEVN_MASK 0xf8
+#define SERDES_IREG_TX_DRV_1_LEVN_SHIFT 3
+
+#define SERDES_IREG_TX_DRV_2_REG_NUM 22
+#define SERDES_IREG_TX_DRV_2_LEVNM1_MASK 0xf
+#define SERDES_IREG_TX_DRV_2_LEVNM1_SHIFT 0
+#define SERDES_IREG_TX_DRV_2_LEVNM2_MASK 0x30
+#define SERDES_IREG_TX_DRV_2_LEVNM2_SHIFT 4
+
+#define SERDES_IREG_TX_DRV_3_REG_NUM 23
+#define SERDES_IREG_TX_DRV_3_LEVNP1_MASK 0x7
+#define SERDES_IREG_TX_DRV_3_LEVNP1_SHIFT 0
+#define SERDES_IREG_TX_DRV_3_SLEW_MASK 0x18
+#define SERDES_IREG_TX_DRV_3_SLEW_SHIFT 3
+
+/* Rx params */
+#define SERDES_IREG_RX_CALEQ_1_REG_NUM 24
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_1_DCGAIN_SHIFT 0
+/* DFE post-shaping tap 3dB frequency */
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_MASK 0x38
+#define SERDES_IREG_RX_CALEQ_1_DFEPSTAP3DB_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_2_REG_NUM 25
+/* DFE post-shaping tap gain */
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_2_DFEPSTAPGAIN_SHIFT 0
+/* DFE first tap gain control */
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_MASK 0x78
+#define SERDES_IREG_RX_CALEQ_2_DFETAP1GAIN_SHIFT 3
+
+#define SERDES_IREG_RX_CALEQ_3_REG_NUM 26
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_3_DFETAP2GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_MASK 0xf0
+#define SERDES_IREG_RX_CALEQ_3_DFETAP3GAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_4_REG_NUM 27
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_MASK 0xf
+#define SERDES_IREG_RX_CALEQ_4_DFETAP4GAIN_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_MASK 0x70
+#define SERDES_IREG_RX_CALEQ_4_LOFREQAGCGAIN_SHIFT 4
+
+#define SERDES_IREG_RX_CALEQ_5_REG_NUM 28
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_MASK 0x7
+#define SERDES_IREG_RX_CALEQ_5_PRECAL_CODE_SEL_SHIFT 0
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_MASK 0xf8
+#define SERDES_IREG_RX_CALEQ_5_HIFREQAGCCAP_SHIFT 3
+
+/* RX lane best eye point measurement result */
+#define SERDES_IREG_RXEQ_BEST_EYE_MSB_VAL_REG_NUM 29
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_REG_NUM 30
+#define SERDES_IREG_RXEQ_BEST_EYE_LSB_VAL_MASK 0x3F
+
+/*
+ * Adaptive RX Equalization enable
+ * 0 - Disables adaptive RX equalization.
+ * 1 - Enables adaptive RX equalization.
+ */
+#define SERDES_IREG_FLD_PCSRXEQ_START_REG_NUM 31
+#define SERDES_IREG_FLD_PCSRXEQ_START (1 << 0)
+
+/*
+ * Enables an eye diagram measurement
+ * within the PHY.
+ * 0 - Disables eye diagram measurement
+ * 1 - Enables eye diagram measurement
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_START (1 << 1)
+
+
+/*
+ * RX lane single roam eye point measurement start signal.
+ * If asserted, single measurement at fix XADJUST and YADJUST is started.
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_REG_NUM 31
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_CYCLEEN_START (1 << 2)
+
+
+/*
+ * PHY Eye diagram measurement status
+ * signal
+ * 0 - Indicates eye diagram results are not
+ * valid for sampling
+ * 1 - Indicates eye diagram is complete and
+ * results are valid for sampling
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_DONE (1 << 0)
+
+/*
+ * Eye diagram error signal. Indicates if the
+ * measurement was invalid because the eye
+ * diagram was interrupted by the link entering
+ * electrical idle.
+ * 0 - Indicates eye diagram is valid
+ * 1- Indicates an error occurred, and the eye
+ * diagram measurement should be re-run
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_ERR (1 << 1)
+
+/*
+ * PHY Adaptive Equalization status
+ * 0 - Indicates Adaptive Equalization results are not valid for sampling
+ * 1 - Indicates Adaptive Equalization is complete and results are valid for
+ * sampling
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASDONE (1 << 2)
+
+/*
+ *
+ * PHY Adaptive Equalization Status Signal
+ * 0 - Indicates adaptive equalization results
+ * are not valid for sampling
+ * 1 - Indicates adaptive equalization is
+ * complete and results are valid for sampling.
+ */
+#define SERDES_IREG_FLD_RXEQ_DONE_REG_NUM 32
+#define SERDES_IREG_FLD_RXEQ_DONE (1 << 3)
+
+
+/*
+ * 7-bit eye diagram time adjust control
+ * - 6-bits per UI
+ * - spans 2 UI
+ */
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_REG_NUM 33
+
+/* 6-bit eye diagram voltage adjust control - spans +/-300mVdiff */
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_REG_NUM 34
+
+/*
+ * Eye diagram status signal. Safe for
+ * sampling when *DONE* signal has
+ * asserted
+ * 14'h0000 - Completely Closed Eye
+ * 14'hFFFF - Completely Open Eye
+ */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_REG_NUM 35
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_MAKE 0xFF
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_REG_NUM 36
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_MAKE 0x3F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_EYESUM_LSB_SHIFT 0
+
+/*
+ * RX lane single roam eye point measurement result.
+ * If 0, eye is open at current XADJUST and YADJUST settings.
+ */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_MSB_REG_NUM 37
+#define SERDES_IREG_FLD_RXCALROAMEYEMEAS_ACC_LSB_REG_NUM 38
+
+/*
+ * Override enable for CDR lock to reference clock
+ * 0 - CDR is always locked to reference
+ * 1 - CDR operation mode (Lock2Reference or Lock2data are controlled internally
+ * depending on the incoming signal and ppm status)
+ */
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN_REG_NUM 39
+#define SERDES_IREG_FLD_RXLOCK2REF_OVREN (1 << 1)
+
+/*
+ * Selects Eye to capture based on edge
+ * 0 - Capture 1st Eye in Eye Diagram
+ * 1 - Capture 2nd Eye in Eye Diagram measurement
+ */
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_REG_NUM 39
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL (1 << 2)
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_MASK (1 << 2)
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_1ST 0
+#define SERDES_IREG_FLD_RXROAM_XORBITSEL_2ND (1 << 2)
+
+/*
+ * RX Signal detect. 0 indicates no signal, 1 indicates signal detected.
+ */
+#define SERDES_IREG_FLD_RXRANDET_REG_NUM 41
+#define SERDES_IREG_FLD_RXRANDET_STAT 0x20
+
+/*
+ * RX data polarity inversion control:
+ * 1'b0: no inversion
+ * 1'b1: invert polarity
+ */
+#define SERDES_IREG_FLD_POLARITY_RX_REG_NUM 46
+#define SERDES_IREG_FLD_POLARITY_RX_INV (1 << 0)
+
+/*
+ * TX data polarity inversion control:
+ * 1'b0: no inversion
+ * 1'b1: invert polarity
+ */
+#define SERDES_IREG_FLD_POLARITY_TX_REG_NUM 46
+#define SERDES_IREG_FLD_POLARITY_TX_INV (1 << 1)
+
+/* LANEPCSPSTATE* override enable (Active low) */
+#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_LANEPCSPSTATE_LOCWREN (1 << 0)
+
+/* LB* override enable (Active low) */
+#define SERDES_IREG_FLD_LB_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_LB_LOCWREN (1 << 1)
+
+/* PCSRX* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRX_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRX_LOCWREN (1 << 4)
+
+/* PCSRXBIST* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRXBIST_LOCWREN (1 << 5)
+
+/* PCSRXEQ* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSRXEQ_LOCWREN (1 << 6)
+
+/* PCSTX* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSTX_LOCWREN_REG_NUM 85
+#define SERDES_IREG_FLD_PCSTX_LOCWREN (1 << 7)
+
+/*
+ * group registers:
+ * SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN,
+ * SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN
+ * SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN
+ */
+#define SERDES_IREG_FLD_RXCAL_LOCWREN_REG_NUM 86
+
+/* PCSTXBIST* override enable (Active low) */
+#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_PCSTXBIST_LOCWREN (1 << 0)
+
+/* Override RX_CALCEQ through the internal registers (Active low) */
+#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN_REG_NUM 86
+#define SERDES_IREG_FLD_RX_DRV_OVERRIDE_EN (1 << 3)
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSMIN_LOCWREN (1 << 4)
+
+
+/* RXCALROAMEYEMEASIN* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALROAMEYEMEASIN_LOCWREN (1 << 6)
+
+/* RXCALROAMXADJUST* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN_REG_NUM 86
+#define SERDES_IREG_FLD_RXCALROAMXADJUST_LOCWREN (1 << 7)
+
+/* RXCALROAMYADJUST* override enable - Active Low */
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXCALROAMYADJUST_LOCWREN (1 << 0)
+
+/* RXCDRCALFOSC* override enable. Active Low */
+#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXCDRCALFOSC_LOCWREN (1 << 1)
+
+/* Over-write enable for RXEYEDIAGFSM_INITXVAL */
+#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXEYEDIAGFSM_LOCWREN (1 << 2)
+
+/* Over-write enable for RXTERMHIZ */
+#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_RXTERMHIZ_LOCWREN (1 << 3)
+
+/* TXCALTCLKDUTY* override enable. Active Low */
+#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN_REG_NUM 87
+#define SERDES_IREG_FLD_TXCALTCLKDUTY_LOCWREN (1 << 4)
+
+/* Override TX_DRV through the internal registers (Active low) */
+#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN_REG_NUM 87
+#define SERDES_IREG_FLD_TX_DRV_OVERRIDE_EN (1 << 5)
+
+/*******************************************************************************
+ * Common lane register fields
+ ******************************************************************************/
+/*
+ * Common lane hard reset control
+ * 0 - Hard reset is taken from the interface pins
+ * 1 - Hard reset is taken from registers
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_MASK 0x01
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_IFACE 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASSEN_SYNTH_VAL_REGS 0x01
+
+/*
+ * Common lane hard reset
+ * 0 - Hard reset is asserted
+ * 1 - Hard reset is de-asserted
+ */
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_REG_NUM 2
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_MASK 0x02
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_ASSERT 0x00
+#define SERDES_IREG_FLD_CMNCTLPOR_HARDRSTBYPASS_SYNTH_VAL_DEASSERT 0x02
+
+/* Synth power state control */
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_REG_NUM 3
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_MASK 0x1f
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_PD 0x01
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P2 0x02
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P1 0x04
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0S 0x08
+#define SERDES_IREG_FLD_CMNPCSPSTATE_SYNTH_VAL_P0 0x10
+
+/* Transmit datapath FIFO enable (Active High) */
+#define SERDES_IREG_FLD_CMNPCS_TXENABLE_REG_NUM 8
+#define SERDES_IREG_FLD_CMNPCS_TXENABLE (1 << 2)
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_REG_NUM 30
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_RXEQ_COARSE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_REG_NUM 31
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_MASK 0x7f
+#define SERDES_IREG_FLD_RXEQ_FINE_ITER_NUM_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_REG_NUM 32
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_REG_NUM 33
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_MASK 0x1
+#define SERDES_IREG_FLD_RXEQ_COARSE_RUN2_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_REG_NUM 33
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_MASK 0x3e
+#define SERDES_IREG_FLD_RXEQ_COARSE_STEP_SHIFT 1
+
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_REG_NUM 34
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN1_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_REG_NUM 35
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_MASK 0x1
+#define SERDES_IREG_FLD_RXEQ_FINE_RUN2_MASK_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_REG_NUM 35
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_MASK 0x3e
+#define SERDES_IREG_FLD_RXEQ_FINE_STEP_SHIFT 1
+
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_REG_NUM 36
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_MASK 0xff
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_CODE_EN_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_REG_NUM 37
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_MASK 0x7
+#define SERDES_IREG_FLD_RXEQ_LOOKUP_LASTCODE_SHIFT 0
+
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_REG_NUM 43
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_MASK 0x7
+#define SERDES_IREG_FLD_RXEQ_DCGAIN_LUP0_SHIFT 0
+
+/*
+ * Selects the transmit BIST mode:
+ * 0 - Uses the 80-bit internal memory pattern (w/ OOB)
+ * 1 - Uses a 2^7-1 PRBS pattern (PRBS7)
+ * 2 - Uses a 2^23-1 PRBS pattern (PRBS23)
+ * 3 - Uses a 2^31-1 PRBS pattern (PRBS31)
+ * 4 - Uses a 1010 clock pattern
+ * 5 and above - Reserved
+ */
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_REG_NUM 80
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_MASK 0x07
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_USER 0x00
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS7 0x01
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS23 0x02
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_PRBS31 0x03
+#define SERDES_IREG_FLD_CMNPCSBIST_MODESEL_VAL_CLK1010 0x04
+
+/* Single-Bit error injection enable (on posedge) */
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN_REG_NUM 80
+#define SERDES_IREG_FLD_TXBIST_BITERROR_EN 0x20
+
+/* CMNPCIEGEN3* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCIEGEN3_LOCWREN (1 << 2)
+
+/* CMNPCS* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCS_LOCWREN (1 << 3)
+
+/* CMNPCSBIST* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSBIST_LOCWREN (1 << 4)
+
+/* CMNPCSPSTATE* override enable (Active Low) */
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN_REG_NUM 95
+#define SERDES_IREG_FLD_CMNPCSPSTATE_LOCWREN (1 << 5)
+
+/* PCS_EN* override enable (Active Low) */
+#define SERDES_IREG_FLD_PCS_LOCWREN_REG_NUM 96
+#define SERDES_IREG_FLD_PCS_LOCWREN (1 << 3)
+
+/* Eye diagram sample count */
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_REG_NUM 150
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_MSB_SHIFT 0
+
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_REG_NUM 151
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_MASK 0xff
+#define SERDES_IREG_FLD_EYE_DIAG_SAMPLE_CNT_LSB_SHIFT 0
+
+/* override control */
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN_REG_NUM 230
+#define SERDES_IREG_FLD_RXLOCK2REF_LOCWREN (1 << 0)
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_REG_NUM 623
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD1_SHIFT 0
+
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_REG_NUM 624
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_MASK 0xff
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_BERTHRESHOLD2_SHIFT 0
+
+/* X and Y coefficient return value */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_X_Y_VALWEIGHT_REG_NUM 626
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALWEIGHT_SHIFT 0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_MASK 0xF0
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALWEIGHT_SHIFT 4
+
+/* X coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_REG_NUM 627
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALCOARSE_SHIFT 0
+
+/* X fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_REG_NUM 628
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_MASK 0x7F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_XVALFINE_SHIFT 0
+
+/* Y coarse scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_REG_NUM 629
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALCOARSE_SHIFT 0
+
+/* Y fine scan step */
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_REG_NUM 630
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_MASK 0x0F
+#define SERDES_IREG_FLD_RXCALEYEDIAGFSM_YVALFINE_SHIFT 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_serdes_REG_H */
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_regs.h
new file mode 100644
index 0000000..0a485bb
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_serdes_regs.h
@@ -0,0 +1,452 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_SERDES_REGS_H__
+#define __AL_HAL_SERDES_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Unit Registers
+ */
+
+struct serdes_intc {
+ uint32_t rsrvd[0x100 / sizeof(uint32_t)];
+};
+
+struct serdes_gen {
+ uint32_t version; /* SERDES registers Version */
+ uint32_t rsrvd1[0x0c / sizeof(uint32_t)];
+ uint32_t reg_addr; /* SERDES register file address */
+ uint32_t reg_data; /* SERDES register file data */
+ uint32_t rsrvd2[0x08 / sizeof(uint32_t)];
+ uint32_t ictl_multi_bist; /* SERDES control */
+ uint32_t ictl_pcs; /* SERDES control */
+ uint32_t ictl_pma; /* SERDES control */
+ uint32_t rsrvd3;
+ uint32_t ipd_multi_synth; /* SERDES control */
+ uint32_t irst; /* SERDES control */
+ uint32_t octl_multi_synthready; /* SERDES control */
+ uint32_t octl_multi_synthstatus; /* SERDES control */
+ uint32_t clk_out; /* SERDES control */
+ uint32_t rsrvd[47];
+};
+struct serdes_lane {
+ uint32_t rsrvd1[0x10 / sizeof(uint32_t)];
+ uint32_t octl_pma; /* SERDES status */
+ uint32_t ictl_multi_andme; /* SERDES control */
+ uint32_t ictl_multi_lb; /* SERDES control */
+ uint32_t ictl_multi_rxbist; /* SERDES control */
+ uint32_t ictl_multi_txbist; /* SERDES control */
+ uint32_t ictl_multi; /* SERDES control */
+ uint32_t ictl_multi_rxeq; /* SERDES control */
+ uint32_t ictl_multi_rxeq_l_low; /* SERDES control */
+ uint32_t ictl_multi_rxeq_l_high; /* SERDES control */
+ uint32_t ictl_multi_rxeyediag; /* SERDES control */
+ uint32_t ictl_multi_txdeemph; /* SERDES control */
+ uint32_t ictl_multi_txmargin; /* SERDES control */
+ uint32_t ictl_multi_txswing; /* SERDES control */
+ uint32_t idat_multi; /* SERDES control */
+ uint32_t ipd_multi; /* SERDES control */
+ uint32_t octl_multi_rxbist; /* SERDES control */
+ uint32_t octl_multi; /* SERDES control */
+ uint32_t octl_multi_rxeyediag; /* SERDES control */
+ uint32_t odat_multi_rxbist; /* SERDES control */
+ uint32_t odat_multi_rxeq; /* SERDES control */
+ uint32_t multi_rx_dvalid; /* SERDES control */
+ uint32_t reserved; /* SERDES control */
+ uint32_t rsrvd[6];
+};
+
+struct al_serdes_regs {
+ struct serdes_intc intc;
+ struct serdes_gen gen;
+ struct serdes_lane lane[4];
+};
+
+
+/*
+ * Registers Fields
+ */
+
+
+/**** version register ****/
+/* Revision number (Minor) */
+#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_MASK 0x000000FF
+#define SERDES_GEN_VERSION_RELEASE_NUM_MINOR_SHIFT 0
+/* Revision number (Major) */
+#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_MASK 0x0000FF00
+#define SERDES_GEN_VERSION_RELEASE_NUM_MAJOR_SHIFT 8
+/* date of release */
+#define SERDES_GEN_VERSION_DATE_DAY_MASK 0x001F0000
+#define SERDES_GEN_VERSION_DATE_DAY_SHIFT 16
+/* month of release */
+#define SERDES_GEN_VERSION_DATA_MONTH_MASK 0x01E00000
+#define SERDES_GEN_VERSION_DATA_MONTH_SHIFT 21
+/* year of release (starting from 2000) */
+#define SERDES_GEN_VERSION_DATE_YEAR_MASK 0x3E000000
+#define SERDES_GEN_VERSION_DATE_YEAR_SHIFT 25
+/* Reserved */
+#define SERDES_GEN_VERSION_RESERVED_MASK 0xC0000000
+#define SERDES_GEN_VERSION_RESERVED_SHIFT 30
+
+/**** reg_addr register ****/
+/* address value */
+#define SERDES_GEN_REG_ADDR_VAL_MASK 0x0000FFFF
+#define SERDES_GEN_REG_ADDR_VAL_SHIFT 0
+
+/**** reg_data register ****/
+/* data value */
+#define SERDES_GEN_REG_DATA_VAL_MASK 0x000000FF
+#define SERDES_GEN_REG_DATA_VAL_SHIFT 0
+
+/**** ICTL_MULTI_BIST register ****/
+
+#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_MASK 0x00000007
+#define SERDES_GEN_ICTL_MULTI_BIST_MODESEL_NT_SHIFT 0
+
+/**** ICTL_PCS register ****/
+
+#define SERDES_GEN_ICTL_PCS_EN_NT (1 << 0)
+
+/**** ICTL_PMA register ****/
+
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_MASK 0x00000007
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT 0
+
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_REF \
+ (0 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_R2L \
+ (3 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REF_SEL_NT_L2R \
+ (4 << (SERDES_GEN_ICTL_PMA_REF_SEL_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_MASK 0x00000070
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT 4
+
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_0 \
+ (0 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_REF \
+ (2 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_R2L \
+ (3 << (SERDES_GEN_ICTL_PMA_REFBUSRIGHT2LEFT_MODE_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_MASK 0x00000700
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT 8
+
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_0 \
+ (0 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_REF \
+ (2 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+#define SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_L2R \
+ (3 << (SERDES_GEN_ICTL_PMA_REFBUSLEFT2RIGHT_MODE_NT_SHIFT))
+
+#define SERDES_GEN_ICTL_PMA_TXENABLE_A (1 << 12)
+
+#define SERDES_GEN_ICTL_PMA_SYNTHCKBYPASSEN_NT (1 << 13)
+
+/**** IPD_MULTI_SYNTH register ****/
+
+#define SERDES_GEN_IPD_MULTI_SYNTH_B (1 << 0)
+
+/**** IRST register ****/
+
+#define SERDES_GEN_IRST_PIPE_RST_L3_B_A (1 << 0)
+
+#define SERDES_GEN_IRST_PIPE_RST_L2_B_A (1 << 1)
+
+#define SERDES_GEN_IRST_PIPE_RST_L1_B_A (1 << 2)
+
+#define SERDES_GEN_IRST_PIPE_RST_L0_B_A (1 << 3)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A (1 << 4)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A (1 << 5)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A (1 << 6)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A (1 << 7)
+
+#define SERDES_GEN_IRST_MULTI_HARD_SYNTH_B_A (1 << 8)
+
+#define SERDES_GEN_IRST_POR_B_A (1 << 12)
+
+#define SERDES_GEN_IRST_PIPE_RST_L3_B_A_SEL (1 << 16)
+
+#define SERDES_GEN_IRST_PIPE_RST_L2_B_A_SEL (1 << 17)
+
+#define SERDES_GEN_IRST_PIPE_RST_L1_B_A_SEL (1 << 18)
+
+#define SERDES_GEN_IRST_PIPE_RST_L0_B_A_SEL (1 << 19)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L3_B_A_SEL (1 << 20)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L2_B_A_SEL (1 << 21)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L1_B_A_SEL (1 << 22)
+
+#define SERDES_GEN_IRST_MULTI_HARD_TXRX_L0_B_A_SEL (1 << 23)
+
+/**** OCTL_MULTI_SYNTHREADY register ****/
+
+#define SERDES_GEN_OCTL_MULTI_SYNTHREADY_A (1 << 0)
+
+/**** OCTL_MULTI_SYNTHSTATUS register ****/
+
+#define SERDES_GEN_OCTL_MULTI_SYNTHSTATUS_A (1 << 0)
+
+/**** clk_out register ****/
+
+#define SERDES_GEN_CLK_OUT_SEL_MASK 0x0000003F
+#define SERDES_GEN_CLK_OUT_SEL_SHIFT 0
+
+/**** OCTL_PMA register ****/
+
+#define SERDES_LANE_OCTL_PMA_TXSTATUS_L_A (1 << 0)
+
+/**** ICTL_MULTI_ANDME register ****/
+
+#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_ANDME_EN_L_A_SEL (1 << 1)
+
+/**** ICTL_MULTI_LB register ****/
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXIOTIMEDEN_L_NT (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT (1 << 1)
+
+#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT (1 << 2)
+
+#define SERDES_LANE_ICTL_MULTI_LB_PARRX2TXTIMEDEN_L_NT (1 << 3)
+
+#define SERDES_LANE_ICTL_MULTI_LB_CDRCLK2TXEN_L_NT (1 << 4)
+
+#define SERDES_LANE_ICTL_MULTI_LB_TX2RXBUFTIMEDEN_L_NT_SEL (1 << 8)
+
+#define SERDES_LANE_ICTL_MULTI_LB_RX2TXUNTIMEDEN_L_NT_SEL (1 << 9)
+
+/**** ICTL_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXBIST_EN_L_A (1 << 0)
+
+/**** ICTL_MULTI_TXBIST register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXBIST_EN_L_A (1 << 0)
+
+/**** ICTL_MULTI register ****/
+
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_MASK 0x00000003
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SHIFT 0
+
+#define SERDES_LANE_ICTL_MULTI_PSTATE_L_SEL (1 << 2)
+
+#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_MASK 0x00000070
+#define SERDES_LANE_ICTL_MULTI_RXDATAWIDTH_L_SHIFT 4
+
+#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATAEN_L_A (1 << 8)
+
+#define SERDES_LANE_ICTL_MULTI_RXOVRCDRLOCK2DATA_L_A (1 << 9)
+
+#define SERDES_LANE_ICTL_MULTI_TXBEACON_L_A (1 << 12)
+
+#define SERDES_LANE_ICTL_MULTI_TXDETECTRXREQ_L_A (1 << 13)
+
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_MASK 0x00070000
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SHIFT 16
+
+#define SERDES_LANE_ICTL_MULTI_RXRATE_L_SEL (1 << 19)
+
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_MASK 0x00700000
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SHIFT 20
+
+#define SERDES_LANE_ICTL_MULTI_TXRATE_L_SEL (1 << 23)
+
+#define SERDES_LANE_ICTL_MULTI_TXAMP_L_MASK 0x07000000
+#define SERDES_LANE_ICTL_MULTI_TXAMP_L_SHIFT 24
+
+#define SERDES_LANE_ICTL_MULTI_TXAMP_EN_L (1 << 27)
+
+#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_MASK 0x70000000
+#define SERDES_LANE_ICTL_MULTI_TXDATAWIDTH_L_SHIFT 28
+
+/**** ICTL_MULTI_RXEQ register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_EN_L (1 << 0)
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_START_L_A (1 << 1)
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_MASK 0x00000070
+#define SERDES_LANE_ICTL_MULTI_RXEQ_PRECAL_CODE_SEL_SHIFT 4
+
+/**** ICTL_MULTI_RXEQ_L_high register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEQ_L_HIGH_VAL (1 << 0)
+
+/**** ICTL_MULTI_RXEYEDIAG register ****/
+
+#define SERDES_LANE_ICTL_MULTI_RXEYEDIAG_START_L_A (1 << 0)
+
+/**** ICTL_MULTI_TXDEEMPH register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_MASK 0x0003FFFF
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_L_SHIFT 0
+
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_MASK 0x7c0
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_ZERO_SHIFT 6
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_MASK 0xf000
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_PLUS_SHIFT 12
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_MASK 0x7
+#define SERDES_LANE_ICTL_MULTI_TXDEEMPH_C_MINUS_SHIFT 0
+
+/**** ICTL_MULTI_TXMARGIN register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_MASK 0x00000007
+#define SERDES_LANE_ICTL_MULTI_TXMARGIN_L_SHIFT 0
+
+/**** ICTL_MULTI_TXSWING register ****/
+
+#define SERDES_LANE_ICTL_MULTI_TXSWING_L (1 << 0)
+
+/**** IDAT_MULTI register ****/
+
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_MASK 0x0000000F
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SHIFT 0
+
+#define SERDES_LANE_IDAT_MULTI_TXELECIDLE_L_SEL (1 << 4)
+
+/**** IPD_MULTI register ****/
+
+#define SERDES_LANE_IPD_MULTI_TX_L_B (1 << 0)
+
+#define SERDES_LANE_IPD_MULTI_RX_L_B (1 << 1)
+
+/**** OCTL_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXBIST_DONE_L_A (1 << 0)
+
+#define SERDES_LANE_OCTL_MULTI_RXBIST_RXLOCKED_L_A (1 << 1)
+
+/**** OCTL_MULTI register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXCDRLOCK2DATA_L_A (1 << 0)
+
+#define SERDES_LANE_OCTL_MULTI_RXEQ_DONE_L_A (1 << 1)
+
+#define SERDES_LANE_OCTL_MULTI_RXREADY_L_A (1 << 2)
+
+#define SERDES_LANE_OCTL_MULTI_RXSTATUS_L_A (1 << 3)
+
+#define SERDES_LANE_OCTL_MULTI_TXREADY_L_A (1 << 4)
+
+#define SERDES_LANE_OCTL_MULTI_TXDETECTRXSTAT_L_A (1 << 5)
+
+#define SERDES_LANE_OCTL_MULTI_TXDETECTRXACK_L_A (1 << 6)
+
+#define SERDES_LANE_OCTL_MULTI_RXSIGNALDETECT_L_A (1 << 7)
+
+/**** OCTL_MULTI_RXEYEDIAG register ****/
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_MASK 0x00003FFF
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_STAT_L_A_SHIFT 0
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_DONE_L_A (1 << 16)
+
+#define SERDES_LANE_OCTL_MULTI_RXEYEDIAG_ERR_L_A (1 << 17)
+
+/**** ODAT_MULTI_RXBIST register ****/
+
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_MASK 0x0000FFFF
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_L_A_SHIFT 0
+
+#define SERDES_LANE_ODAT_MULTI_RXBIST_ERRCOUNT_OVERFLOW_L_A (1 << 16)
+
+/**** ODAT_MULTI_RXEQ register ****/
+
+#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_MASK 0x00003FFF
+#define SERDES_LANE_ODAT_MULTI_RXEQ_BEST_EYE_VAL_L_A_SHIFT 0
+
+/**** MULTI_RX_DVALID register ****/
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_CDR_LOCK (1 << 0)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_SIGNALDETECT (1 << 1)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_TX_READY (1 << 2)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_READY (1 << 3)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_SYNT_READY (1 << 4)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MASK_RX_ELECIDLE (1 << 5)
+
+#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_MASK 0x00FF0000
+#define SERDES_LANE_MULTI_RX_DVALID_MUX_SEL_SHIFT 16
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_00_SEL (1 << 24)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_00_VAL (1 << 25)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_01_SEL (1 << 26)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_01_VAL (1 << 27)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_10_SEL (1 << 28)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_10_VAL (1 << 29)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_11_SEL (1 << 30)
+
+#define SERDES_LANE_MULTI_RX_DVALID_PS_11_VAL (1 << 31)
+
+/**** reserved register ****/
+
+#define SERDES_LANE_RESERVED_OUT_MASK 0x000000FF
+#define SERDES_LANE_RESERVED_OUT_SHIFT 0
+
+#define SERDES_LANE_RESERVED_IN_MASK 0x00FF0000
+#define SERDES_LANE_RESERVED_IN_SHIFT 16
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_serdes_REG_H */
+
+
+
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_ssm.h b/arch/arm/mach-alpine/include/al_hal/al_hal_ssm.h
new file mode 100644
index 0000000..c4c7e5c
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_ssm.h
@@ -0,0 +1,172 @@
+/*******************************************************************************
+Copyright (C) 2014 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_ssm_api API
+ * Cryptographic / RAID Acceleration Engine common HAL API
+ * @ingroup group_ssm
+ * @{
+ * @file al_hal_ssm.h
+ */
+
+#ifndef __AL_HAL_SSM_H__
+#define __AL_HAL_SSM_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma.h"
+#include "al_hal_m2m_udma.h"
+
+#define AL_SSM_MAX_SRC_DESCS 31
+#define AL_SSM_MAX_DST_DESCS 31
+
+enum al_ssm_op_flags {
+ AL_SSM_INTERRUPT = AL_BIT(0), /* enable interrupt when the xaction
+ completes */
+ AL_SSM_BARRIER = AL_BIT(1), /* data memory barrier, subsequent xactions
+ will be served only when the current one
+ completes */
+ AL_SSM_SRC_NO_SNOOP = AL_BIT(2), /* set no snoop on source buffers */
+ AL_SSM_DEST_NO_SNOOP = AL_BIT(3), /* set no snoop on destination buffers */
+};
+
+/** SSM queue types.
+ * must be statically allocated and queue type can not be changed in run
+ * time */
+enum al_ssm_q_type {
+ AL_CRYPT_AUTH_Q,
+ AL_MEM_CRC_MEMCPY_Q,
+ AL_RAID_Q
+};
+
+/** SSM (security, storage, memory) DMA private data structure
+ * The driver maintains M2M UDMA structure as the HW consists of two UDMAS.
+ * both of the UDMAs initializes and managed using the m2m udma module.
+ * the driver uses RX completion descriptors as the sole indication for
+ * completing transactions, and disregards any TX completion descriptors.
+ * Every queue can be marked as crypt/auth queue to be used for crypt/auth
+ * transactions or crc/csum/memcpy queue to be used for crc/csum/memcpy
+ * transactions or raid queue to be used for raid transactions
+ */
+struct al_ssm_dma {
+ uint16_t dev_id; /** in XOR */
+
+/* Tx (M2S) word3 Descriptors -> Out XOR */
+
+#define AL_CRC_CHECKSUM 2
+
+#define RX_COMP_STATUS_MASK 0
+
+/** CRC/checksum operation type according, values according
+ * to HW descriptor setting
+ */
+enum al_crc_checksum_type {
+ AL_CRC_CHECKSUM_NULL = 0,
+ AL_CRC_CHECKSUM_CRC32 = 1,
+ AL_CRC_CHECKSUM_CRC32C = 2,
+ AL_CRC_CHECKSUM_CKSM16 = 3
+};
+
+/** CRC/Checksum Operation bit/byte swap */
+enum al_crcsum_swap_flags {
+ IV_BIT_SWAP = AL_BIT(7),
+ IV_BYTE_SWAP = AL_BIT(6),
+ SRC_BIT_SWAP = AL_BIT(5),
+ SRC_BYTE_SWAP = AL_BIT(4),
+ RES_BIT_SWAP = AL_BIT(1),
+ RES_BYTE_SWAP = AL_BIT(0)
+};
+
+/** Memcpy transaction
+ */
+struct al_memcpy_transaction {
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ struct al_block dst; /**< Out data - scatter gather*/
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field set by the hal */
+};
+
+/** CRC/Checksum transaction
+ * In case dst, iv_in, crc_out are not valid, set the al_buf->len to 0
+ * When the dst is not empty the src will be copied to the dst and CRC/checksum
+ * will be calculated on the fly.
+ */
+struct al_crc_transaction {
+	enum al_crc_checksum_type crcsum_type; /**< CRC/Checksum type */
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ struct al_block dst; /**< Out data - scatter gather*/
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field set by the hal */
+
+ /**
+ * Virtual machine ID for misc buffers below - input and output data
+ * blocks contains vmid inside the block structure
+ */
+ uint16_t misc_vmid;
+
+ struct al_buf crc_iv_in; /**< CRC IV, if not set will use from cache */
+ uint32_t cached_crc_indx;/**< cached CRC index in crypto engine */
+ al_bool save_crc_iv; /**< Save IV in the cache */
+ al_bool st_crc_out; /**< Store CRC out in the cache */
+ struct al_buf crc_expected; /**< Expected CRC to validate */
+ struct al_buf crc_out; /**< Calculated CRC/Checksum buffer */
+
+ /* Enhanced */
+ enum al_crcsum_swap_flags swap_flags; /**< Swap fields */
+ al_bool xor_valid; /**< valid in and out XOR */
+ uint32_t in_xor; /**< CRC Input XOR */
+ uint32_t res_xor; /**< CRC Result XOR */
+};
+
+/**
+ * Send Memcpy transaction to the HW
+ *
+ * Perform the following steps:
+ * - Calculate the number of needed RX descriptors and check if the RX UDMA
+ * have available space.
+ * - Do the same for TX descriptors.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA about the
+ * new prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: A given UDMA queue can be used either for crypto/authentication
+ * transactions or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found.
+ * <0 otherwise.
+ */
+int al_memcpy_prepare(struct al_ssm_dma *dma, uint32_t qid,
+ struct al_memcpy_transaction *xaction);
+
+/**
+ * Send CRC/Checksum transaction to the HW
+ *
+ * Perform the following steps:
+ * - Calculate the number of needed RX descriptors and check if the RX UDMA
+ * have available space.
+ * The number of descriptors depends on which buffers are passed in
+ * the transaction (crc_out) and the number of dest buffers.
+ * - Do the same for TX descriptors. The number of descriptors depends on
+ * which buffers are passed in the transaction (crc_iv_in, crc_expected) and
+ * the number of source buffers.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA
+ * about the new prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: A given UDMA queue can be used either for crypto/authentication
+ * transactions or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found.
+ * -ENOSPC if no space available.
+ */
+int al_crc_csum_prepare(struct al_ssm_dma *dma, uint32_t qid,
+ struct al_crc_transaction *xaction);
+
+/**
+ * Start asynchronous execution of crypto/auth or CRC/Checksum transaction
+ *
+ * Update the tail pointer of the submission ring of the TX UDMA about
+ * previously prepared descriptors.
+ * This function could return before the hardware start the work as its an
+ * asynchronous non-blocking call to the hardware.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *
+ * @return 0 if no error found.
+ *		-EINVAL if qid is out of range
+ */
+int al_crc_memcpy_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+ int tx_descs);
+
+/**
+ * Check and cleanup completed transaction
+ *
+ * when the upper layer decides to check for completed transaction
+ * (e.g. due to interrupt) it calls al_crypto_dma_completion()
+ * API function provided by this driver. this function will call helper
+ * function provided by the m2m_udma module to check for completed requests.
+ * The al_crypto_dma_completion() is responsible for the cleanup of the
+ * completed request from the completion ring, so the upper layer doesn't
+ * need to worry about queue management.
+ * This driver doesn't provide the upper layer which transaction was
+ * completed, the upper layer should find this information by itself relying
+ * on the fact that, for a given queue, transactions complete in the same
+ * order they were sent to that queue; no ordering is guaranteed between
+ * transactions sent to different queues.
+
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by rx completion descriptor
+ *
+ * @return the number of completed transactions.
+ */
+int al_crc_memcpy_dma_completion(struct al_ssm_dma *dma,
+ uint32_t qid,
+ uint32_t *comp_status);
+
+ /* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of crc_memcpy group */
+#endif /* __AL_HAL_SSM_CRC_MEMCPY_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_ssm_crypto.h b/arch/arm/mach-alpine/include/al_hal/al_hal_ssm_crypto.h
new file mode 100644
index 0000000..2d11aed
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_ssm_crypto.h
@@ -0,0 +1,432 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_crypto_api API
+ * Cryptographic Acceleration Engine HAL driver API
+ * @ingroup group_crypto
+ * @{
+ * @file al_hal_ssm_crypto.h
+ */
+
+#ifndef __AL_HAL_CRYPT_H__
+#define __AL_HAL_CRYPT_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma.h"
+#include "al_hal_m2m_udma.h"
+#include "al_hal_ssm.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define CRYPTO_DEBUG
+
+#ifdef CRYPTO_DEBUG
+#define al_debug al_dbg
+#else
+#define al_debug(...)
+#endif
+
+/* PCI Adapter Device/Revision ID */
+#define AL_CRYPTO_DEV_ID 0x0011
+#define AL_CRYPTO_REV_ID_0 0
+#define AL_CRYPTO_REV_ID_1 1
+
+#define CACHED_SAD_SIZE 16
+#define CRC_IV_CACHE_SIZE 8
+
+/** How many descriptors to save between head and tail in case of
+ * wrap around.
+ */
+#define AL_CRYPT_DESC_RES 0
+
+/* Application IOFIC definitions */
+#define AL_CRYPTO_APP_REGS_BASE_OFFSET 0x800
+#define AL_CRYPTO_APP_IOFIC_OFFSET 0x0
+
+/* interrupt controller group A */
+#define AL_CRYPTO_APP_INT_A_S2M_TIMOUT AL_BIT(0)
+#define AL_CRYPTO_APP_INT_A_M2S_TIMOUT AL_BIT(1)
+#define AL_CRYPTO_APP_INT_A_EOP_WITHOUT_SOP AL_BIT(2)
+#define AL_CRYPTO_APP_INT_A_SOP_WITHOUT_EOP AL_BIT(3)
+#define AL_CRYPTO_APP_INT_A_SOP_WITH_EOP_TOGETHER AL_BIT(4)
+#define AL_CRYPTO_APP_INT_A_UNMAP_PROTOCOL AL_BIT(5)
+#define AL_CRYPTO_APP_INT_A_FIFO_OVERRUN AL_BIT(6)
+#define AL_CRYPTO_APP_INT_A_ALL \
+ AL_CRYPTO_APP_INT_A_S2M_TIMOUT | \
+ AL_CRYPTO_APP_INT_A_M2S_TIMOUT | \
+ AL_CRYPTO_APP_INT_A_EOP_WITHOUT_SOP | \
+ AL_CRYPTO_APP_INT_A_SOP_WITHOUT_EOP | \
+ AL_CRYPTO_APP_INT_A_SOP_WITH_EOP_TOGETHER | \
+ AL_CRYPTO_APP_INT_A_UNMAP_PROTOCOL | \
+ AL_CRYPTO_APP_INT_A_FIFO_OVERRUN
+
+/** Crypto modes, auth, enc or enc+Auth */
+enum al_crypto_sa_op {
+ AL_CRYPT_RES = 0,
+ AL_CRYPT_ENC_ONLY = 1,
+ AL_CRYPT_AUTH_ONLY = 2,
+ AL_CRYPT_ENC_AUTH = 3
+};
+
+/** Encryption types */
+enum al_crypto_sa_enc_type {
+ AL_CRYPT_DES_ECB = 0,
+ AL_CRYPT_DES_CBC = 1,
+ AL_CRYPT_TRIPDES_ECB = 2,
+ AL_CRYPT_TRIPDES_CBC = 3,
+ AL_CRYPT_AES_ECB = 4,
+ AL_CRYPT_AES_CBC = 5,
+ AL_CRYPT_AES_CTR = 6,
+ AL_CRYPT_AES_CCM = 7,
+ AL_CRYPT_AES_GCM = 8,
+ AL_CRYPT_MAX = 9
+};
+
+/** 3des modes */
+enum al_crypto_sa_tripdes_m {
+ AL_CRYPT_TRIPDES_EDE = 1
+};
+
+/** AES key sizes */
+enum al_crypto_sa_aes_ksize {
+ AL_CRYPT_AES_128 = 0,
+ AL_CRYPT_AES_192 = 1,
+ AL_CRYPT_AES_256 = 2
+};
+
+/** Authentication types */
+enum al_crypto_sa_auth_type {
+ AL_CRYPT_AUTH_MD5 = 0,
+ AL_CRYPT_AUTH_SHA1 = 1,
+ AL_CRYPT_AUTH_SHA2 = 2,
+ AL_CRYPT_AUTH_AES_CCM = 5,
+ AL_CRYPT_AUTH_AES_GCM = 6
+};
+
+/** SHA2 key sizes */
+enum al_crypto_sa_sha2_mode {
+ AL_CRYPT_SHA2_256 = 0,
+ AL_CRYPT_SHA2_384 = 1,
+ AL_CRYPT_SHA2_512 = 2
+};
+
+/** CNTR size */
+enum al_crypto_cntr_size {
+ AL_CRYPT_CNTR_16_BIT = 0,
+ AL_CRYPT_CNTR_32_BIT = 1,
+ AL_CRYPT_CNTR_64_BIT = 2,
+ AL_CRYPT_CNTR_128_BIT = 3
+};
+
+/** Crypto SA (Security Association) parameters match the HW crypto SAD
+ * The cached SAD is not managed by the HAL, The HAL only supply the ability
+ * to push new SA to the cached SAD and evict a cached SAD through the
+ * al_crypto_dma_action API.
+ * Evicting an SA may be required in the following cases:
+ * - Each time SA is evicted while using IV generated by the Crypto engine
+ * - Each time SA is evicted while using the SA to hold a temp MAC signature
+ * - On the first time SA is evicted when using AES decryption key
+ * generated by the HW
+ * Fetching an SA can be done by pushing a new SA entry through
+ * al_crypto_transaction SA_in and placing an appropriate buffer in the
+ * SA_out.
+ * Initializing a new SA entry should be done through al_crypto_hw_sa_init.
+ *
+ */
+struct al_crypto_sa {
+ enum al_crypto_sa_op sa_op; /**< crypto operation */
+
+ /* Enc */
+ enum al_crypto_sa_enc_type enc_type;
+ enum al_crypto_sa_tripdes_m tripdes_m; /**< 3des mode EDE */
+ enum al_crypto_sa_aes_ksize aes_ksize;
+ enum al_crypto_cntr_size cntr_size; /**< relevant only for Alg using
+ CNTR mode*/
+
+ uint32_t enc_offset; /**<
+ enc start offset from start of buffer,
+ used only if not set through the crypto operation */
+ uint32_t enc_offset_eop; /**<
+ enc offset from end of buffer,
+ used only if not set through the crypto operation */
+
+ uint8_t enc_key[32];
+ uint8_t enc_iv[16];
+
+ /* Auth */
+ enum al_crypto_sa_auth_type auth_type;
+ enum al_crypto_sa_sha2_mode sha2_mode;
+ al_bool auth_hmac_en;
+ uint32_t signature_size; /**< sign size out in 4 * (size + 1) bytes */
+ al_bool auth_signature_msb; /**< when the signature output size is smaller than
+ the authentication algorithm output size take the more significant
+ bits from the full size signature */
+ uint32_t auth_offset; /**<
+ auth start offset from start of buffer,
+ used only if not set through the crypto operation */
+ uint32_t auth_offset_eop;/**<
+ auth offset from end of buffer,
+ used only if not set through the crypto operation */
+ uint8_t auth_iv_in[64];
+ uint8_t hmac_iv_in[64]; /**< H(K xor ipad) */
+ uint8_t hmac_iv_out[64];/**< H(K xor opad) */
+ uint8_t enc_ccm_cbc_iv_add[4]; /**<
+ Used in CCM to generate Auth IV from encryption IV */
+ uint8_t aes_gcm_auth_iv[16]; /**< GCM auth IV */
+
+ /* Combined */
+ al_bool sign_after_enc; /**< common case is true */
+ al_bool auth_after_dec; /**< common case is false */
+
+};
+
+/** A single Crypto SA HW as cached in the SAD, each SA is described as an
+ * array of 32-bit words */
+struct al_crypto_hw_sa {
+ uint32_t sa_word[64];
+};
+
+/** Crypto operation direction, values according to HW descriptor setting */
+enum al_crypto_dir {
+ AL_CRYPT_ENCRYPT = 0,
+ AL_CRYPT_DECRYPT = 1,
+};
+
+/* transaction completion status */
+#define AL_CRYPT_AUTH_ERROR AL_BIT(0)
+#define AL_CRYPT_SA_IV_EVICT_FIFO_ERROR AL_BIT(8)
+#define AL_CRYPT_DES_ILLEGAL_KEY_ERROR AL_BIT(9)
+#define AL_CRYPT_M2S_ERROR AL_BIT(10)
+#define AL_CRYPT_SRAM_PARITY_ERROR AL_BIT(11)
+#define AL_CRYPT_INTERNAL_FLOW_VIOLATION_ERROR AL_BIT(15)
+
+/** Crypto transaction for enc, auth or enc+auth.
+ * In case sa_update, iv_*, auth_* are not valid, set the al_buf->len to 0
+ * In case dst is not valid set the al_block->num to 0.
+ *
+ * All Crypto transaction are associated with a cached SA,
+ * this SA is passed as the SA index into the cached SAD sa_index.
+ *
+ * The al_crypto_dma_action support source scatter list buffer encryption,
+ * authentication and encryption and authentication in one pass.
+ *
+ * When using an SA type of Authentication only, the Crypto can support
+ * splitting the Authentication operation into few requests by using the
+ * auth_first, last and valid flags and using an Authentication IV auth_iv_in
+ * and auth_iv_out.
+ *
+ * When using an SA type of Encryption (Enc only or enc+Auth), the Crypto
+ * can get the IV required for the encryption from the upper layer enc_iv_in,
+ * or using IV generated by the engine (based on the previous encryption
+ * executed using this SA). In any case the IV used by the engine can be
+ * passed to the upper layer through the enc_iv_out.
+ *
+ * When executing a signature verification operation on an SA type of
+ * Encryption and Authentication or of Authentication Only with last
+ * indication, the crypto can compare the actual buffer signature
+ * auth_sign_in to the engine outcome and indicate the result on the S2M
+ * completion. In any case the engine signature can be passed to the upper
+ * layer through the auth_sign_out.
+ *
+ * When using Authentication only and dst isn't empty the src will be copied
+ * to the dst.
+ */
+struct al_crypto_transaction {
+ enum al_crypto_dir dir;
+ enum al_ssm_op_flags flags;
+ struct al_block src; /**< In data - scatter gather*/
+ uint32_t src_size; /**< Size of source buffer */
+ struct al_block dst; /**< Out data - scatter gather */
+ uint32_t tx_descs_count;/* number of tx descriptors created for this */
+ /* transaction, this field set by the hal */
+
+ /**
+ * Virtual machine ID for misc buffers below - input and output data
+ * blocks contains vmid inside the block structure
+ */
+ uint16_t misc_vmid;
+
+ /* SA */
+ uint32_t sa_indx; /**< SA index in the cached SAD to use */
+ struct al_buf sa_in; /**< pointer to SA al_crypto_hw_sa to
+ update in the cached SAD */
+ struct al_buf sa_out; /**< pointer to SA where to place
+ old cached SA */
+
+ /* Enc */
+ struct al_buf enc_iv_in; /**< IV from user, if not set will
+ use IV from the SA */
+ struct al_buf enc_iv_out; /**< Optional - Buffer to place
+ the used IV */
+ struct al_buf enc_next_iv_out; /**< Optional - Buffer to place
+ next used IV */
+ uint32_t enc_in_off; /**< offset where to start enc */
+ uint32_t enc_in_len; /**<
+ length of enc, if len set to 0 will use SA defaults */
+
+ /* Auth */
+ al_bool auth_fl_valid; /**<
+ valid indication for the auth first and last
+ indications */
+ al_bool auth_first; /**< Relevant for auth only SA */
+ al_bool auth_last; /**< Relevant for auth only SA */
+ struct al_buf auth_iv_in; /**< In case of auth only SA and
+ auth_first isn't set this is the
+ intermediate auth input. */
+ struct al_buf auth_iv_out; /**< In case of auth only SA and
+ auth_last isn't set, this is the
+ intermediate auth output. */
+ struct al_buf auth_sign_in; /**< The Signature to validate in front
+ of auth output.*/
+ struct al_buf auth_sign_out; /**< In case of combined enc and
+ auth SA or Auth only SA with last
+ this is the signature output
+ of the auth.
+ Size should be as indicated in the
+ SA signature_size
+ (sign_size+1)*4 */
+ uint32_t auth_in_off; /**< offset where to start auth */
+ uint32_t auth_in_len; /**<
+ length of auth, if set to 0 will use SA defaults */
+ uint32_t auth_bcnt; /**<
+ This field should be zero, unless this
+ packet is for an AUTH only SA with
+ last and not first.
+ In this case it will indicate the byte
+ count of the auth data till this point.
+ When using this field the auth off and len
+ must contain valid data */
+
+
+};
+
+/**
+ * Initialize a single hw_sa
+ *
+ * @param sa crypto SA containing the desired SA parameters
+ * @param hw_sa crypto HW SA filled with zeros
+ * to be initialized according to the sa
+ *
+ * @return 0 if no error found,
+ * -EINVAL otherwise.
+ */
+int al_crypto_hw_sa_init(struct al_crypto_sa *sa,
+ struct al_crypto_hw_sa *hw_sa);
+
+/**
+ * Prepare crypto/auth transaction to the HW
+ *
+ * Perform the following steps:
+ * - Calculate needed RX descriptors and check if the RX UDMA have available
+ * space. The number of descriptors depends on which buffers are passed in
+ * the transaction (SA_out, enc_IV_out, Sign_out) and the number of dest
+ * buffers.
+ * - Do the same for TX descriptors. The number of descriptors depends on
+ * which buffers are passed in the transaction (SA_in, enc_IV_in, Sign_in)
+ * and the number of source buffers.
+ * - Prepare the RX descriptors.
+ * - Update the tail pointer of the submission ring of the RX UDMA
+ * about the new prepared descriptors.
+ * - Prepare the TX descriptors.
+ *
+ * Note: UDMA queue can be used either for crypto/authentication transactions
+ * or for crc/csum/memcpy transactions, but not for both types.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param xaction transaction context, number of prepared TX descriptors is
+ * returned in xaction->tx_descs_count
+ *
+ * @return 0 if no error found.
+ * -ENOSPC if no space available.
+ */
+int al_crypto_dma_prepare(struct al_ssm_dma *dma, uint32_t qid,
+ struct al_crypto_transaction *xaction);
+
+/**
+ * Start asynchronous execution of crypto/auth or CRC/Checksum transaction
+ *
+ * Update the tail pointer of the submission ring of the TX UDMA about
+ * previously prepared descriptors.
+ * This function could return before the hardware starts the work, as it's an
+ * asynchronous non-blocking call to the hardware.
+ *
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *
+ * @return 0 if no error found.
+ * -EINVAL if qid is out of range
+ */
+int al_crypto_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+ int tx_descs);
+
+/**
+ * Check and cleanup completed transaction
+ *
+ * when the upper layer decides to check for completed transaction
+ * (e.g. due to interrupt) it calls al_crypto_dma_completion()
+ * API function provided by this driver. this function will call helper
+ * function provided by the m2m_udma module to check for completed requests.
+ * The al_crypto_dma_completion() is responsible for the cleanup of the
+ * completed request from the completion ring, so upper layer don't need to
+ * worry about the queues management.
+ * This driver doesn't provide the upper layer which transaction was
+ * completed, the upper layer should find this information by itself relying
+ * on the fact that for a given queue, the transaction completed in the same
+ * order it was sent to the same queue, no ordering is guaranteed between
+ * transaction that sent to different queues.
+
+ * @param dma crypto DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by rx completion descriptor
+ *
+ * @return the number of completed transactions.
+ */
+int al_crypto_dma_completion(struct al_ssm_dma *dma,
+ uint32_t qid,
+ uint32_t *comp_status);
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Crypto group */
+#endif /* __AL_HAL_CRYPT_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_types.h b/arch/arm/mach-alpine/include/al_hal/al_hal_types.h
new file mode 100644
index 0000000..3732b64
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_types.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_common HAL Common Layer
+ * @{
+ * @file al_hal_types.h
+ *
+ * @brief macros used by HALs and platform layer
+ *
+ */
+
+#ifndef __AL_HAL_TYPES_H__
+#define __AL_HAL_TYPES_H__
+
+#include "al_hal_plat_types.h"
+#include "al_hal_plat_services.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* Common defines */
+
+typedef int AL_RETURN;
+
+#if !defined(NULL)
+#define NULL (void *)0
+#endif
+
+#if !defined(likely)
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif
+
+
+#ifdef __GNUC__
+#if !defined(__packed)
+#define __packed __attribute__ ((packed))
+#endif
 /* packed and aligned types */
+#define __packed_a4 __attribute__ ((packed, aligned(4)))
+#define __packed_a8 __attribute__ ((packed, aligned(8)))
+#define __packed_a16 __attribute__ ((packed, aligned(16)))
+
+#else
+#if !defined(__packed)
+#error "__packed is not defined!!"
+#endif
+#endif
+
+#if !defined(__iomem)
+#define __iomem
+#endif
+
+#if !defined(__cache_aligned)
+#ifdef __GNUC__
+#define __cache_aligned __attribute__ ((__aligned__(64)))
+#else
+#define __cache_aligned
+#endif
+#endif
+
+#if !defined(INLINE)
+#ifdef __GNUC__
+#define INLINE inline
+#else
+#define INLINE
+#endif
+#endif
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of Common group */
+#endif /* __AL_HAL_TYPES_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma.h
new file mode 100644
index 0000000..a8f4ea5
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma.h
@@ -0,0 +1,651 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_api API
+ * @ingroup group_udma
+ * UDMA API
+ * @{
+ * @}
+ *
+ * @defgroup group_udma_main UDMA Main
+ * @ingroup group_udma_api
+ * UDMA main API
+ * @{
+ * @file al_hal_udma.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_H__
+#define __AL_HAL_UDMA_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma_regs.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+#define DMA_MAX_Q 4
+#define AL_UDMA_MIN_Q_SIZE 4
+#define AL_UDMA_MAX_Q_SIZE (1 << 16) /* hw can do more, but we limit it */
+
+/* Default Max number of descriptors supported per action */
+#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS 16
+
+#define DMA_RING_ID_MASK 0x3
+/* New registers ?? */
+/* Statistics - TBD */
+
+/** UDMA submission descriptor */
+union al_udma_desc {
+ /* TX */
+ struct {
+ uint32_t len_ctrl;
+ uint32_t meta_ctrl;
+ uint64_t buf_ptr;
+ } tx;
+ /* TX Meta, used by upper layer */
+ struct {
+ uint32_t len_ctrl;
+ uint32_t meta_ctrl;
+ uint32_t meta1;
+ uint32_t meta2;
+ } tx_meta;
+ /* RX */
+ struct {
+ uint32_t len_ctrl;
+ uint32_t buf2_ptr_lo;
+ uint64_t buf1_ptr;
+ } rx;
+} __packed_a16;
+
+/* TX desc length and control fields */
+
+#define AL_M2S_DESC_CONCAT AL_BIT(31) /* concatenate */
+#define AL_M2S_DESC_DMB AL_BIT(30)
+ /** Data Memory Barrier */
+#define AL_M2S_DESC_NO_SNOOP_H AL_BIT(29)
+#define AL_M2S_DESC_INT_EN AL_BIT(28) /** enable interrupt */
+#define AL_M2S_DESC_LAST AL_BIT(27)
+#define AL_M2S_DESC_FIRST AL_BIT(26)
+#define AL_M2S_DESC_RING_ID_SHIFT 24
+#define AL_M2S_DESC_RING_ID_MASK (0x3 << AL_M2S_DESC_RING_ID_SHIFT)
+#define AL_M2S_DESC_META_DATA AL_BIT(23)
+#define AL_M2S_DESC_DUMMY AL_BIT(22) /* for Metadata only */
+#define AL_M2S_DESC_LEN_ADJ_SHIFT 20
+#define AL_M2S_DESC_LEN_ADJ_MASK (0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT)
+#define AL_M2S_DESC_LEN_SHIFT 0
+#define AL_M2S_DESC_LEN_MASK (0xfffff << AL_M2S_DESC_LEN_SHIFT)
+
+#define AL_S2M_DESC_NO_SNOOP_H AL_BIT(29)
+#define AL_S2M_DESC_INT_EN AL_BIT(28) /** enable interrupt */
+#define AL_S2M_DESC_RING_ID_SHIFT 24
+#define AL_S2M_DESC_RING_ID_MASK (0x3 << AL_S2M_DESC_RING_ID_SHIFT)
+#define AL_S2M_DESC_LEN_SHIFT 0
+#define AL_S2M_DESC_LEN_MASK (0xffff << AL_S2M_DESC_LEN_SHIFT)
+
+/* TX/RX descriptor VMID field (in the buffer address 64 bit field) */
+#define AL_UDMA_DESC_VMID_SHIFT 48
+
+/** UDMA completion descriptor */
+union al_udma_cdesc {
+ /* TX completion */
+ struct {
+ uint32_t ctrl_meta;
+ } al_desc_comp_tx;
+ /* RX completion */
+ struct {
+ /* TBD */
+ uint32_t ctrl_meta;
+ } al_desc_comp_rx;
+} __packed_a4;
+
+/* TX/RX common completion desc ctrl_meta fields */
+#define AL_UDMA_CDESC_ERROR AL_BIT(31)
+#define AL_UDMA_CDESC_LAST AL_BIT(27)
+#define AL_UDMA_CDESC_FIRST AL_BIT(26)
+/* word 2 */
+#define AL_UDMA_CDESC_BUF2_USED AL_BIT(31)
+/** Basic Buffer structure */
+struct al_buf {
+ al_phys_addr_t addr; /**< Buffer physical address */
+ uint32_t len; /**< Buffer length in bytes */
+};
+
+/** Block is a set of buffers that belong to same source or destination */
+struct al_block {
+ struct al_buf *bufs; /**< The buffers of the block */
+ uint32_t num; /**< Number of buffers of the block */
+
+ /**<
+ * VMID to be assigned to the block descriptors
+ * Requires VMID in descriptor to be enabled for the specific UDMA
+ * queue.
+ */
+ uint16_t vmid;
+};
+
+/** UDMA type */
+enum al_udma_type {
+ UDMA_TX,
+ UDMA_RX
+};
+
+/** UDMA state */
+enum al_udma_state {
+ UDMA_DISABLE = 0,
+ UDMA_IDLE,
+ UDMA_NORMAL,
+ UDMA_ABORT,
+ UDMA_RESET
+};
+
+extern const char *const al_udma_states_name[];
+
+/** UDMA Q specific parameters from upper layer */
+struct al_udma_q_params {
+ uint32_t size; /**< ring size (in descriptors), submission and
+ * completion rings must have same size
+ */
+ union al_udma_desc *desc_base; /**< cpu address for submission ring
+ * descriptors
+ */
+ al_phys_addr_t desc_phy_base; /**< submission ring descriptors
+ * physical base address
+ */
+ uint8_t *cdesc_base; /**< completion descriptors pointer, NULL */
+ /* means no completion update */
+ al_phys_addr_t cdesc_phy_base; /**< completion descriptors ring
+ * physical base address
+ */
+ uint32_t cdesc_size; /**< size (in bytes) of a single dma completion
+ * descriptor
+ */
+
+ uint16_t dev_id; /**next_cdesc_idx - (udma_q->next_desc_idx + 1);
+ tmp &= udma_q->size_mask;
+
+ return (uint32_t) tmp;
+}
+
+/**
+ * check if queue has pending descriptors
+ *
+ * @param udma_q queue handle
+ *
+ * @return AL_TRUE if all previously submitted descriptors have been completed
+ * (and acked), i.e. the queue has no pending descriptors. AL_FALSE otherwise.
+ */
+static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q)
+{
+ /* empty means the completion index has caught up with the submission
+ * index, i.e. no submitted descriptor is still pending completion */
+ if (((udma_q->next_cdesc_idx - udma_q->next_desc_idx) &
+ udma_q->size_mask) == 0)
+ return AL_TRUE;
+
+ return AL_FALSE;
+}
+
+/**
+ * get next available descriptor
+ * @param udma_q queue handle
+ *
+ * @return pointer to the next available descriptor
+ */
+static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q)
+{
+ union al_udma_desc *desc;
+ uint16_t next_desc_idx;
+
+ al_assert(udma_q);
+
+ /* NOTE(review): no free-space check is done here - presumably the
+ * caller must verify availability before allocating; confirm against
+ * the callers of this API */
+ next_desc_idx = udma_q->next_desc_idx;
+ desc = udma_q->desc_base_ptr + next_desc_idx;
+
+ next_desc_idx++;
+
+ /* if reached end of queue, wrap around */
+ udma_q->next_desc_idx = next_desc_idx & udma_q->size_mask;
+
+ return desc;
+}
+
+/**
+ * get ring id for the last allocated descriptor
+ * @param udma_q
+ *
+ * @return ring id for the last allocated descriptor
+ * this function must be called each time a new descriptor is allocated
+ * by the al_udma_desc_get(), unless ring id is ignored.
+ */
+static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q)
+{
+ uint32_t ring_id;
+
+ al_assert(udma_q);
+
+ ring_id = udma_q->desc_ring_id;
+
+ /* calculate the ring id of the next desc */
+ /* if next_desc points to first desc, then queue wrapped around */
+ /* note: the whole wrap condition belongs inside unlikely(); the old
+ * form unlikely(udma_q->next_desc_idx) == 0 hinted the wrong
+ * expression to the compiler (same result, wrong branch hint) */
+ if (unlikely(udma_q->next_desc_idx == 0))
+ udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) &
+ DMA_RING_ID_MASK;
+ return ring_id;
+}
+
+/* add DMA action - trigger the engine */
+/**
+ * add num descriptors to the submission queue.
+ *
+ * @param udma_q queue handle
+ * @param num number of descriptors to add to the queues ring.
+ *
+ * @return 0;
+ */
+static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q,
+ uint32_t num)
+{
+ uint32_t *addr;
+
+ al_assert(udma_q);
+ al_assert((num > 0) && (num <= udma_q->size));
+
+ /* doorbell: writing the increment to drtp_inc tells the engine how
+ * many new descriptors were added to the submission ring */
+ addr = &udma_q->q_regs->rings.drtp_inc;
+ /* make sure data written to the descriptors will be visible by the */
+ /* DMA */
+ al_local_data_memory_barrier();
+
+ /*
+ * As we explicitly invoke the synchronization function
+ * (al_data_memory_barrier()), then we can use the relaxed version.
+ */
+ al_reg_write32_relaxed(addr, num);
+
+ return 0;
+}
+
+#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
+#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)
+
+/**
+ * return pointer to the cdesc + offset descriptors. wrap around when needed.
+ *
+ * @param udma_q queue handle
+ * @param cdesc pointer that set by this function
+ * @param offset offset in descriptors
+ *
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc *cdesc,
+ uint32_t offset)
+{
+ volatile uint8_t *tmp;
+
+ /* validate the handles before dereferencing/using them; the old code
+ * computed the offset pointer from udma_q and cdesc before the
+ * asserts ran */
+ al_assert(udma_q);
+ al_assert(cdesc);
+
+ tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;
+
+ /* if wrap around */
+ if (unlikely((tmp > udma_q->end_cdesc_ptr)))
+ return (union al_udma_cdesc *)
+ (udma_q->cdesc_base_ptr +
+ (tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));
+
+ return (volatile union al_udma_cdesc *) tmp;
+}
+
+/**
+ * check if the flags of the descriptor indicate that it is a new one.
+ * the function uses the ring id from the descriptor flags to know whether it
+ * is a new one, by comparing it with the current ring id of the queue
+ *
+ * @param udma_q queue handle
+ * @param flags the flags of the completion descriptor
+ *
+ * @return AL_TRUE if the completion descriptor is a new one.
+ * AL_FALSE if it is an old one.
+ */
+static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
+ uint32_t flags)
+{
+ /* the ring-id field occupies the same bits in M2S and S2M completion
+ * descriptors (shift 24, mask 0x3), so the M2S macros are used here
+ * for both directions */
+ if (((flags & AL_M2S_DESC_RING_ID_MASK) >> AL_M2S_DESC_RING_ID_SHIFT)
+ == udma_q->comp_ring_id)
+ return AL_TRUE;
+ return AL_FALSE;
+}
+
+/**
+ * get next completion descriptor
+ * this function will also increment the completion ring id when the ring wraps
+ * around
+ *
+ * @param udma_q queue handle
+ * @param cdesc current completion descriptor
+ *
+ * @return pointer to the completion descriptor that follows the one pointed by
+ * cdesc
+ */
+static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc *cdesc)
+{
+ /* if last desc, wrap around */
+ if (unlikely(((volatile uint8_t *) cdesc == udma_q->end_cdesc_ptr))) {
+ /* crossing the ring boundary advances the completion ring id */
+ udma_q->comp_ring_id =
+ (udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
+ return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
+ }
+ /* otherwise just step one completion descriptor forward */
+ return (volatile union al_udma_cdesc *) ((volatile uint8_t *) cdesc + udma_q->cdesc_size);
+}
+
+/**
+ * get next completed packet from completion ring of the queue
+ *
+ * @param udma_q udma queue handle
+ * @param desc pointer that set by this function to the first descriptor
+ * note: desc is valid only when return value is not zero
+ * @return number of descriptors that belong to the packet. 0 means no completed
+ * full packet was found.
+ * If the descriptors found in the completion queue don't form full packet (no
+ * desc with LAST flag), then this function will do the following:
+ * (1) save the number of processed descriptors.
+ * (2) save last processed descriptor, so next time it called, it will resume
+ * from there.
+ * (3) return 0.
+ * note: the descriptors that belong to the completed packet will still be
+ * considered as used, that means the upper layer is safe to access those
+ * descriptors when this function returns. the al_udma_cdesc_ack() should be
+ * called to inform the udma driver that those descriptors are freed.
+ */
+uint32_t al_udma_cdesc_packet_get(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc **desc);
+
+/** get completion descriptor pointer from its index */
+#define al_udma_cdesc_idx_to_ptr(udma_q, idx) \
+ ((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr + \
+ (idx) * (udma_q)->cdesc_size))
+
+
+/**
+ * return number of all completed descriptors in the completion ring
+ *
+ * @param udma_q udma queue handle
+ * @param cdesc pointer that set by this function to the first descriptor
+ * note: desc is valid only when return value is not zero
+ * note: pass NULL if not interested
+ * @return number of descriptors. 0 means no completed descriptors were found.
+ * note: the descriptors that belong to the completed packet will still be
+ * considered as used, that means the upper layer is safe to access those
+ * descriptors when this function returns. the al_udma_cdesc_ack() should be
+ * called to inform the udma driver that those descriptors are freed.
+ */
+static INLINE uint32_t al_udma_cdesc_get_all(
+ struct al_udma_q *udma_q,
+ volatile union al_udma_cdesc **cdesc)
+{
+ uint16_t count = 0;
+
+ al_assert(udma_q);
+
+ /* read the completion head pointer from hw; only the low 16 bits of
+ * crhp hold the descriptor index */
+ udma_q->comp_head_idx = (uint16_t)
+ (al_reg_read32(&udma_q->q_regs->rings.crhp) &
+ 0xFFFF);
+
+ /* number of completed-but-not-yet-acked descriptors, modulo ring size */
+ count = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) &
+ udma_q->size_mask;
+
+ if (cdesc)
+ *cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
+
+ return (uint32_t)count;
+}
+
+/**
+ * acknowledge the driver that the upper layer completed processing completion
+ * descriptors
+ *
+ * @param udma_q udma queue handle
+ * @param num number of descriptors to acknowledge
+ *
+ * @return 0
+ */
+static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num)
+{
+ al_assert(udma_q);
+
+ /* advance the software completion tail; masking keeps the index
+ * inside the ring */
+ udma_q->next_cdesc_idx += num;
+ udma_q->next_cdesc_idx &= udma_q->size_mask;
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+
+#endif /* __AL_HAL_UDMA_H__ */
+/** @} end of UDMA group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_config.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_config.h
new file mode 100644
index 0000000..cbb793b
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_config.h
@@ -0,0 +1,728 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_config UDMA Config
+ * @ingroup group_udma_api
+ * UDMA Config API
+ * @{
+ * @file al_hal_udma_config.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver for configuration APIs
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_CONFIG_H__
+#define __AL_HAL_UDMA_CONFIG_H__
+
+#include <al_hal_udma.h>
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/** Scheduling mode */
+enum al_udma_sch_mode {
+ STRICT, /* Strict */
+	SRR,			/* Simple Round Robin */
+	DWRR			/* Deficit Weighted Round Robin */
+};
+
+/** AXI configuration */
+struct al_udma_axi_conf {
+ uint32_t axi_timeout; /* Timeout for AXI transactions */
+ uint8_t arb_promotion; /* arbitration promotion */
+ al_bool swap_8_bytes; /* enable 8 bytes swap instead of 4 bytes */
+ al_bool swap_s2m_data;
+ al_bool swap_s2m_desc;
+ al_bool swap_m2s_data;
+ al_bool swap_m2s_desc;
+};
+
+/** UDMA AXI M2S configuration */
+struct al_udma_axi_submaster {
+ uint8_t id; /* AXI ID */
+ uint8_t cache_type;
+ uint8_t burst;
+ uint16_t used_ext;
+ uint8_t bus_size;
+ uint8_t qos;
+ uint8_t prot;
+ uint8_t max_beats;
+};
+
+/** UDMA AXI M2S configuration */
+struct al_udma_m2s_axi_conf {
+ struct al_udma_axi_submaster comp_write;
+ struct al_udma_axi_submaster data_read;
+ struct al_udma_axi_submaster desc_read;
+ al_bool break_on_max_boundary; /* Data read break on max boundary */
+ uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+ uint8_t ostand_max_data_read;
+ uint8_t ostand_max_desc_read;
+ uint8_t ostand_max_comp_req;
+ uint8_t ostand_max_comp_write;
+};
+
+/** UDMA AXI S2M configuration */
+struct al_udma_s2m_axi_conf {
+ struct al_udma_axi_submaster data_write;
+ struct al_udma_axi_submaster desc_read;
+ struct al_udma_axi_submaster comp_write;
+ al_bool break_on_max_boundary; /* Data read break on max boundary */
+ uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
+ uint8_t ostand_max_data_req;
+ uint8_t ostand_max_data_write;
+ uint8_t ostand_max_comp_req;
+ uint8_t ostand_max_comp_write;
+ uint8_t ostand_max_desc_read;
+ uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
+};
+
+/** M2S error logging */
+struct al_udma_err_log {
+ uint32_t error_status;
+ uint32_t header[4];
+};
+
+/** M2S max packet size configuration */
+struct al_udma_m2s_pkt_len_conf {
+ uint32_t max_pkt_size;
+ al_bool encode_64k_as_zero;
+};
+
+/** M2S Descriptor Prefetch configuration */
+struct al_udma_m2s_desc_pref_conf {
+ uint8_t desc_fifo_depth;
+ enum al_udma_sch_mode sch_mode; /* Scheduling mode
+ * (either strict or RR) */
+
+ uint8_t max_desc_per_packet; /* max number of descriptors to
+ * prefetch */
+ /* in one burst (5b) */
+ uint8_t pref_thr;
+ uint8_t min_burst_above_thr; /* min burst size when fifo above
+ * pref_thr (4b)
+ */
+ uint8_t min_burst_below_thr; /* min burst size when fifo below
+ * pref_thr (4b)
+ */
+ uint8_t max_pkt_limit; /* maximum number of packets in the data
+ * read FIFO, defined based on header
+ * FIFO size
+ */
+ uint16_t data_fifo_depth; /* maximum number of data beats in the
+ * data read FIFO,
+ * defined based on header FIFO size
+ */
+};
+
+/** S2M Descriptor Prefetch configuration */
+struct al_udma_s2m_desc_pref_conf {
+ uint8_t desc_fifo_depth;
+	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
+ * (either strict or RR)
+ */
+
+ al_bool q_promotion; /* enable promotion */
+ al_bool force_promotion; /* force promotion */
+ al_bool en_pref_prediction; /* enable prefetch prediction */
+ uint8_t promotion_th; /* Threshold for queue promotion */
+
+ uint8_t pref_thr;
+ uint8_t min_burst_above_thr; /* min burst size when fifo above
+ * pref_thr (4b)
+ */
+ uint8_t min_burst_below_thr; /* min burst size when fifo below
+ * pref_thr (4b)
+ */
+ uint8_t a_full_thr; /* almost full threshold */
+};
+
+/** S2M Data write configuration */
+struct al_udma_s2m_data_write_conf {
+ uint16_t data_fifo_depth; /* maximum number of data beats in the
+ * data write FIFO, defined based on
+ * header FIFO size
+ */
+ uint8_t max_pkt_limit; /* maximum number of packets in the
+					 * data write FIFO, defined based on
+ * header FIFO size
+ */
+ uint8_t fifo_margin;
+ uint32_t desc_wait_timer; /* waiting time for the host to write
+ * new descriptor to the queue
+ * (for the current packet in process)
+ */
+ uint32_t flags; /* bitwise of flags of s2m
+ * data_cfg_2 register
+ */
+};
+
+/** S2M Completion configuration */
+struct al_udma_s2m_completion_conf {
+ uint8_t desc_size; /* Size of completion descriptor
+ * in words
+ */
+ al_bool cnt_words; /* Completion fifo in use counter:
+					 * AL_TRUE words, AL_FALSE descriptors
+ */
+ al_bool q_promotion; /* Enable promotion of the current
+ * unack in progress */
+ /* in the completion write scheduler */
+ al_bool force_rr; /* force RR arbitration in the
+ * scheduler
+ */
+ // uint8_t ack_fifo_depth; /* size of the stream application ack fifo */
+ uint8_t q_free_min; /* minimum number of free completion
+ * entries
+ */
+ /* to qualify for promotion */
+
+ uint16_t comp_fifo_depth; /* Size of completion fifo in words */
+ uint16_t unack_fifo_depth; /* Size of unacked fifo in descs */
+	uint32_t timeout;	/* Ack timeout from stream interface */
+};
+
+/** M2S UDMA DWRR configuration */
+struct al_udma_m2s_dwrr_conf {
+ al_bool enable_dwrr;
+ uint8_t inc_factor;
+ uint8_t weight;
+ al_bool pkt_mode;
+ uint32_t deficit_init_val;
+};
+
+/** M2S DMA Rate Limitation mode */
+struct al_udma_m2s_rlimit_mode {
+ al_bool pkt_mode_en;
+ uint16_t short_cycle_sz;
+ uint32_t token_init_val;
+};
+
+/** M2S Stream/Q Rate Limitation */
+struct al_udma_m2s_rlimit_cfg {
+ uint32_t max_burst_sz; /* maximum number of accumulated bytes in the
+ * token counter
+ */
+ uint16_t long_cycle_sz; /* number of short cycles between token fill */
+ uint32_t long_cycle; /* number of bits to add in each long cycle */
+ uint32_t short_cycle; /* number of bits to add in each cycle */
+ uint32_t mask; /* mask the different types of rate limiters */
+};
+
+enum al_udma_m2s_rlimit_action {
+ AL_UDMA_STRM_RLIMIT_ENABLE,
+ AL_UDMA_STRM_RLIMIT_PAUSE,
+ AL_UDMA_STRM_RLIMIT_RESET
+};
+
+/** M2S UDMA Q scheduling configuration */
+struct al_udma_m2s_q_dwrr_conf {
+	uint32_t max_deficit_cnt_sz;	/* maximum number of accumulated bytes
+ * in the deficit counter
+ */
+ al_bool strict; /* bypass DWRR */
+ uint8_t axi_qos;
+ uint16_t q_qos;
+ uint8_t weight;
+};
+
+/** M2S UDMA / UDMA Q scheduling configuration */
+struct al_udma_m2s_sc {
+ enum al_udma_sch_mode sch_mode; /* Scheduling Mode */
+ struct al_udma_m2s_dwrr_conf dwrr; /* DWRR configuration */
+};
+
+/** UDMA / UDMA Q rate limitation configuration */
+struct al_udma_m2s_rlimit {
+ struct al_udma_m2s_rlimit_mode rlimit_mode;
+ /* rate limitation enablers */
+#if 0
+ struct al_udma_tkn_bkt_conf token_bkt; /* Token Bucket configuration */
+#endif
+};
+
+/** UDMA Data read configuration */
+struct al_udma_m2s_data_rd_conf {
+ uint8_t max_rd_d_beats; /* max burst size for reading data
+ * (in AXI beats-128b) (5b)
+ */
+ uint8_t max_rd_d_out_req; /* max number of outstanding data
+ * read requests (6b)
+ */
+ uint16_t max_rd_d_out_beats; /* max num. of data read beats (10b) */
+};
+
+/** M2S UDMA completion and application timeouts */
+struct al_udma_m2s_comp_timeouts {
+ enum al_udma_sch_mode sch_mode; /* Scheduling mode
+ * (either strict or RR)
+ */
+ al_bool enable_q_promotion;
+ uint8_t unack_fifo_depth; /* unacked desc fifo size */
+ uint8_t comp_fifo_depth; /* desc fifo size */
+ uint32_t coal_timeout; /* (24b) */
+ uint32_t app_timeout; /* (24b) */
+};
+
+/** S2M UDMA per queue completion configuration */
+struct al_udma_s2m_q_comp_conf {
+ al_bool dis_comp_coal; /* disable completion coalescing */
+ al_bool en_comp_ring_update; /* enable writing completion descs */
+ uint32_t comp_timer; /* completion coalescing timer */
+ al_bool en_hdr_split; /* enable header split */
+ al_bool force_hdr_split; /* force header split */
+ uint16_t hdr_split_size; /* size used for the header split */
+ uint8_t q_qos; /* queue QoS */
+};
+
+/** UDMA per queue VMID control configuration */
+struct al_udma_gen_vmid_q_conf {
+ /* Enable usage of the VMID per queue according to 'vmid' */
+ al_bool queue_en;
+
+ /* Enable usage of the VMID from the descriptor buffer address 63:48 */
+ al_bool desc_en;
+
+ /* VMID to be applied when 'queue_en' is asserted */
+ uint16_t vmid;
+};
+
+/** UDMA VMID control configuration */
+struct al_udma_gen_vmid_conf {
+ /* TX queue configuration */
+ struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];
+
+ /* RX queue configuration */
+ struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
+};
+
+/** UDMA VMID MSIX control configuration */
+struct al_udma_gen_vmid_msix_conf {
+ /* Enable write to all VMID_n registers in the MSI-X Controller */
+ al_bool access_en;
+
+ /* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
+ al_bool sel;
+};
+
+/** UDMA per Tx queue advanced VMID control configuration */
+struct al_udma_gen_vmid_advanced_tx_q_conf {
+ /**********************************************************************
+ * Tx Data VMID
+ **********************************************************************/
+ /* Tx data VMID enable */
+ al_bool tx_q_data_vmid_en;
+
+ /*
+ * For Tx data reads, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'tx_q_addr_hi_sel'
+ */
+ unsigned int tx_q_addr_hi;
+
+ /*
+ * For Tx data reads, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ * When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' will be taken.
+ * When 'tx_q_addr_hi_sel'=0 none of it will be taken, and when any
+ * value in between, it will start from the MSB bit and sweep down as
+ * many bits as needed. For example if 'tx_q_addr_hi_sel'=8, the final
+ * address [63:56] will carry 'tx_q_addr_hi'[31:24] while [55:32] will
+ * carry the original buffer address[55:32].
+ */
+ unsigned int tx_q_addr_hi_sel;
+
+ /*
+ * Tx data read VMID
+ * Masked per bit with 'tx_q_data_vmid_mask'
+ */
+ unsigned int tx_q_data_vmid;
+
+ /*
+ * Tx data read VMID mask
+ * Each '1' selects from the buffer address, each '0' selects from
+ * 'tx_q_data_vmid'
+ */
+ unsigned int tx_q_data_vmid_mask;
+
+ /**********************************************************************
+ * Tx prefetch VMID
+ **********************************************************************/
+ /* Tx prefetch VMID enable */
+ al_bool tx_q_prefetch_vmid_en;
+
+ /* Tx prefetch VMID */
+ unsigned int tx_q_prefetch_vmid;
+
+ /**********************************************************************
+ * Tx completion VMID
+ **********************************************************************/
+ /* Tx completion VMID enable */
+ al_bool tx_q_compl_vmid_en;
+
+ /* Tx completion VMID */
+ unsigned int tx_q_compl_vmid;
+};
+
+/** UDMA per Rx queue advanced VMID control configuration */
+struct al_udma_gen_vmid_advanced_rx_q_conf {
+ /**********************************************************************
+ * Rx Data VMID
+ **********************************************************************/
+ /* Rx data VMID enable */
+ al_bool rx_q_data_vmid_en;
+
+ /*
+ * For Rx data writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_addr_hi_sel'
+ */
+ unsigned int rx_q_addr_hi;
+
+ /*
+ * For Rx data writes, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ */
+ unsigned int rx_q_addr_hi_sel;
+
+ /*
+ * Rx data write VMID
+ * Masked per bit with 'rx_q_data_vmid_mask'
+ */
+ unsigned int rx_q_data_vmid;
+
+ /* Rx data write VMID mask */
+ unsigned int rx_q_data_vmid_mask;
+
+ /**********************************************************************
+ * Rx Data Buffer 2 VMID
+ **********************************************************************/
+ /* Rx data buff2 VMID enable */
+ al_bool rx_q_data_buff2_vmid_en;
+
+ /*
+ * For Rx data buff2 writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_data_buff2_addr_hi_sel'
+ */
+ unsigned int rx_q_data_buff2_addr_hi;
+
+ /*
+ * For Rx data buff2 writes, 6 bits serving the number of bits taken
+ * from the extra register on account of bits coming from the original
+ * address field.
+ */
+ unsigned int rx_q_data_buff2_addr_hi_sel;
+
+ /*
+ * Rx data buff2 write VMID
+ * Masked per bit with 'rx_q_data_buff2_mask'
+ */
+ unsigned int rx_q_data_buff2_vmid;
+
+ /* Rx data buff2 write VMID mask */
+ unsigned int rx_q_data_buff2_mask;
+
+ /**********************************************************************
+ * Rx DDP VMID
+ **********************************************************************/
+ /* Rx DDP write VMID enable */
+ al_bool rx_q_ddp_vmid_en;
+
+ /*
+ * For Rx DDP writes, replacement bits for the original address.
+ * The number of bits replaced is determined according to
+ * 'rx_q_ddp_addr_hi_sel'
+ */
+ unsigned int rx_q_ddp_addr_hi;
+
+ /*
+ * For Rx DDP writes, 6 bits serving the number of bits taken from the
+ * extra register on account of bits coming from the original address
+ * field.
+ */
+ unsigned int rx_q_ddp_addr_hi_sel;
+
+ /*
+ * Rx DDP write VMID
+ * Masked per bit with 'rx_q_ddp_mask'
+ */
+ unsigned int rx_q_ddp_vmid;
+
+ /* Rx DDP write VMID mask */
+ unsigned int rx_q_ddp_mask;
+
+ /**********************************************************************
+ * Rx prefetch VMID
+ **********************************************************************/
+ /* Rx prefetch VMID enable */
+ al_bool rx_q_prefetch_vmid_en;
+
+ /* Rx prefetch VMID */
+ unsigned int rx_q_prefetch_vmid;
+
+ /**********************************************************************
+ * Rx completion VMID
+ **********************************************************************/
+ /* Rx completion VMID enable */
+ al_bool rx_q_compl_vmid_en;
+
+ /* Rx completion VMID */
+ unsigned int rx_q_compl_vmid;
+};
+
+/**
+ * Header split, buffer 2 per queue configuration
+ * When header split is enabled, Buffer_2 is used as an address for the header
+ * data. Buffer_2 is defined as 32-bits in the RX descriptor and it is defined
+ * that the MSB ([63:32]) of Buffer_1 is used as address [63:32] for the header
+ * address.
+ */
+struct al_udma_gen_hdr_split_buff2_q_conf {
+ /*
+ * MSB of the 64-bit address (bits [63:32]) that can be used for header
+ * split for this queue
+ */
+ unsigned int addr_msb;
+
+ /*
+ * Determine how to select the MSB (bits [63:32]) of the address when
+ * header split is enabled (4 bits, one per byte)
+ * - Bits [3:0]:
+ * [0] – selector for bits [39:32]
+ * [1] – selector for bits [47:40]
+ * [2] – selector for bits [55:48]
+	 *	[3] – selector for bits [63:56]
+ * - Bit value:
+ * 0 – Use Buffer_1 (legacy operation)
+ * 1 – Use the queue configuration 'addr_msb'
+ */
+ unsigned int add_msb_sel;
+};
+
+/* Report Error - to be used for abort */
+void al_udma_err_report(struct al_udma *udma);
+
+/* Statistics - TBD */
+void al_udma_stats_get(struct al_udma *udma);
+
+/* Misc configurations */
+/* Configure AXI configuration */
+int al_udma_axi_set(struct udma_gen_axi *axi_regs,
+ struct al_udma_axi_conf *axi);
+
+/* Configure UDMA AXI M2S configuration */
+int al_udma_m2s_axi_set(struct al_udma *udma,
+ struct al_udma_m2s_axi_conf *axi_m2s);
+
+/* Configure UDMA AXI S2M configuration */
+int al_udma_s2m_axi_set(struct al_udma *udma,
+ struct al_udma_s2m_axi_conf *axi_s2m);
+
+/* Configure M2S packet len */
+int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
+ struct al_udma_m2s_pkt_len_conf *conf);
+
+/* Configure M2S UDMA descriptor prefetch */
+int al_udma_m2s_pref_set(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+int al_udma_m2s_pref_get(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+
+/* set m2s packet's max descriptors (including meta descriptors) */
+#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET 31
+int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs);
+
+/* set s2m packets' max descriptors */
+#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET 31
+int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs);
+
+
+/* Configure S2M UDMA descriptor prefetch */
+int al_udma_s2m_pref_set(struct al_udma *udma,
+ struct al_udma_s2m_desc_pref_conf *conf);
+int al_udma_m2s_pref_get(struct al_udma *udma,
+ struct al_udma_m2s_desc_pref_conf *conf);
+
+/* Configure S2M UDMA data write */
+int al_udma_s2m_data_write_set(struct al_udma *udma,
+ struct al_udma_s2m_data_write_conf *conf);
+
+/* Configure the s2m full line write feature */
+int al_udma_s2m_full_line_write_set(struct al_udma *umda, al_bool enable);
+
+/* Configure S2M UDMA completion */
+int al_udma_s2m_completion_set(struct al_udma *udma,
+ struct al_udma_s2m_completion_conf *conf);
+
+/* Configure the M2S UDMA scheduling mode */
+int al_udma_m2s_sc_set(struct al_udma *udma,
+ struct al_udma_m2s_dwrr_conf *sched);
+
+/* Configure the M2S UDMA rate limitation */
+int al_udma_m2s_rlimit_set(struct al_udma *udma,
+ struct al_udma_m2s_rlimit_mode *mode);
+int al_udma_m2s_rlimit_reset(struct al_udma *udma);
+
+/* Configure the M2S Stream rate limitation */
+int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
+ struct al_udma_m2s_rlimit_cfg *conf);
+int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
+ enum al_udma_m2s_rlimit_action act);
+
+/* Configure the M2S UDMA Q rate limitation */
+int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
+ struct al_udma_m2s_rlimit_cfg *conf);
+int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
+ enum al_udma_m2s_rlimit_action act);
+
+/* Configure the M2S UDMA Q scheduling mode */
+int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
+ struct al_udma_m2s_q_dwrr_conf *conf);
+int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set);
+int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);
+
+/* M2S UDMA completion and application timeouts */
+int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
+ struct al_udma_m2s_comp_timeouts *conf);
+int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
+ struct al_udma_m2s_comp_timeouts *conf);
+
+
+/**
+ * S2M UDMA Configure the expected behavior of Rx/S2M UDMA when there are no Rx Descriptors.
+ *
+ * @param udma
+ * @param drop_packet when set to true, the UDMA will drop packet.
+ * @param gen_interrupt when set to true, the UDMA will generate
+ * no_desc_hint interrupt when a packet received and the UDMA
+ * doesn't find enough free descriptors for it.
+ * @param wait_for_desc_timeout timeout in SB cycles to wait for new
+ * descriptors before dropping the packets.
+ * Notes:
+ * - The hint interrupt is raised immediately without waiting
+ * for new descs.
+ * - value 0 means wait for ever.
+ *
+ * Notes:
+ * - When get_interrupt is set, the API won't program the iofic to unmask this
+ * interrupt, in this case the callee should take care for doing that unmask
+ * using the al_udma_iofic_config() API.
+ *
+ * - The hardware's default configuration is: no drop packet, generate hint
+ * interrupt.
+ * - This API must be called once and before enabling the UDMA
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
+
+/**
+ * S2M UDMA configure a queue's completion update
+ *
+ * @param q_udma
+ * @param enable set to true to enable completion update
+ *
+ * completion update better be disabled for tx queues as those descriptors
+ * doesn't carry useful information, thus disabling it saves DMA accesses.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);
+
+/**
+ * S2M UDMA configure a queue's completion descriptors coalescing
+ *
+ * @param q_udma
+ * @param enable set to true to enable completion coalescing
+ * @param coal_timeout in South Bridge cycles.
+ *
+ * @return 0 if no error found.
+ */
+int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
+
+/**
+ * S2M UDMA configure completion descriptors write burst parameters
+ *
+ * @param udma
+ * @param burst_size completion descriptors write burst size in bytes.
+ *
+ * @return 0 if no error found.
+ */int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma, uint16_t
+ burst_size);
+
+/* S2M UDMA per queue completion configuration */
+int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
+ struct al_udma_s2m_q_comp_conf *conf);
+
+/** UDMA VMID control configuration */
+void al_udma_gen_vmid_conf_set(
+ struct unit_regs __iomem *unit_regs,
+ struct al_udma_gen_vmid_conf *conf);
+
+/** UDMA VMID MSIX control configuration */
+void al_udma_gen_vmid_msix_conf_set(
+ struct unit_regs __iomem *unit_regs,
+ struct al_udma_gen_vmid_msix_conf *conf);
+
+/** UDMA VMID control advanced Tx queue configuration */
+void al_udma_gen_vmid_advanced_tx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_vmid_advanced_tx_q_conf *conf);
+
+/** UDMA VMID control advanced Rx queue configuration */
+void al_udma_gen_vmid_advanced_rx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_vmid_advanced_rx_q_conf *conf);
+
+/** UDMA header split buffer 2 Rx queue configuration */
+void al_udma_gen_hdr_split_buff2_rx_q_conf(
+ struct al_udma_q *q,
+ struct al_udma_gen_hdr_split_buff2_q_conf *conf);
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of UDMA config group */
+#endif /* __AL_HAL_UDMA_CONFIG_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_debug.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_debug.h
new file mode 100644
index 0000000..81ec671
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_debug.h
@@ -0,0 +1,133 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_debug UDMA Debug
+ * @ingroup group_udma_api
+ * UDMA Debug
+ * @{
+ * @file al_hal_udma_debug.h
+ *
+ * @brief C Header file for the Universal DMA HAL driver for debug APIs
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_DEBUG_H__
+#define __AL_HAL_UDMA_DEBUG_H__
+
+#include <al_hal_udma.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* UDMA register print helper macros */
+#define AL_UDMA_PRINT_REG(UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG) \
+ al_dbg(PREFIX #REG " = 0x%08x" POSTFIX, al_reg_read32( \
+ &(UDMA->udma_regs->TYPE.GROUP.REG)))
+
+#define AL_UDMA_PRINT_REG_FIELD( \
+ UDMA, PREFIX, POSTFIX, FMT, TYPE, GROUP, REG, LBL, FIELD) \
+ al_dbg(PREFIX #LBL " = " FMT POSTFIX, al_reg_read32( \
+ &(UDMA->udma_regs->TYPE.GROUP.REG)) \
+ & FIELD ## _MASK >> FIELD ## _SHIFT)
+
+#define AL_UDMA_PRINT_REG_BIT( \
+ UDMA, PREFIX, POSTFIX, TYPE, GROUP, REG, LBL, FIELD) \
+ al_dbg(PREFIX #LBL " = %d" POSTFIX, ((al_reg_read32( \
+ &(UDMA->udma_regs->TYPE.GROUP.REG)) \
+ & FIELD) != 0))
+
+/* UDMA register print mask definitions */
+#define AL_UDMA_DEBUG_QUEUE(n) AL_BIT(n)
+#define AL_UDMA_DEBUG_AXI AL_BIT(DMA_MAX_Q)
+#define AL_UDMA_DEBUG_GENERAL AL_BIT(DMA_MAX_Q + 1)
+#define AL_UDMA_DEBUG_READ AL_BIT(DMA_MAX_Q + 2)
+#define AL_UDMA_DEBUG_WRITE AL_BIT(DMA_MAX_Q + 3)
+#define AL_UDMA_DEBUG_DWRR AL_BIT(DMA_MAX_Q + 4)
+#define AL_UDMA_DEBUG_RATE_LIMITER AL_BIT(DMA_MAX_Q + 5)
+#define AL_UDMA_DEBUG_STREAM_RATE_LIMITER AL_BIT(DMA_MAX_Q + 6)
+#define AL_UDMA_DEBUG_COMP AL_BIT(DMA_MAX_Q + 7)
+#define AL_UDMA_DEBUG_STAT AL_BIT(DMA_MAX_Q + 8)
+#define AL_UDMA_DEBUG_FEATURE AL_BIT(DMA_MAX_Q + 9)
+#define AL_UDMA_DEBUG_ALL 0xFFFFFFFF
+
+/* Debug functions */
+
+/**
+ * Print udma registers according to the provided mask
+ *
+ * @param udma udma data structure
+ * @param mask mask that specifies which registers groups to print
+ * e.g. AL_UDMA_DEBUG_AXI prints AXI registers, AL_UDMA_DEBUG_ALL prints all
+ * registers
+ */
+void al_udma_regs_print(struct al_udma *udma, unsigned int mask);
+
+/**
+ * Print udma queue software structure
+ *
+ * @param udma udma data structure
+ * @param qid queue index
+ */
+void al_udma_q_struct_print(struct al_udma *udma, uint32_t qid);
+
+/** UDMA ring type */
+enum al_udma_ring_type {
+ AL_RING_SUBMISSION,
+ AL_RING_COMPLETION
+};
+
+/**
+ * Print the ring entries for the specified queue index and ring type
+ * (submission/completion)
+ *
+ * @param udma udma data structure
+ * @param qid queue index
+ * @param rtype udma ring type
+ */
+void al_udma_ring_print(struct al_udma *udma, uint32_t qid,
+ enum al_udma_ring_type rtype);
+
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+#endif /* __AL_HAL_UDMA_DEBUG_H__ */
+/** @} end of UDMA debug group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_fast.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_fast.h
new file mode 100644
index 0000000..0fe4b89
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_fast.h
@@ -0,0 +1,231 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_fast UDMA Fast API
+ *
+ * @{
+ * The UDMA Fast API can be used to perform simple operations by directly modifying
+ * the UDMA descriptors instead of passing via HAL SW structures and functions.
+ * This makes it possible to achieve optimal performance for those operations.
+ *
+ * Currently RAID controller and MEMCOPY and PARALLEL MEMCOPY operations are
+ * supported.
+ *
+ * @file al_hal_udma_fast.h
+ *
+ * @brief Header file for UDMA Fast API
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_FAST_H__
+#define __AL_HAL_UDMA_FAST_H__
+
+#include "al_hal_common.h"
+#include "al_hal_ssm_crc_memcpy.h"
+#include "al_hal_udma.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * Prepare MEMCPY udma queues to work in fast mode - init all the descriptors
+ * according to opcode and flags passed in xaction
+ *
+ * @param udma_txq udma tx queue handle
+ * @param udma_rxq udma rx queue handle
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found
+ */
+int al_udma_fast_memcpy_q_prepare(struct al_udma_q *udma_txq,
+ struct al_udma_q *udma_rxq,
+ struct al_memcpy_transaction *xaction);
+
+/**
+ * Get udma descriptor by index in queue
+ *
+ * @param udma_q udma queue handle
+ * @param index descriptor index
+ *
+ * @return udma descriptor handle
+ */
+static INLINE union al_udma_desc *al_udma_fast_desc_get_by_idx(struct al_udma_q *udma_q,
+ uint32_t index)
+{
+ union al_udma_desc *desc;
+
+ al_assert(udma_q);
+ al_assert(index < udma_q->size);
+
+ desc = udma_q->desc_base_ptr + index;
+ return desc;
+}
+
+/* Work with tx desc structures as buf_ptr, flags and len fields are in same
+ * location for tx and rx descs
+ */
+/**
+ * Set udma descriptor buffer address
+ *
+ * @param desc udma descriptor handle
+ * @param buf_ptr buffer address
+ * @param vmid virtual machine id
+ */
+static inline void al_udma_fast_desc_buf_set(union al_udma_desc *desc,
+ al_phys_addr_t buf_ptr, uint16_t vmid)
+{
+ al_assert(desc);
+
+ desc->tx.buf_ptr = swap64_to_le(buf_ptr | vmid);
+}
+
+/**
+ * Set udma descriptor flags specified by flags param and mask, while keeping
+ * flags that are not specified by the mask
+ *
+ * @param desc udma descriptor handle
+ * @param flags flags
+ * @param mask flags mask
+ */
+static inline void al_udma_fast_desc_flags_set(union al_udma_desc *desc,
+ uint32_t flags, uint32_t mask)
+{
+ uint32_t flags_len;
+
+ al_assert(desc);
+
+ flags_len = swap32_from_le(desc->tx.len_ctrl);
+ mask &= ~AL_M2S_DESC_LEN_MASK;
+ flags_len &= ~mask;
+ flags_len |= flags;
+ desc->tx.len_ctrl = swap32_to_le(flags_len);
+}
+
+/**
+ * Set udma descriptor ring id
+ *
+ * @param desc udma descriptor handle
+ * @param ring_id ring id
+ */
+static inline void al_udma_fast_desc_ring_id_set(union al_udma_desc *desc,
+ uint32_t ring_id)
+{
+ uint32_t flags_len;
+
+ al_assert(desc);
+
+ flags_len = swap32_from_le(desc->tx.len_ctrl);
+ flags_len &= ~AL_M2S_DESC_RING_ID_MASK;
+ flags_len |= ring_id << AL_M2S_DESC_RING_ID_SHIFT;
+ desc->tx.len_ctrl = swap32_to_le(flags_len);
+}
+
+/**
+ * Set udma descriptor buffer length
+ *
+ * @param desc udma descriptor handle
+ * @param len buffer length
+ */
+static inline void al_udma_fast_desc_len_set(union al_udma_desc *desc,
+ uint16_t len)
+{
+ uint32_t flags_len;
+
+ al_assert(desc);
+
+ flags_len = swap32_from_le(desc->tx.len_ctrl);
+ flags_len &= ~AL_M2S_DESC_LEN_MASK;
+ flags_len |= len;
+ desc->tx.len_ctrl = swap32_to_le(flags_len);
+}
+
+/**
+ * Get up to desc_to_complete completed descriptors
+ *
+ * If use_head is set to AL_TRUE, the head register is used to determine number of
+ * completed descriptors and reg read is performed on every poll operation.
+ * Otherwise each completion descriptor is read to determine whether it is
+ * completed. If HW cache coherency is used there's no cache miss until the
+ * descriptor is completed.
+ *
+ * @param udma_rxq udma rx queue handle
+ * @param descs_to_complete max number of completed descriptors to get
+ * @param use_head poll head register instead of completion descriptors
+ */
+static inline int al_udma_fast_completion(struct al_udma_q *udma_rxq,
+ uint32_t descs_to_complete, al_bool use_head)
+{
+ volatile union al_udma_cdesc *curr;
+ uint32_t cdesc_count = 0;
+
+ if (!use_head) {
+ curr = udma_rxq->comp_head_ptr;
+ while (descs_to_complete != 0) {
+ uint32_t comp_flags;
+
+ comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
+ if (al_udma_new_cdesc(udma_rxq, comp_flags) == AL_FALSE)
+ break;
+
+ cdesc_count++;
+ descs_to_complete--;
+ curr = al_cdesc_next_update(udma_rxq, curr);
+ }
+
+ udma_rxq->comp_head_ptr = curr;
+ } else {
+ cdesc_count = al_udma_cdesc_get_all(udma_rxq, NULL);
+
+ if (cdesc_count > descs_to_complete)
+ cdesc_count = descs_to_complete;
+ }
+
+ if (cdesc_count)
+ al_udma_cdesc_ack(udma_rxq, cdesc_count);
+
+ return cdesc_count;
+}
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of UDMA Fast group */
+#endif /* __AL_HAL_UDMA_FAST_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic.h
new file mode 100644
index 0000000..154a99d
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic.h
@@ -0,0 +1,613 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_udma_interrupts UDMA I/O Fabric Interrupt Controller
+ * @ingroup group_udma_api
+ * UDMA IOFIC API
+ * @{
+ * @file al_hal_udma_iofic.h
+ *
+ * @brief C Header file for programming the interrupt controller that is found
+ * in UDMA-based units. These APIs rely on and use some of the Interrupt
+ * controller API under al_hal_iofic.h
+ */
+
+#ifndef __AL_HAL_UDMA_IOFIC_H__
+#define __AL_HAL_UDMA_IOFIC_H__
+
+#include <al_hal_common.h>
+#include <al_hal_iofic.h>
+#include <al_hal_udma_regs.h>
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/**
+ * Interrupt Mode
+ * This is the interrupt mode for the primary interrupt level. The secondary
+ * interrupt level does not have mode and it is always a level sensitive
+ * interrupt that is reflected in group D of the primary.
+ */
+enum al_iofic_mode {
+ AL_IOFIC_MODE_LEGACY, /**< level-sensitive interrupt wire */
+ AL_IOFIC_MODE_MSIX_PER_Q, /**< per UDMA queue MSI-X interrupt */
+ AL_IOFIC_MODE_MSIX_PER_GROUP
+};
+
+/** interrupt controller level (primary/secondary) */
+enum al_udma_iofic_level {
+ AL_UDMA_IOFIC_LEVEL_PRIMARY,
+ AL_UDMA_IOFIC_LEVEL_SECONDARY
+};
+
+/*
+ * The next four groups represents the standard 4 groups in the primary
+ * interrupt controller of each bus-master unit in the I/O Fabric.
+ * The first two groups can be used when accessing the secondary interrupt
+ * controller as well.
+ */
+#define AL_INT_GROUP_A 0 /**< summary of the below events */
+#define AL_INT_GROUP_B 1 /**< RX completion queues */
+#define AL_INT_GROUP_C 2 /**< TX completion queues */
+#define AL_INT_GROUP_D 3 /**< Misc */
+
+/*******************************************************************************
+ * Primary interrupt controller, group A bits
+ ******************************************************************************/
+/* Group A bits which are just summary bits of GROUP B, C and D */
+#define AL_INT_GROUP_A_GROUP_B_SUM AL_BIT(0)
+#define AL_INT_GROUP_A_GROUP_C_SUM AL_BIT(1)
+#define AL_INT_GROUP_A_GROUP_D_SUM AL_BIT(2)
+
+/*******************************************************************************
+ * MSIX entry indices
+ ******************************************************************************/
+/** MSIX entry index for summary of group D in group A */
+#define AL_INT_MSIX_GROUP_A_SUM_D_IDX 2
+/** MSIX entry index for RX completion queue 0 */
+#define AL_INT_MSIX_RX_COMPLETION_START 3
+
+/*******************************************************************************
+ * Primary interrupt controller, group D bits
+ ******************************************************************************/
+#define AL_INT_GROUP_D_CROSS_MAIL_BOXES \
+ (AL_BIT(0) | AL_BIT(1) | AL_BIT(2) | AL_BIT(3))
+/** Summary of secondary interrupt controller, group A */
+#define AL_INT_GROUP_D_M2S AL_BIT(8)
+/** Summary of secondary interrupt controller, group B */
+#define AL_INT_GROUP_D_S2M AL_BIT(9)
+#define AL_INT_GROUP_D_SW_TIMER_INT AL_BIT(10)
+#define AL_INT_GROUP_D_APP_EXT_INT AL_BIT(11)
+#define AL_INT_GROUP_D_ALL \
+ AL_INT_GROUP_D_CROSS_MAIL_BOXES | \
+ AL_INT_GROUP_D_M2S | \
+ AL_INT_GROUP_D_S2M | \
+ AL_INT_GROUP_D_SW_TIMER_INT | \
+ AL_INT_GROUP_D_APP_EXT_INT
+
+/*
+ * Until this point, all description above is for Groups A/B/C/D in the PRIMARY
+ * Interrupt controller.
+ * Following are definitions related to the secondary interrupt controller with
+ * two cause registers (group A and group B) that covers UDMA M2S/S2M errors.
+ * Secondary interrupt controller summary bits are not mapped to the Processor
+ * GIC directly, rather they are represented in Group D of the primary interrupt
+ * controller.
+ */
+
+/******************************************************************************
+ * Secondary interrupt Controller, Group A, which holds the TX (M2S) error
+ * interrupt bits
+ ******************************************************************************/
+
+/**
+ * MSIx response
+ * MSIX Bus generator response error, the Bus response received with error indication
+ */
+#define AL_INT_2ND_GROUP_A_M2S_MSIX_RESP AL_BIT(27)
+/**
+ * MSIx timeout MSIX Bus generator timeout error.
+ * The generator didn't receive bus response for the MSIx write transaction.
+ */
+#define AL_INT_2ND_GROUP_A_M2S_MSIX_TO AL_BIT(26)
+/** Prefetch header buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_HDR_PARITY AL_BIT(25)
+/** Prefetch descriptor buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_DESC_PARITY AL_BIT(24)
+/** Data buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_PARITY AL_BIT(23)
+/** Data header buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_HDR_PARITY AL_BIT(22)
+/** Completion coalescing buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_COMPL_COAL_PARITY AL_BIT(21)
+/** UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_UNACK_PKT_PARITY AL_BIT(20)
+/** ACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_ACK_PKT_PARITY AL_BIT(19)
+/** AXI data buffer parity error */
+#define AL_INT_2ND_GROUP_A_M2S_AX_DATA_PARITY AL_BIT(18)
+/**
+ * Prefetch Ring ID error
+ * A wrong RingId was received while prefetching submission descriptor. This
+ * could indicate a software bug or hardware failure, unless the UDMA is
+ * working in a mode to ignore RingId (the al_udma_iofic_config() API can be
+ * used to configure the UDMA to ignore the Ring ID check)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_RING_ID AL_BIT(17)
+/**
+ * Prefetch last
+ * Error in last bit indication of the descriptor
+ * Descriptor with Last bit asserted is read from the queue to the prefetch
+ * FIFO when the prefetch engine is not in a middle of packet processing (a
+ * descriptor with First bit asserted should be read first to indicate start of
+ * packet)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_LAST AL_BIT(16)
+/**
+ * Prefetch first
+ * Error in first bit indication of the descriptor
+ * Descriptor with First bit asserted is read from the queue to the prefetch
+ * FIFO while the prefetch engine is in a middle of packet processing ( a
+ * descriptor with Last bit asserted should be read to indicate end of packet
+ * before starting a new one)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_FIRST AL_BIT(15)
+/**
+ * Prefetch max descriptors
+ * Number of descriptors per packet exceeds the configurable maximum
+ * descriptors per packet. This could indicate a software bug or a hardware
+ * failure. (The al_udma_m2s_max_descs_set() API is used to configure the
+ * maximum descriptors per packet)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_MAX_DESC AL_BIT(14)
+/**
+ * Packet length
+ * Packet length exceeds the configurable maximum packet size. The
+ * al_udma_m2s_packet_size_cfg_set() API is used to configure the maximum
+ * packet size)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PKT_LEN AL_BIT(13)
+/**
+ * Prefetch AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_TO AL_BIT(12)
+/**
+ * Prefetch AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_RESP AL_BIT(11)
+/**
+ * Prefetch AXI parity
+ * Bus parity error on descriptor being prefetched
+ */
+#define AL_INT_2ND_GROUP_A_M2S_PREFETCH_AXI_PARITY AL_BIT(10)
+/**
+ * Data AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_TO AL_BIT(9)
+/**
+ * Data AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_DATA_AXI_RESP AL_BIT(8)
+/**
+ * Data AXI parity
+ * Bus parity error on data being read
+ */
+#define AL_INT_2ND_GROUP_A_M2S_SATA_AXI_PARITY AL_BIT(7)
+/**
+ * Completion AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_CONPL_AXI_TO AL_BIT(6)
+/**
+ * Completion AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_COMPL_AXI_RESP AL_BIT(5)
+/**
+ * Completion AXI parity
+ * Bus generator internal SRAM parity error
+ */
+#define AL_INT_2ND_GROUP_A_M2S_COMP_AXI_PARITY AL_BIT(4)
+/**
+ * Stream timeout
+ * Application stream interface timeout indicating a failure at the Application
+ * layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_TO AL_BIT(3)
+/**
+ * Stream response
+ * Application stream interface response error indicating a failure at the
+ * Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_RESP AL_BIT(2)
+/**
+ * Stream parity
+ * Application stream interface parity error indicating a failure at the
+ * Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_PARITY AL_BIT(1)
+/**
+ * Stream completion mismatch
+ * Application stream interface, packet serial mismatch error indicating a
+ * failure at the Application layer (RAID, Ethernet etc)
+ */
+#define AL_INT_2ND_GROUP_A_M2S_STRM_COMPL_MISMATCH AL_BIT(0)
+
+/*******************************************************************************
+ * Secondary interrupt Controller, Group B, which holds the RX (S2M) error
+ * interrupt bits
+ ******************************************************************************/
+
+/** Prefetch descriptor buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_DESC_PARITY AL_BIT(30)
+/** Completion coalescing buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_COAL_PARITY AL_BIT(29)
+/** PRE-UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_PRE_UNACK_PKT_PARITY AL_BIT(28)
+/** UNACK packets buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_UNACK_PKT_PARITY AL_BIT(27)
+/** Data buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_PARITY AL_BIT(26)
+/** Data header buffer parity error */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_HDR_PARITY AL_BIT(25)
+/**
+ * Packet length
+ * Application stream interface, Data counter length mismatch with metadata
+ * packet length indicating a failure at the Application layer (RAID, Ethernet
+ * etc)
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PKT_LEN AL_BIT(24)
+/**
+ * Stream last
+ * Application stream interface, error in Last bit indication, this error is
+ * asserted when a 'last' indication is asserted on the stream interface
+ * (between the application and the UDMA) when the interface is not in the
+ * middle of packet, meaning that there was no 'first' indication before. This
+ * indicates a failure at the application layer.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_LAST AL_BIT(23)
+/**
+ * Stream first
+ * Application stream interface error in first bit indication, this error is
+ * asserted when a 'first' indication is asserted on the stream interface
+ * (between the application and the UDMA) when the interface is in the middle
+ * of packet, meaning that there was a 'first' indication before and the UDMA
+ * is waiting for a 'last' indication to end the packet. This indicates a
+ * failure at the application layer.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_FIRST AL_BIT(22)
+/**
+ * Stream data
+ * Application stream interface, error indication during data transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA AL_BIT(21)
+/**
+ * Stream Data parity
+ * Application stream interface, parity error during data transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_DATA_PARITY AL_BIT(20)
+/**
+ * Stream Header error
+ * Application stream interface, error indication during header transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR AL_BIT(19)
+/**
+ * Stream Header parity
+ * Application stream interface, parity error during header transaction
+ */
+#define AL_INT_2ND_GROUP_B_S2M_STRM_HDR_PARITY AL_BIT(18)
+/**
+ * Completion UNACK
+ * Completion write, UNACK timeout due to completion FIFO back pressure
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_UNACK AL_BIT(17)
+/**
+ * Completion stream
+ * Completion write, UNACK timeout due to stream ACK FIFO back pressure
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_STRM AL_BIT(16)
+/**
+ * Completion AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_TO AL_BIT(15)
+/**
+ * Completion AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_RESP AL_BIT(14)
+/**
+ * Completion AXI parity
+ * Completion Bus generator internal SRAM parity error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_COMPL_AXI_PARITY AL_BIT(13)
+/**
+ * Prefetch saturate
+ * Prefetch engine, packet length counter saturated (32 bit) , this is caused
+ * by an error at the application layer which sends packet data without
+ * 'last'/'first' indication.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_SAT AL_BIT(12)
+/**
+ * Prefetch ring ID
+ * Prefetch engine, Ring ID is not matching the expected RingID. This could
+ * indicate a software bug or hardware failure, unless the UDMA is working in a
+ * mode to ignore RingId (the al_udma_iofic_config() API can be used to
+ * configure the UDMA to ignore the Ring ID check)
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_RING_ID AL_BIT(11)
+/**
+ * Prefetch AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_TO AL_BIT(10)
+/**
+ * Prefetch AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_RESP AL_BIT(9)
+/**
+ * Prefetch AXI parity
+ * Bus parity error on descriptor being prefetched
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PREFETCH_AXI_PARITY AL_BIT(8)
+/**
+ * No descriptors hint
+ * Data write, Hint to the SW that there are not enough descriptors in the
+ * queue for the current received packet. This is considered a hint and not an
+ * error, as it could be a normal situation in certain applications. The S2M
+ * UDMA behavior when it runs out of Rx Descriptor is controlled by driver
+ * which can use this hint to add more descriptors to the Rx queue.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_HINT AL_BIT(7)
+/**
+ * No descriptors timeout
+ * Data write, Timeout indication when there are not enough descriptors for the
+ * current packet and the timeout expires. The S2M UDMA behavior when it runs
+ * out of Rx Descriptor is controlled by driver which can use this hint to add
+ * more descriptors to the Rx queue. The al_udma_s2m_no_desc_cfg_set() is used
+ * to configure the UDMA S2M timeout and behavior when there are no Rx
+ * descriptors for the received packet.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_NO_DESC_TO AL_BIT(6)
+/**
+ * Promotion indication
+ * Data write, the data write engine checks the queue number of the two packets
+ * at the head of the data FIFO, the data write engine notify the prefetch
+ * engine to promote these queue numbers in the prefetch scheduler to make sure
+ * that these queue will have RX descriptors for these packets. This error
+ * indicates that the prefetch promotion didn't work for the second packet in
+ * the FIFO. This is an indication used for system debug and not an error.
+ */
+#define AL_INT_2ND_GROUP_B_S2M_PROM_IND AL_BIT(5)
+/**
+ * Header split ignored
+ * Data write, The application requested header split but the buffer descriptor
+ * doesn't include a second buffer for the header
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_IGNORED AL_BIT(4)
+/**
+ * Header split length
+ * Data write, The application requested header split and the length of the
+ * second buffer allocated for the header is not enough for the requested
+ * header length. The remaining of the header is written to buffer 1 (data
+ * buffer).
+ */
+#define AL_INT_2ND_GROUP_B_S2M_HDR_SPLT_LEN AL_BIT(3)
+/**
+ * Data AXI timeout
+ * Bus request to I/O Fabric timeout error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_TO AL_BIT(2)
+/**
+ * Data AXI response
+ * Bus response from I/O Fabric error
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_RESP AL_BIT(1)
+/**
+ * Data AXI parity
+ * Bus parity error on data being read
+ */
+#define AL_INT_2ND_GROUP_B_S2M_DATA_AXI_PARITY AL_BIT(0)
+
+/*******************************************************************************
+ * Configurations
+ ******************************************************************************/
+
+/**
+ * Configure the UDMA interrupt controller registers; interrupts are kept
+ * masked.
+ * This is a static setting that should be called while initializing the
+ * interrupt controller within a given UDMA, and should not be modified during
+ * runtime unless the UDMA is completely disabled. The first argument sets the
+ * interrupt and MSIX modes. The m2s/s2m errors/abort are a set of bit-wise
+ * masks to define the behaviour of the UDMA once an error happens: The _abort
+ * will put the UDMA in abort state once an error happens The _error bitmask
+ * will indicate and error in the secondary cause register but will not abort.
+ * The bit-mask that the _errors_disable and _aborts_disable are described in
+ * 'AL_INT_2ND_GROUP_A_*' and 'AL_INT_2ND_GROUP_B_*'
+ *
+ * @param regs pointer to unit registers
+ * @param mode interrupt scheme mode (legacy, MSI-X..)
+ * @param m2s_errors_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should generate an interrupt. When a bit is
+ * set, the error cause is ignored.
+ * Recommended value: 0 (enable all errors).
+ * @param m2s_aborts_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should automatically put the UDMA in
+ * abort state. When a bit is set, the error cause does not cause an abort.
+ * Recommended value: 0 (enable all aborts).
+ * @param s2m_errors_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should generate an interrupt. When a bit is
+ * set, the error cause is ignored.
+ * Recommended value: 0xE0 (disable hint errors).
+ * @param s2m_aborts_disable
+ * This is a bit-wise mask, to indicate which one of the error causes in
+ * secondary interrupt group_A should automatically put the UDMA in
+ * abort state. When a bit is set, the error cause does not cause an abort.
+ * Recommended value: 0xE0 (disable hint aborts).
+ *
+ * @return 0 on success. -EINVAL otherwise.
+ */
+int al_udma_iofic_config(struct unit_regs __iomem *regs,
+ enum al_iofic_mode mode,
+ uint32_t m2s_errors_disable,
+ uint32_t m2s_aborts_disable,
+ uint32_t s2m_errors_disable,
+ uint32_t s2m_aborts_disable);
+/**
+ * return the offset of the unmask register for a given group.
+ * this function can be used when the upper layer wants to directly
+ * access the unmask register and bypass the al_udma_iofic_unmask() API.
+ *
+ * @param regs pointer to udma registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @return the offset of the unmask register.
+ */
+uint32_t __iomem * al_udma_iofic_unmask_offset_get(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group);
+
+/**
+ * Get the interrupt controller base address for either the primary or secondary
+ * interrupt controller
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ *
+ * @returns The interrupt controller base address
+ *
+ */
+static INLINE void __iomem *al_udma_iofic_reg_base_get(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level)
+{
+ void __iomem *iofic_regs = (level == AL_UDMA_IOFIC_LEVEL_PRIMARY) ?
+		(void __iomem *)&regs->gen.interrupt_regs.main_iofic :
+		(void __iomem *)&regs->gen.interrupt_regs.secondary_iofic_ctrl;
+
+ return iofic_regs;
+}
+
+/**
+ * Check the interrupt controller level/group validity
+ *
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ *
+ * @returns 0 - invalid, 1 - valid
+ *
+ */
+static INLINE int al_udma_iofic_level_and_group_valid(
+ enum al_udma_iofic_level level,
+ int group)
+{
+ if (((level == AL_UDMA_IOFIC_LEVEL_PRIMARY) && (group >= 0) && (group < 4)) ||
+ ((level == AL_UDMA_IOFIC_LEVEL_SECONDARY) && (group >= 0) && (group < 2)))
+ return 1;
+
+ return 0;
+}
+/**
+ * unmask specific interrupts for a given group
+ * this functions uses the interrupt mask clear register to guarantee atomicity
+ * it's safe to call it while the mask is changed by the HW (auto mask) or another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to unmask, set bits will be unmasked.
+ */
+static INLINE void al_udma_iofic_unmask(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group,
+ uint32_t mask)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ al_iofic_unmask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * mask specific interrupts for a given group
+ * this functions modifies interrupt mask register, the callee must make sure
+ * the mask is not changed by another cpu.
+ *
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ * @param mask bitwise of interrupts to mask, set bits will be masked.
+ */
+static INLINE void al_udma_iofic_mask(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group,
+ uint32_t mask)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ al_iofic_mask(al_udma_iofic_reg_base_get(regs, level), group, mask);
+}
+
+/**
+ * read interrupt cause register for a given group
+ * this will clear the set bits if the Clear on Read mode enabled.
+ * @param regs pointer to udma unit registers
+ * @param level the interrupt controller level (primary / secondary)
+ * @param group the interrupt group ('AL_INT_GROUP_*')
+ */
+static INLINE uint32_t al_udma_iofic_read_cause(
+ struct unit_regs __iomem *regs,
+ enum al_udma_iofic_level level,
+ int group)
+{
+ al_assert(al_udma_iofic_level_and_group_valid(level, group));
+ return al_iofic_read_cause(al_udma_iofic_reg_base_get(regs, level), group);
+}
+
+#endif
+/** @} end of UDMA group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic_regs.h
new file mode 100644
index 0000000..2eccb88
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_iofic_regs.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __AL_HAL_UDMA_IOFIC_REG_H
+#define __AL_HAL_UDMA_IOFIC_REG_H
+
+#include "al_hal_iofic_regs.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** This structure covers all interrupt registers of a given UDMA, which is
+ * built of an al_iofic_regs, which is the common I/O Fabric Interrupt
+ * controller (IOFIC), and additional two interrupts groups dedicated for the
+ * application-specific engine attached to the UDMA, the interrupt summary
+ * of those two groups routed to group D of the main controller.
+ */
+struct udma_iofic_regs {
+ struct al_iofic_regs main_iofic;
+ uint32_t rsrvd1[(0x1c00) >> 2];
+ struct al_iofic_grp_ctrl secondary_iofic_ctrl[2];
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_IOFIC_REG_H */
+
+
+
+
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs.h
new file mode 100644
index 0000000..ddec1e0
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_udma_regs.h
+ *
+ * @brief udma registers definition
+ *
+ *
+ */
+#ifndef __AL_HAL_UDMA_REG_H
+#define __AL_HAL_UDMA_REG_H
+
+#include "al_hal_udma_regs_m2s.h"
+#include "al_hal_udma_regs_s2m.h"
+#include "al_hal_udma_regs_gen.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** UDMA registers, either m2s or s2m */
+union udma_regs {
+ struct udma_m2s_regs m2s;
+ struct udma_s2m_regs s2m;
+};
+
+struct unit_regs {
+ struct udma_m2s_regs m2s;
+ uint32_t rsrvd0[(0x10000 - sizeof(struct udma_m2s_regs)) >> 2];
+ struct udma_s2m_regs s2m;
+ uint32_t rsrvd1[((0x1C000 - 0x10000) - sizeof(struct udma_s2m_regs)) >> 2];
+ struct udma_gen_regs gen;
+};
+
+/** UDMA submission and completion registers, M2S and S2M UDMAs have same structure */
+struct udma_rings_regs {
+ uint32_t rsrvd0[8];
+ uint32_t cfg; /* Descriptor ring configuration */
+ uint32_t status; /* Descriptor ring status and information */
+ uint32_t drbp_low; /* Descriptor Ring Base Pointer [31:4] */
+ uint32_t drbp_high; /* Descriptor Ring Base Pointer [63:32] */
+ uint32_t drl; /* Descriptor Ring Length[23:2] */
+ uint32_t drhp; /* Descriptor Ring Head Pointer */
+ uint32_t drtp_inc; /* Descriptor Tail Pointer increment */
+ uint32_t drtp; /* Descriptor Tail Pointer */
+ uint32_t dcp; /* Descriptor Current Pointer */
+ uint32_t crbp_low; /* Completion Ring Base Pointer [31:4] */
+ uint32_t crbp_high; /* Completion Ring Base Pointer [63:32] */
+ uint32_t crhp; /* Completion Ring Head Pointer */
+ uint32_t crhp_internal; /* Completion Ring Head Pointer internal, before AX ... */
+};
+
+/** M2S and S2M generic structure of Q registers */
+union udma_q_regs {
+ struct udma_rings_regs rings;
+ struct udma_m2s_q m2s_q;
+ struct udma_s2m_q s2m_q;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_REG_H */
+/** @} end of UDMA group */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_gen.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_gen.h
new file mode 100644
index 0000000..b6a3c72
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_gen.h
@@ -0,0 +1,413 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_gen.h
+ *
+ * @brief C Header file for the UDMA general registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_GEN_REG_H
+#define __AL_HAL_UDMA_GEN_REG_H
+
+#include "al_hal_udma_iofic_regs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_gen_dma_misc {
+ /* [0x0] Reserved register for the interrupt controller */
+ uint32_t int_cfg;
+ /* [0x4] Revision register */
+ uint32_t revision;
+ /* [0x8] Reserved for future use */
+ uint32_t general_cfg_1;
+ /* [0xc] Reserved for future use */
+ uint32_t general_cfg_2;
+ /* [0x10] Reserved for future use */
+ uint32_t general_cfg_3;
+ /* [0x14] Reserved for future use */
+ uint32_t general_cfg_4;
+ /* [0x18] General timer configuration */
+ uint32_t general_cfg_5;
+ uint32_t rsrvd[57];
+};
+struct udma_gen_mailbox {
+ /*
+ * [0x0] Mailbox interrupt generator.
+ * Generates interrupt to neighbor DMA
+ */
+ uint32_t interrupt;
+ /* [0x4] Mailbox message data out */
+ uint32_t msg_out;
+ /* [0x8] Mailbox message data in */
+ uint32_t msg_in;
+ uint32_t rsrvd[13];
+};
+struct udma_gen_axi {
+ /* [0x0] Configuration of the AXI masters */
+ uint32_t cfg_1;
+ /* [0x4] Configuration of the AXI masters */
+ uint32_t cfg_2;
+ /* [0x8] Configuration of the AXI masters. Endianness configuration */
+ uint32_t endian_cfg;
+ uint32_t rsrvd[61];
+};
+struct udma_gen_sram_ctrl {
+ /* [0x0] Timing configuration */
+ uint32_t timing;
+};
+struct udma_gen_vmid {
+ /* [0x0] VMID control */
+ uint32_t cfg_vmid_0;
+ /* [0x4] TX queue 0/1 VMID */
+ uint32_t cfg_vmid_1;
+ /* [0x8] TX queue 2/3 VMID */
+ uint32_t cfg_vmid_2;
+ /* [0xc] RX queue 0/1 VMID */
+ uint32_t cfg_vmid_3;
+ /* [0x10] RX queue 2/3 VMID */
+ uint32_t cfg_vmid_4;
+};
+struct udma_gen_vmaddr {
+ /* [0x0] TX queue 0/1 VMADDR */
+ uint32_t cfg_vmaddr_0;
+ /* [0x4] TX queue 2/3 VMADDR */
+ uint32_t cfg_vmaddr_1;
+ /* [0x8] RX queue 0/1 VMADDR */
+ uint32_t cfg_vmaddr_2;
+ /* [0xc] RX queue 2/3 VMADDR */
+ uint32_t cfg_vmaddr_3;
+};
+struct udma_gen_vmpr {
+ /* [0x0] TX VMPR control */
+ uint32_t cfg_vmpr_0;
+ /* [0x4] TX VMPR Address High Register */
+ uint32_t cfg_vmpr_1;
+ /* [0x8] TX queue VMID values */
+ uint32_t cfg_vmpr_2;
+ /* [0xc] TX queue VMID values */
+ uint32_t cfg_vmpr_3;
+ /* [0x10] RX VMPR control */
+ uint32_t cfg_vmpr_4;
+ /* [0x14] RX VMPR Buffer2 MSB address */
+ uint32_t cfg_vmpr_5;
+ /* [0x18] RX queue VMID values */
+ uint32_t cfg_vmpr_6;
+ /* [0x1c] RX queue BUF1 VMID values */
+ uint32_t cfg_vmpr_7;
+ /* [0x20] RX queue BUF2 VMID values */
+ uint32_t cfg_vmpr_8;
+ /* [0x24] RX queue Direct Data Placement VMID values */
+ uint32_t cfg_vmpr_9;
+ /* [0x28] RX VMPR BUF1 Address High Register */
+ uint32_t cfg_vmpr_10;
+ /* [0x2c] RX VMPR BUF2 Address High Register */
+ uint32_t cfg_vmpr_11;
+ /* [0x30] RX VMPR DDP Address High Register */
+ uint32_t cfg_vmpr_12;
+ uint32_t rsrvd[3];
+};
+
+struct udma_gen_regs {
+ struct udma_iofic_regs interrupt_regs; /* [0x0000] */
+ struct udma_gen_dma_misc dma_misc; /* [0x2080] */
+ struct udma_gen_mailbox mailbox[4]; /* [0x2180] */
+ struct udma_gen_axi axi; /* [0x2280] */
+ struct udma_gen_sram_ctrl sram_ctrl[25]; /* [0x2380] */
+ uint32_t rsrvd_1[2];
+ struct udma_gen_vmid vmid; /* [0x23ec] */
+ struct udma_gen_vmaddr vmaddr; /* [0x2400] */
+ uint32_t rsrvd_2[252];
+ struct udma_gen_vmpr vmpr[4]; /* [0x2800] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** int_cfg register ****/
+/*
+ * MSIX data width
+ * 1 - 64 bit
+ * 0 - 32 bit
+ */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_64 (1 << 0)
+/* General configuration */
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_MASK 0x0000000E
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_3_1_SHIFT 1
+/* MSIx AXI QoS */
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_MASK 0x00000070
+#define UDMA_GEN_DMA_MISC_INT_CFG_MSIX_AXI_QOS_SHIFT 4
+
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_MASK 0xFFFFFF80
+#define UDMA_GEN_DMA_MISC_INT_CFG_RESERVED_31_7_SHIFT 7
+
+/**** revision register ****/
+/* Design programming interface revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK 0x00000FFF
+#define UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT 0
+/* Design minor revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_MASK 0x00FFF000
+#define UDMA_GEN_DMA_MISC_REVISION_MINOR_ID_SHIFT 12
+/* Design major revision ID */
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_MASK 0xFF000000
+#define UDMA_GEN_DMA_MISC_REVISION_MAJOR_ID_SHIFT 24
+
+/**** Interrupt register ****/
+/* Generate interrupt to another DMA */
+#define UDMA_GEN_MAILBOX_INTERRUPT_SET (1 << 0)
+
+/**** cfg_2 register ****/
+/*
+ * Enable arbitration promotion.
+ * Increment master priority after configured number of arbitration cycles
+ */
+#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK 0x0000000F
+#define UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_SHIFT 0
+
+/**** endian_cfg register ****/
+/* Swap M2S descriptor read and completion descriptor write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC (1 << 0)
+/* Swap M2S data read. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA (1 << 1)
+/* Swap S2M descriptor read and completion descriptor write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC (1 << 2)
+/* Swap S2M data write. */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA (1 << 3)
+/*
+ * Swap 32 or 64 bit mode:
+ * 0 - Swap groups of 4 bytes
+ * 1 - Swap groups of 8 bytes
+ */
+#define UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN (1 << 4)
+
+/**** timing register ****/
+/* Write margin */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_MASK 0x0000000F
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMA_SHIFT 0
+/* Write margin enable */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMEA (1 << 8)
+/* Read margin */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_MASK 0x000F0000
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMB_SHIFT 16
+/* Read margin enable */
+#define UDMA_GEN_SRAM_CTRL_TIMING_RMEB (1 << 24)
+
+/**** cfg_vmid_0 register ****/
+/* For M2S queues 3:0, enable usage of the VMID from the buffer address 63:56 */
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_MASK 0x0000000F
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_DESC_EN_SHIFT 0
+/*
+ * For M2S queues 3:0, enable usage of the VMID from the configuration register
+ * (cfg_vmid_1/2 used for M2S queue_x)
+ */
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_MASK 0x000000F0
+#define UDMA_GEN_VMID_CFG_VMID_0_TX_Q_VMID_QUEUE_EN_SHIFT 4
+/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
+#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_SEL (1 << 8)
+/* Enable write to all VMID_n registers in the MSI-X Controller */
+#define UDMA_GEN_VMID_CFG_VMID_0_MSIX_VMID_ACCESS_EN (1 << 9)
+/* For S2M queues 3:0, enable usage of the VMID from the buffer address 63:56 */
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_MASK 0x000F0000
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_DESC_EN_SHIFT 16
+/*
+ * For S2M queues 3:0, enable usage of the VMID from the configuration register
+ * (cfg_vmid_3/4 used for M2S queue_x)
+ */
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_MASK 0x00F00000
+#define UDMA_GEN_VMID_CFG_VMID_0_RX_Q_VMID_QUEUE_EN_SHIFT 20
+
+/**** cfg_vmid_1 register ****/
+/* TX queue 0 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_0_VMID_SHIFT 0
+/* TX queue 1 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_1_TX_Q_1_VMID_SHIFT 16
+
+/**** cfg_vmid_2 register ****/
+/* TX queue 2 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_2_VMID_SHIFT 0
+/* TX queue 3 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_2_TX_Q_3_VMID_SHIFT 16
+
+/**** cfg_vmid_3 register ****/
+/* RX queue 0 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_0_VMID_SHIFT 0
+/* RX queue 1 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_3_RX_Q_1_VMID_SHIFT 16
+
+/**** cfg_vmid_4 register ****/
+/* RX queue 2 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_2_VMID_SHIFT 0
+/* RX queue 3 VMID value */
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMID_CFG_VMID_4_RX_Q_3_VMID_SHIFT 16
+
+/**** cfg_vmaddr_0 register ****/
+/* TX queue 0 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_0_VMADDR_SHIFT 0
+/* TX queue 1 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_0_TX_Q_1_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_1 register ****/
+/* TX queue 2 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_2_VMADDR_SHIFT 0
+/* TX queue 3 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_1_TX_Q_3_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_2 register ****/
+/* RX queue 0 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_0_VMADDR_SHIFT 0
+/* RX queue 1 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_2_RX_Q_1_VMADDR_SHIFT 16
+
+/**** cfg_vmaddr_3 register ****/
+/* RX queue 2 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_MASK 0x0000FFFF
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_2_VMADDR_SHIFT 0
+/* RX queue 3 VMADDR value */
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_MASK 0xFFFF0000
+#define UDMA_GEN_VMADDR_CFG_VMADDR_3_RX_Q_3_VMADDR_SHIFT 16
+
+/**** cfg_vmpr_0 register ****/
+/* TX High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_MASK 0x0000003F
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_HISEL_SHIFT 0
+/* TX Data VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_DATA_VMID_EN (1 << 7)
+/* TX Prefetch VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_PREF_VMID_EN (1 << 28)
+/* TX Completions VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_0_TX_Q_CMPL_VMID_EN (1 << 29)
+
+/**** cfg_vmpr_2 register ****/
+/* TX queue Prefetch VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_PREF_VMID_SHIFT 0
+/* TX queue Completion VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_2_TX_Q_CMPL_VMID_SHIFT 16
+
+/**** cfg_vmpr_3 register ****/
+/* TX queue Data VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SHIFT 0
+/* TX queue Data VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_3_TX_Q_DATA_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_4 register ****/
+/* RX Data Buffer1 - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_MASK 0x0000003F
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_HISEL_SHIFT 0
+/* RX Data Buffer1 VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF1_VMID_EN (1 << 7)
+/* RX Data Buffer2 - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_MASK 0x00003F00
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_HISEL_SHIFT 8
+/* RX Data Buffer2 VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_VMID_EN (1 << 15)
+/* RX Direct Data Placement - High Address Select Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_MASK 0x003F0000
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_HISEL_SHIFT 16
+/* RX Direct Data Placement VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_DDP_VMID_EN (1 << 23)
+/* RX Buffer 2 MSB address word selects per bytes, per queue */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_MASK 0x0F000000
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_BUF2_MSB_ADDR_SEL_SHIFT 24
+/* RX Prefetch VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_PREF_VMID_EN (1 << 28)
+/* RX Completions VMID Enable Per Q */
+#define UDMA_GEN_VMPR_CFG_VMPR_4_RX_Q_CMPL_VMID_EN (1 << 29)
+
+/**** cfg_vmpr_6 register ****/
+/* RX queue Prefetch VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_PREF_VMID_SHIFT 0
+/* RX queue Completion VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_6_RX_Q_CMPL_VMID_SHIFT 16
+
+/**** cfg_vmpr_7 register ****/
+/* RX queue Data Buffer 1 VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SHIFT 0
+/* RX queue Data Buffer 1 VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_7_RX_Q_BUF1_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_8 register ****/
+/* RX queue Data Buffer 2 VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SHIFT 0
+/* RX queue Data Buffer 2 VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_8_RX_Q_BUF2_VMID_SEL_SHIFT 16
+
+/**** cfg_vmpr_9 register ****/
+/* RX queue DDP VMID */
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_MASK 0x0000FFFF
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SHIFT 0
+/* RX queue DDP VMID select */
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_MASK 0xFFFF0000
+#define UDMA_GEN_VMPR_CFG_VMPR_9_RX_Q_DDP_VMID_SEL_SHIFT 16
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_GEN_REG_H */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_m2s.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_m2s.h
new file mode 100644
index 0000000..0a91ef2
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_m2s.h
@@ -0,0 +1,1158 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_m2s.h
+ *
+ * @brief C Header file for the UDMA M2S registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_M2S_REG_H
+#define __AL_HAL_UDMA_M2S_REG_H
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_axi_m2s {
+ /* [0x0] Completion write master configuration */
+ uint32_t comp_wr_cfg_1;
+ /* [0x4] Completion write master configuration */
+ uint32_t comp_wr_cfg_2;
+ /* [0x8] Data read master configuration */
+ uint32_t data_rd_cfg_1;
+ /* [0xc] Data read master configuration */
+ uint32_t data_rd_cfg_2;
+ /* [0x10] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_1;
+ /* [0x14] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_2;
+ /* [0x18] Data read master configuration */
+ uint32_t data_rd_cfg;
+ /* [0x1c] Descriptors read master configuration */
+ uint32_t desc_rd_cfg_3;
+ /* [0x20] Descriptors write master configuration (completion) */
+ uint32_t desc_wr_cfg_1;
+ /* [0x24] AXI outstanding configuration */
+ uint32_t ostand_cfg;
+ uint32_t rsrvd[54];
+};
+struct udma_m2s {
+ /*
+ * [0x0] DMA state.
+ * 00 - No pending tasks
+ * 01 - Normal (active)
+ * 10 - Abort (error condition)
+ * 11 - Reserved
+ */
+ uint32_t state;
+ /* [0x4] CPU request to change DMA state */
+ uint32_t change_state;
+ uint32_t rsrvd_0;
+ /*
+ * [0xc] M2S DMA error log mask.
+ * Each error has an interrupt controller cause bit.
+ * This register determines if these errors cause the M2S DMA to log the
+ * error condition.
+ * 0 - Log is enabled.
+ * 1 - Log is masked.
+ */
+ uint32_t err_log_mask;
+ uint32_t rsrvd_1;
+ /*
+ * [0x14] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_0;
+ /*
+ * [0x18] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_1;
+ /*
+ * [0x1c] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_2;
+ /*
+ * [0x20] DMA header log.
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_3;
+ /* [0x24] DMA clear error log */
+ uint32_t clear_err_log;
+ /* [0x28] M2S data FIFO status */
+ uint32_t data_fifo_status;
+ /* [0x2c] M2S header FIFO status */
+ uint32_t header_fifo_status;
+ /* [0x30] M2S unack FIFO status */
+ uint32_t unack_fifo_status;
+ /* [0x34] Select queue for debug */
+ uint32_t indirect_ctrl;
+ /*
+ * [0x38] M2S prefetch FIFO status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_pref_fifo_status;
+ /*
+ * [0x3c] M2S completion FIFO status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_comp_fifo_status;
+ /*
+ * [0x40] M2S rate limit status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_rate_limit_status;
+ /*
+ * [0x44] M2S DWRR scheduler status.
+ * Status of the selected queue in M2S_indirect_ctrl
+ */
+ uint32_t sel_dwrr_status;
+ /* [0x48] M2S state machine and FIFO clear control */
+ uint32_t clear_ctrl;
+ /* [0x4c] Misc Check enable */
+ uint32_t check_en;
+ /* [0x50] M2S FIFO enable control, internal */
+ uint32_t fifo_en;
+ /* [0x54] M2S packet length configuration */
+ uint32_t cfg_len;
+ /* [0x58] Stream interface configuration */
+ uint32_t stream_cfg;
+ uint32_t rsrvd[41];
+};
+struct udma_m2s_rd {
+ /* [0x0] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_1;
+ /* [0x4] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_2;
+ /* [0x8] M2S descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_3;
+ uint32_t rsrvd_0;
+ /* [0x10] Data burst read configuration */
+ uint32_t data_cfg;
+ uint32_t rsrvd[11];
+};
+struct udma_m2s_dwrr {
+ /* [0x0] Tx DMA DWRR scheduler configuration */
+ uint32_t cfg_sched;
+ /* [0x4] Token bucket rate limit control */
+ uint32_t ctrl_deficit_cnt;
+ uint32_t rsrvd[14];
+};
+struct udma_m2s_rate_limiter {
+ /* [0x0] Token bucket rate limit configuration */
+ uint32_t gen_cfg;
+ /*
+ * [0x4] Token bucket rate limit control.
+ * Controls the cycle counters.
+ */
+ uint32_t ctrl_cycle_cnt;
+ /*
+ * [0x8] Token bucket rate limit control.
+ * Controls the token bucket counter.
+ */
+ uint32_t ctrl_token;
+ uint32_t rsrvd[13];
+};
+
+struct udma_rlimit_common {
+ /* [0x0] Token bucket configuration */
+ uint32_t cfg_1s;
+ /* [0x4] Token bucket rate limit configuration */
+ uint32_t cfg_cycle;
+ /* [0x8] Token bucket rate limit configuration */
+ uint32_t cfg_token_size_1;
+ /* [0xc] Token bucket rate limit configuration */
+ uint32_t cfg_token_size_2;
+ /* [0x10] Token bucket rate limit configuration */
+ uint32_t sw_ctrl;
+ /*
+ * [0x14] Mask the different types of rate limiter.
+ * 0 - Rate limit is active.
+ * 1 - Rate limit is masked.
+ */
+ uint32_t mask;
+};
+
+struct udma_m2s_stream_rate_limiter {
+ struct udma_rlimit_common rlimit;
+ uint32_t rsrvd[10];
+};
+struct udma_m2s_comp {
+ /* [0x0] Completion controller configuration */
+ uint32_t cfg_1c;
+ /* [0x4] Completion controller coalescing configuration */
+ uint32_t cfg_coal;
+ /* [0x8] Completion controller application acknowledge configuration */
+ uint32_t cfg_application_ack;
+ uint32_t rsrvd[61];
+};
+struct udma_m2s_stat {
+ /* [0x0] Statistics counters configuration */
+ uint32_t cfg_st;
+ /* [0x4] Counting number of descriptors with First-bit set. */
+ uint32_t tx_pkt;
+ /*
+ * [0x8] Counting the net length of the data buffers [64-bit]
+ * Should be read before tx_bytes_high
+ */
+ uint32_t tx_bytes_low;
+ /*
+ * [0xc] Counting the net length of the data buffers [64-bit],
+ * Should be read after tx_bytes_low (value is sampled when reading
+ * Should be read before tx_bytes_low
+ */
+ uint32_t tx_bytes_high;
+ /* [0x10] Total number of descriptors read from the host memory */
+ uint32_t prefed_desc;
+ /* [0x14] Number of packets read from the unack FIFO */
+ uint32_t comp_pkt;
+ /* [0x18] Number of descriptors written into the completion ring */
+ uint32_t comp_desc;
+ /*
+ * [0x1c] Number of acknowledged packets.
+ * (acknowledge received from the stream interface)
+ */
+ uint32_t ack_pkts;
+ uint32_t rsrvd[56];
+};
+struct udma_m2s_feature {
+ /*
+ * [0x0] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_1;
+ /* [0x4] Reserved M2S feature register */
+ uint32_t reg_2;
+ /*
+ * [0x8] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_3;
+ /*
+ * [0xc] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_4;
+ /*
+ * [0x10] M2S Feature register.
+ * M2S instantiation parameters
+ */
+ uint32_t reg_5;
+ uint32_t rsrvd[59];
+};
+struct udma_m2s_q {
+ uint32_t rsrvd_0[8]; /* [0x0] Reserved */
+ /* [0x20] M2S descriptor ring configuration */
+ uint32_t cfg;
+ /* [0x24] M2S descriptor ring status and information */
+ uint32_t status;
+ /* [0x28] TX Descriptor Ring Base Pointer [31:4] */
+ uint32_t tdrbp_low;
+ /* [0x2c] TX Descriptor Ring Base Pointer [63:32] */
+ uint32_t tdrbp_high;
+ /*
+ * [0x30] TX Descriptor Ring Length[23:2]
+ */
+ uint32_t tdrl;
+ /* [0x34] TX Descriptor Ring Head Pointer */
+ uint32_t tdrhp;
+ /* [0x38] Tx Descriptor Tail Pointer increment */
+ uint32_t tdrtp_inc;
+ /* [0x3c] Tx Descriptor Tail Pointer */
+ uint32_t tdrtp;
+ /* [0x40] TX Descriptor Current Pointer */
+ uint32_t tdcp;
+ /* [0x44] Tx Completion Ring Base Pointer [31:4] */
+ uint32_t tcrbp_low;
+ /* [0x48] TX Completion Ring Base Pointer [63:32] */
+ uint32_t tcrbp_high;
+ /* [0x4c] TX Completion Ring Head Pointer */
+ uint32_t tcrhp;
+ /*
+ * [0x50] Tx Completion Ring Head Pointer internal (Before the
+ * coalescing FIFO)
+ */
+ uint32_t tcrhp_internal;
+ uint32_t rsrvd_1[3]; /* [0x54] Reserved */
+ /* [0x60] Rate limit configuration */
+ struct udma_rlimit_common rlimit;
+ uint32_t rsrvd_2[2]; /* [0x78] Reserved */
+ /* [0x80] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_1;
+ /* [0x84] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_2;
+ /* [0x88] DWRR scheduler configuration */
+ uint32_t dwrr_cfg_3;
+ /* [0x8c] DWRR scheduler software control */
+ uint32_t dwrr_sw_ctrl;
+ uint32_t rsrvd_3[4]; /* [0x90] Reserved */
+ /* [0xa0] Completion controller configuration */
+ uint32_t comp_cfg;
+ uint32_t rsrvd_4[3]; /* [0xa4] Reserved */
+ /* [0xb0] SW control */
+ uint32_t q_sw_ctrl;
+ uint32_t rsrvd_5[3]; /* [0xb4] Reserved */
+ /* [0xc0] Number of M2S Tx packets after the scheduler */
+ uint32_t q_tx_pkt;
+ uint32_t rsrvd[975]; /* [0xc4] Reserved; pads the queue block to 0x1000 bytes */
+};
+
+struct udma_m2s_regs {
+ uint32_t rsrvd_0[64]; /* [0x0] Reserved */
+ struct udma_axi_m2s axi_m2s; /* [0x100] */
+ struct udma_m2s m2s; /* [0x200] */
+ struct udma_m2s_rd m2s_rd; /* [0x300] */
+ struct udma_m2s_dwrr m2s_dwrr; /* [0x340] */
+ struct udma_m2s_rate_limiter m2s_rate_limiter; /* [0x380] */
+ struct udma_m2s_stream_rate_limiter m2s_stream_rate_limiter; /* [0x3c0] */
+ struct udma_m2s_comp m2s_comp; /* [0x400] */
+ struct udma_m2s_stat m2s_stat; /* [0x500] */
+ struct udma_m2s_feature m2s_feature; /* [0x600] */
+ uint32_t rsrvd_1[576]; /* [0x700] Reserved; pads to the queue array at 0x1000 */
+ struct udma_m2s_q m2s_q[4]; /* [0x1000] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** comp_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** comp_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** data_rd_cfg_1 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_MASK 0x000000FF
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_1_ARBURST_SHIFT 24
+
+/**** data_rd_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_DATA_RD_CFG_2_ARPROT_SHIFT 28
+
+/**** desc_rd_cfg_1 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_MASK 0x03000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_1_ARBURST_SHIFT 24
+
+/**** desc_rd_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_MASK 0x07000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_MASK 0x70000000
+#define UDMA_AXI_M2S_DESC_RD_CFG_2_ARPROT_SHIFT 28
+
+/**** data_rd_cfg register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DATA_RD_CFG_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enable breaking data read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats
+ */
+#define UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_rd_cfg_3 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ * Maximum burst size for reading data (in AXI beats, 128 bits)
+ * (default - 16 beats, 256 bytes)
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enable breaking descriptor read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats.
+ */
+#define UDMA_AXI_M2S_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_wr_cfg_1 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst.
+ * This value is used for a burst split decision.
+ */
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Minimum burst for writing completion descriptors.
+ * Defined in AXI beats
+ * 4 Descriptors per beat.
+ * Value must be aligned to cache lines (64 bytes).
+ * Default value is 2 cache lines, 32 descriptors, 8 beats.
+ */
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000
+#define UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16
+
+/**** ostand_cfg register ****/
+/* Maximum number of outstanding data reads to the AXI (AXI transactions) */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK 0x0000003F
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_SHIFT 0
+/*
+ * Maximum number of outstanding descriptor reads to the AXI (AXI transactions)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK 0x00003F00
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to the AXI (AXI transactions)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI (AXI
+ * beats)
+ */
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK 0xFF000000
+#define UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT 24
+
+/**** state register ****/
+/* Completion control */
+#define UDMA_M2S_STATE_COMP_CTRL_MASK 0x00000003
+#define UDMA_M2S_STATE_COMP_CTRL_SHIFT 0
+/* Stream interface */
+#define UDMA_M2S_STATE_STREAM_IF_MASK 0x00000030
+#define UDMA_M2S_STATE_STREAM_IF_SHIFT 4
+/* Data read control */
+#define UDMA_M2S_STATE_DATA_RD_CTRL_MASK 0x00000300
+#define UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT 8
+/* Descriptor prefetch */
+#define UDMA_M2S_STATE_DESC_PREF_MASK 0x00003000
+#define UDMA_M2S_STATE_DESC_PREF_SHIFT 12
+
+/**** change_state register ****/
+/* Start normal operation */
+#define UDMA_M2S_CHANGE_STATE_NORMAL (1 << 0)
+/* Stop normal operation */
+#define UDMA_M2S_CHANGE_STATE_DIS (1 << 1)
+/*
+ * Stop all machines.
+ * (Prefetch, scheduling, completion and stream interface)
+ */
+#define UDMA_M2S_CHANGE_STATE_ABORT (1 << 2)
+
+/**** err_log_mask register ****/
+/*
+ * Mismatch of packet serial number.
+ * (between first packet in the unacknowledged FIFO and received ack from the
+ * stream)
+ */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_PKT_MISMATCH (1 << 0)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_PARITY (1 << 1)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_RESPONSE (1 << 2)
+/* AXI timeout (ack not received) */
+#define UDMA_M2S_ERR_LOG_MASK_STREAM_AXI_TOUT (1 << 3)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_PARITY (1 << 4)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_RESPONSE (1 << 5)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_COMP_AXI_TOUT (1 << 6)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_PARITY (1 << 7)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_RESPONSE (1 << 8)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_AXI_TOUT (1 << 9)
+/* Parity error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_PARITY (1 << 10)
+/* AXI response error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_RESPONSE (1 << 11)
+/* AXI timeout */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_AXI_TOUT (1 << 12)
+/* Packet length error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_PKT_LEN_OVERFLOW (1 << 13)
+/* Maximum number of descriptors per packet error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_MAX_DESC_CNT (1 << 14)
+/* Error in first bit indication of the descriptor */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_FIRST (1 << 15)
+/* Error in last bit indication of the descriptor */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_LAST (1 << 16)
+/* Ring_ID error */
+#define UDMA_M2S_ERR_LOG_MASK_PREF_RING_ID (1 << 17)
+/* Data buffer parity error */
+#define UDMA_M2S_ERR_LOG_MASK_DATA_BUFF_PARITY (1 << 18)
+/* Internal error */
+#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_MASK 0xFFF80000
+#define UDMA_M2S_ERR_LOG_MASK_INTERNAL_SHIFT 19
+
+/**** clear_err_log register ****/
+/* Clear error log */
+#define UDMA_M2S_CLEAR_ERR_LOG_CLEAR (1 << 0)
+
+/**** data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** header_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_HEADER_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_HEADER_FIFO_STATUS_FULL (1 << 28)
+
+/**** unack_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_UNACK_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_UNACK_FIFO_STATUS_FULL (1 << 28)
+
+/**** indirect_ctrl register ****/
+/* Selected queue for status read */
+#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF
+#define UDMA_M2S_INDIRECT_CTRL_Q_NUM_SHIFT 0
+
+/**** sel_pref_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_SEL_PREF_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_comp_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_M2S_SEL_COMP_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_rate_limit_status register ****/
+/* Token counter */
+#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_MASK 0x00FFFFFF
+#define UDMA_M2S_SEL_RATE_LIMIT_STATUS_TOKEN_CNT_SHIFT 0
+
+/**** sel_dwrr_status register ****/
+/* Deficit counter */
+#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_MASK 0x00FFFFFF
+#define UDMA_M2S_SEL_DWRR_STATUS_DEFICIT_CNT_SHIFT 0
+
+/**** cfg_len register ****/
+/* Maximum packet size for the M2S */
+#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK 0x000FFFFF
+#define UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_SHIFT 0
+/*
+ * Length encoding for 64K.
+ * 0 - length 0x0000 = 0
+ * 1 - length 0x0000 = 64k
+ */
+#define UDMA_M2S_CFG_LEN_ENCODE_64K (1 << 24)
+
+/**** stream_cfg register ****/
+/*
+ * Disables the stream interface operation.
+ * Changing to 1 stops at the end of packet transmission.
+ */
+#define UDMA_M2S_STREAM_CFG_DISABLE (1 << 0)
+/*
+ * Configuration of the stream FIFO read control.
+ * 0 - Cut through
+ * 1 - Threshold based
+ */
+#define UDMA_M2S_STREAM_CFG_RD_MODE (1 << 1)
+/* Minimum number of beats to start packet transmission. */
+#define UDMA_M2S_STREAM_CFG_RD_TH_MASK 0x0003FF00
+#define UDMA_M2S_STREAM_CFG_RD_TH_SHIFT 8
+
+/**** desc_pref_cfg_1 register ****/
+/* Size of the descriptor prefetch FIFO (in descriptors) */
+#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0
+
+/**** desc_pref_cfg_2 register ****/
+/* Maximum number of descriptors per packet */
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK 0x0000001F
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT 0
+/*
+ * Force RR arbitration in the prefetch arbiter.
+ * 0 - Standard arbitration based on queue QoS
+ * 1 - Force Round Robin arbitration
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16)
+
+/**** desc_pref_cfg_3 register ****/
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is below the
+ * descriptor prefetch threshold
+ * (must be 1)
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is above the
+ * descriptor prefetch threshold
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4
+/*
+ * Descriptor fetch threshold.
+ * Used as a threshold to determine the allowed minimum descriptor burst size.
+ * (Must be at least max_desc_per_pkt)
+ */
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00
+#define UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8
+
+/**** data_cfg register ****/
+/*
+ * Maximum number of data beats in the data read FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 2KB -> 128 beats)
+ */
+#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data read FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT 16
+
+/**** cfg_sched register ****/
+/*
+ * Enable the DWRR scheduler.
+ * If this bit is 0, queues with same QoS will be served with RR scheduler.
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR (1 << 0)
+/*
+ * Scheduler operation mode.
+ * 0 - Byte mode
+ * 1 - Packet mode
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN (1 << 4)
+/*
+ * Enable incrementing the weight factor between DWRR iterations.
+ * 00 - Don't increase the increment factor.
+ * 01 - Increment once
+ * 10 - Increment exponential
+ * 11 - Reserved
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK 0x00000300
+#define UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT 8
+/*
+ * Increment factor power of 2.
+ * 7 --> 128 bytes
+ * This is the factor used to multiply the weight.
+ */
+#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK 0x000F0000
+#define UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT 16
+
+/**** ctrl_deficit_cnt register ****/
+/*
+ * Init value for the deficit counter.
+ * Initializes the deficit counters of all queues to this value any time this
+ * register is written.
+ */
+#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK 0x00FFFFFF
+#define UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_SHIFT 0
+
+/**** gen_cfg register ****/
+/* Size of the basic token fill cycle, system clock cycles */
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_SHIFT 0
+/*
+ * Rate limiter operation mode.
+ * 0 - Byte mode
+ * 1 - Packet mode
+ */
+#define UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN (1 << 24)
+
+/**** ctrl_cycle_cnt register ****/
+/* Reset the short and long cycle counters. */
+#define UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST (1 << 0)
+
+/**** ctrl_token register ****/
+/*
+ * Init value for the token counter.
+ * Initializes the token counters of all queues to this value any time this
+ * register is written.
+ */
+#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK 0x00FFFFFF
+#define UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_SHIFT 0
+
+/**** cfg_1s register ****/
+/* Maximum number of accumulated bytes in the token counter */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_SHIFT 0
+/* Enable the rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN (1 << 24)
+/* Stop token fill. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE (1 << 25)
+
+/**** cfg_cycle register ****/
+/* Number of short cycles between token fills */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0
+
+/**** cfg_token_size_1 register ****/
+/* Number of bits to add in each long cycle */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0
+
+/**** cfg_token_size_2 register ****/
+/* Number of bits to add in each short cycle */
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0
+
+/**** sw_ctrl register ****/
+/* Reset the token bucket counter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT (1 << 0)
+
+/**** mask register ****/
+/* Mask the external rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_RATE_LIMITER (1 << 0)
+/* Mask the internal rate limiter. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_INTERNAL_RATE_LIMITER (1 << 1)
+/* Mask the external application pause interface. */
+#define UDMA_M2S_STREAM_RATE_LIMITER_MASK_EXTERNAL_PAUSE (1 << 3)
+
+/**** cfg_1c register ****/
+/*
+ * Completion FIFO size
+ * (descriptors per queue)
+ */
+#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK 0x0001FF00
+#define UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT 8
+/*
+ * Enable promotion.
+ * Enable the promotion of the current queue in progress for the completion
+ * write scheduler.
+ */
+#define UDMA_M2S_COMP_CFG_1C_Q_PROMOTION (1 << 24)
+/* Force RR arbitration in the completion arbiter */
+#define UDMA_M2S_COMP_CFG_1C_FORCE_RR (1 << 25)
+/* Minimum number of free completion entries to qualify for promotion */
+#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000
+#define UDMA_M2S_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28
+
+/**** cfg_application_ack register ****/
+/*
+ * Acknowledge timeout timer.
+ * (ACK from the application through the stream interface)
+ */
+#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK 0x00FFFFFF
+#define UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT 0
+
+/**** cfg_st register ****/
+/* Use additional length value for all statistics counters. */
+#define UDMA_M2S_STAT_CFG_ST_USE_EXTRA_LEN (1 << 0)
+
+/**** reg_1 register ****/
+/*
+ * Read the size of the descriptor prefetch FIFO
+ * (descriptors).
+ */
+#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0
+
+/**** reg_3 register ****/
+/*
+ * Maximum number of data beats in the data read FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 2KB -> 128 beats)
+ */
+#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_M2S_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data read FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_M2S_FEATURE_REG_3_DATA_RD_MAX_PKT_LIMIT_SHIFT 16
+
+/**** reg_4 register ****/
+/*
+ * Size of the completion FIFO of each queue
+ * (words)
+ */
+#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_M2S_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0
+/* Size of the unacknowledged FIFO (descriptors) */
+#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0001FF00
+#define UDMA_M2S_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 8
+
+/**** reg_5 register ****/
+/* Maximum number of outstanding data reads to AXI */
+#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_MASK 0x0000003F
+#define UDMA_M2S_FEATURE_REG_5_MAX_DATA_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding descriptor reads to AXI */
+#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_MASK 0x00003F00
+#define UDMA_M2S_FEATURE_REG_5_MAX_DESC_RD_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to AXI.
+ * (AXI transactions)
+ */
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_M2S_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** cfg register ****/
+/*
+ * Length offset to be used for each packet from this queue.
+ * (length offset is used for the scheduler and rate limiter).
+ */
+#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_MASK 0x0000FFFF
+#define UDMA_M2S_Q_CFG_PKT_LEN_OFFSET_SHIFT 0
+/*
+ * Enable operation of this queue.
+ * Start prefetch.
+ */
+#define UDMA_M2S_Q_CFG_EN_PREF (1 << 16)
+/*
+ * Enable operation of this queue.
+ * Start scheduling.
+ */
+#define UDMA_M2S_Q_CFG_EN_SCHEDULING (1 << 17)
+/* Allow prefetch of less than minimum prefetch burst size. */
+#define UDMA_M2S_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20)
+/* Configure the AXI AWCACHE for completion write. */
+#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000
+#define UDMA_M2S_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24
+/*
+ * AXI QoS for the selected queue.
+ * This value is used in AXI transactions associated with this queue and the
+ * prefetch and completion arbiters.
+ */
+#define UDMA_M2S_Q_CFG_AXI_QOS_MASK 0x70000000
+#define UDMA_M2S_Q_CFG_AXI_QOS_SHIFT 28
+
+/**** status register ****/
+/* Indicates how many entries are used in the queue */
+#define UDMA_M2S_Q_STATUS_Q_USED_MASK 0x01FFFFFF
+#define UDMA_M2S_Q_STATUS_Q_USED_SHIFT 0
+/*
+ * Prefetch status:
+ * 0 - prefetch operation is stopped
+ * 1 - prefetch is operational
+ */
+#define UDMA_M2S_Q_STATUS_PREFETCH (1 << 28)
+/*
+ * Queue scheduler status
+ * 0 - queue is not active and not participating in scheduling
+ * 1 - queue is active and participating in the scheduling process
+ */
+#define UDMA_M2S_Q_STATUS_SCHEDULER (1 << 29)
+/* Queue is suspended due to DMB */
+#define UDMA_M2S_Q_STATUS_Q_DMB (1 << 30)
+/*
+ * Queue full indication.
+ * (used by the host when head pointer equals tail pointer).
+ */
+#define UDMA_M2S_Q_STATUS_Q_FULL (1 << 31)
+/*
+ * M2S Descriptor Ring Base address [31:4].
+ * Value of the base address of the M2S descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ */
+#define UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_M2S_Q_TDRBP_LOW_ADDR_SHIFT 4
+
+/**** TDRL register ****/
+/*
+ * Length of the descriptor ring.
+ * (descriptors)
+ * Associated with the ring base address, ends at maximum burst size alignment.
+ */
+#define UDMA_M2S_Q_TDRL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRL_OFFSET_SHIFT 0
+
+/**** TDRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be read into the
+ * prefetch FIFO.
+ * Incremented when the DMA reads valid descriptors from the host memory to the
+ * prefetch FIFO.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TDRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDRHP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDRHP_RING_ID_SHIFT 30
+
+/**** TDRTP_inc register ****/
+/* Increments the value in Q_TDRTP (descriptors) */
+#define UDMA_M2S_Q_TDRTP_INC_VAL_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRTP_INC_VAL_SHIFT 0
+
+/**** TDRTP register ****/
+/*
+ * Relative offset of the next free descriptor in the host memory.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TDRTP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDRTP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDRTP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDRTP_RING_ID_SHIFT 30
+
+/**** TDCP register ****/
+/*
+ * Relative offset of the first descriptor in the prefetch FIFO.
+ * This is the next descriptor that will be read by the scheduler.
+ */
+#define UDMA_M2S_Q_TDCP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TDCP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TDCP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TDCP_RING_ID_SHIFT 30
+/*
+ * M2S Descriptor Ring Base address [31:4].
+ * Value of the base address of the M2S descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ * NOTE:
+ * Length of the descriptor ring (in descriptors) associated with the ring base
+ * address. Ends at maximum burst size alignment.
+ */
+#define UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_M2S_Q_TCRBP_LOW_ADDR_SHIFT 4
+
+/**** TCRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TCRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TCRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TCRHP_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TCRHP_RING_ID_SHIFT 30
+
+/**** TCRHP_internal register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_TCRHP_INTERNAL_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_MASK 0xC0000000
+#define UDMA_M2S_Q_TCRHP_INTERNAL_RING_ID_SHIFT 30
+
+/**** rate_limit_cfg_1 register ****/
+/* Maximum number of accumulated bytes in the token counter. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_MAX_BURST_SIZE_SHIFT 0
+/* Enable the rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_EN (1 << 24)
+/* Stop token fill. */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_1_PAUSE (1 << 25)
+
+/**** rate_limit_cfg_cycle register ****/
+/* Number of short cycles between token fills */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_MASK 0x0000FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_CYCLE_LONG_CYCLE_SIZE_SHIFT 0
+
+/**** rate_limit_cfg_token_size_1 register ****/
+/* Number of bits to add in each long cycle */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_1_LONG_CYCLE_SHIFT 0
+
+/**** rate_limit_cfg_token_size_2 register ****/
+/* Number of bits to add in each cycle */
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK 0x0007FFFF
+#define UDMA_M2S_Q_RATE_LIMIT_CFG_TOKEN_SIZE_2_SHORT_CYCLE_SHIFT 0
+
+/**** rate_limit_sw_ctrl register ****/
+/* Reset the token bucket counter. */
+#define UDMA_M2S_Q_RATE_LIMIT_SW_CTRL_RST_TOKEN_CNT (1 << 0)
+
+/**** rate_limit_mask register ****/
+/* Mask the external rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_RATE_LIMITER (1 << 0)
+/* Mask the internal rate limiter. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_RATE_LIMITER (1 << 1)
+/*
+ * Mask the internal pause mechanism for DMB.
+ * (Data Memory Barrier).
+ */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB (1 << 2)
+/* Mask the external application pause interface. */
+#define UDMA_M2S_Q_RATE_LIMIT_MASK_EXTERNAL_PAUSE (1 << 3)
+
+/**** dwrr_cfg_1 register ****/
+/* Maximum number of accumulated bytes in the deficit counter */
+#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK 0x00FFFFFF
+#define UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_SHIFT 0
+/* Bypass the DWRR. */
+#define UDMA_M2S_Q_DWRR_CFG_1_STRICT (1 << 24)
+/* Stop deficit counter increment. */
+#define UDMA_M2S_Q_DWRR_CFG_1_PAUSE (1 << 25)
+
+/**** dwrr_cfg_2 register ****/
+/*
+ * Value for the queue QoS.
+ * Queues with the same QoS value are scheduled with RR/DWRR.
+ * Only LOG(number of queues) is used.
+ */
+#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK 0x000000FF
+#define UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT 0
+
+/**** dwrr_cfg_3 register ****/
+/* Queue weight */
+#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK 0x000000FF
+#define UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_SHIFT 0
+
+/**** dwrr_sw_ctrl register ****/
+/* Reset the DWRR deficit counter. */
+#define UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT (1 << 0)
+
+/**** comp_cfg register ****/
+/* Enable writing to the completion ring */
+#define UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0)
+/* Disable the completion coalescing function. */
+#define UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL (1 << 1)
+
+/**** q_sw_ctrl register ****/
+/*
+ * Reset the DMB hardware barrier
+ * (enable queue operation).
+ */
+#define UDMA_M2S_Q_SW_CTRL_RST_DMB (1 << 0)
+/* Reset the tail pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_TAIL_PTR (1 << 1)
+/* Reset the head pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_HEAD_PTR (1 << 2)
+/* Reset the current pointer hardware. */
+#define UDMA_M2S_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3)
+/* Reset the queue */
+#define UDMA_M2S_Q_SW_CTRL_RST_Q (1 << 8)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_M2S_REG_H */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_s2m.h b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_s2m.h
new file mode 100644
index 0000000..bcefdb9
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_udma_regs_s2m.h
@@ -0,0 +1,997 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @file al_hal_udma_regs_s2m.h
+ *
+ * @brief C Header file for the UDMA S2M registers
+ *
+ */
+
+#ifndef __AL_HAL_UDMA_S2M_REG_H
+#define __AL_HAL_UDMA_S2M_REG_H
+
+#include "al_hal_plat_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+
+
+struct udma_axi_s2m {
+ /* [0x0] Data write master configuration */
+ uint32_t data_wr_cfg_1;
+ /* [0x4] Data write master configuration */
+ uint32_t data_wr_cfg_2;
+ /* [0x8] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_4;
+ /* [0xc] Descriptor read master configuration */
+ uint32_t desc_rd_cfg_5;
+ /* [0x10] Completion write master configuration */
+ uint32_t comp_wr_cfg_1;
+ /* [0x14] Completion write master configuration */
+ uint32_t comp_wr_cfg_2;
+ /* [0x18] Data write master configuration */
+ uint32_t data_wr_cfg;
+ /* [0x1c] Descriptors read master configuration */
+ uint32_t desc_rd_cfg_3;
+ /* [0x20] Completion descriptors write master configuration */
+ uint32_t desc_wr_cfg_1;
+ /* [0x24] AXI outstanding read configuration */
+ uint32_t ostand_cfg_rd;
+ /* [0x28] AXI outstanding write configuration */
+ uint32_t ostand_cfg_wr;
+ uint32_t rsrvd[53];
+};
+struct udma_s2m {
+ /*
+ * [0x0] DMA state
+ * 00 - No pending tasks
+ * 01 – Normal (active)
+ * 10 – Abort (error condition)
+ * 11 – Reserved
+ */
+ uint32_t state;
+ /* [0x4] CPU request to change DMA state */
+ uint32_t change_state;
+ uint32_t rsrvd_0;
+ /*
+ * [0xc] S2M DMA error log mask.
+ * Each error has an interrupt controller cause bit.
+ * This register determines if these errors cause the S2M DMA to log the
+ * error condition.
+	 * 0 - Log is enabled
+ * 1 - Log is masked.
+ */
+ uint32_t err_log_mask;
+ uint32_t rsrvd_1;
+ /*
+ * [0x14] DMA header log
+ * Sample the packet header that caused the error
+ */
+ uint32_t log_0;
+ /*
+ * [0x18] DMA header log
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_1;
+ /*
+ * [0x1c] DMA header log
+ * Sample the packet header that caused the error.
+ */
+ uint32_t log_2;
+ /*
+ * [0x20] DMA header log
+ * Sample the packet header that caused the error
+ */
+ uint32_t log_3;
+ /* [0x24] DMA clear error log */
+ uint32_t clear_err_log;
+ /* [0x28] S2M stream data FIFO status */
+ uint32_t s_data_fifo_status;
+ /* [0x2c] S2M stream header FIFO status */
+ uint32_t s_header_fifo_status;
+ /* [0x30] S2M AXI data FIFO status */
+ uint32_t axi_data_fifo_status;
+ /* [0x34] S2M unack FIFO status */
+ uint32_t unack_fifo_status;
+ /* [0x38] Select queue for debug */
+ uint32_t indirect_ctrl;
+ /*
+ * [0x3c] S2M prefetch FIFO status.
+ * Status of the selected queue in S2M_indirect_ctrl
+ */
+ uint32_t sel_pref_fifo_status;
+ /*
+ * [0x40] S2M completion FIFO status.
+ * Status of the selected queue in S2M_indirect_ctrl
+ */
+ uint32_t sel_comp_fifo_status;
+ /* [0x44] S2M state machine and FIFO clear control */
+ uint32_t clear_ctrl;
+ /* [0x48] S2M Misc Check enable */
+ uint32_t check_en;
+ /* [0x4c] S2M FIFO enable control, internal */
+ uint32_t fifo_en;
+ /* [0x50] Stream interface configuration */
+ uint32_t stream_cfg;
+ uint32_t rsrvd[43];
+};
+struct udma_s2m_rd {
+ /* [0x0] S2M descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_1;
+ /* [0x4] S2M descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_2;
+ /* [0x8] S2M descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_3;
+ /* [0xc] S2M descriptor prefetch configuration */
+ uint32_t desc_pref_cfg_4;
+ uint32_t rsrvd[12];
+};
+struct udma_s2m_wr {
+ /* [0x0] Stream data FIFO configuration */
+ uint32_t data_cfg_1;
+ /* [0x4] Data write configuration */
+ uint32_t data_cfg_2;
+ uint32_t rsrvd[14];
+};
+struct udma_s2m_comp {
+ /* [0x0] Completion controller configuration */
+ uint32_t cfg_1c;
+ /* [0x4] Completion controller configuration */
+ uint32_t cfg_2c;
+ uint32_t rsrvd_0;
+ /* [0xc] Completion controller application acknowledge configuration */
+ uint32_t cfg_application_ack;
+ uint32_t rsrvd[12];
+};
+struct udma_s2m_stat {
+ uint32_t rsrvd_0;
+ /* [0x4] Number of dropped packets */
+ uint32_t drop_pkt;
+ /*
+ * [0x8] Counting the net length of the data buffers [64-bit]
+ * Should be read before rx_bytes_high
+ */
+ uint32_t rx_bytes_low;
+ /*
+	 * [0xc] Counting the net length of the data buffers [64-bit]
+	 * Should be read after rx_bytes_low (value is sampled when
+	 * rx_bytes_low is read)
+ */
+ uint32_t rx_bytes_high;
+ /* [0x10] Total number of descriptors read from the host memory */
+ uint32_t prefed_desc;
+ /* [0x14] Number of packets written into the completion ring */
+ uint32_t comp_pkt;
+ /* [0x18] Number of descriptors written into the completion ring */
+ uint32_t comp_desc;
+ /*
+ * [0x1c] Number of acknowledged packets.
+ * (acknowledge sent to the stream interface)
+ */
+ uint32_t ack_pkts;
+ uint32_t rsrvd[56];
+};
+struct udma_s2m_feature {
+ /*
+ * [0x0] S2M Feature register
+ * S2M instantiation parameters
+ */
+ uint32_t reg_1;
+ /* [0x4] Reserved S2M feature register */
+ uint32_t reg_2;
+ /*
+ * [0x8] S2M Feature register
+ * S2M instantiation parameters
+ */
+ uint32_t reg_3;
+ /*
+ * [0xc] S2M Feature register.
+ * S2M instantiation parameters.
+ */
+ uint32_t reg_4;
+ /*
+ * [0x10] S2M Feature register.
+ * S2M instantiation parameters.
+ */
+ uint32_t reg_5;
+ /* [0x14] S2M Feature register. S2M instantiation parameters. */
+ uint32_t reg_6;
+ uint32_t rsrvd[58];
+};
+struct udma_s2m_q {
+ uint32_t rsrvd_0[8];
+ /* [0x20] S2M Descriptor ring configuration */
+ uint32_t cfg;
+ /* [0x24] S2M Descriptor ring status and information */
+ uint32_t status;
+ /* [0x28] Rx Descriptor Ring Base Pointer [31:4] */
+ uint32_t rdrbp_low;
+ /* [0x2c] Rx Descriptor Ring Base Pointer [63:32] */
+ uint32_t rdrbp_high;
+ /*
+ * [0x30] Rx Descriptor Ring Length[23:2]
+ */
+ uint32_t rdrl;
+ /* [0x34] RX Descriptor Ring Head Pointer */
+ uint32_t rdrhp;
+ /* [0x38] Rx Descriptor Tail Pointer increment */
+ uint32_t rdrtp_inc;
+ /* [0x3c] Rx Descriptor Tail Pointer */
+ uint32_t rdrtp;
+ /* [0x40] RX Descriptor Current Pointer */
+ uint32_t rdcp;
+ /* [0x44] Rx Completion Ring Base Pointer [31:4] */
+ uint32_t rcrbp_low;
+ /* [0x48] Rx Completion Ring Base Pointer [63:32] */
+ uint32_t rcrbp_high;
+ /* [0x4c] Rx Completion Ring Head Pointer */
+ uint32_t rcrhp;
+ /*
+ * [0x50] RX Completion Ring Head Pointer internal.
+ * (Before the coalescing FIFO)
+ */
+ uint32_t rcrhp_internal;
+ /* [0x54] Completion controller configuration for the queue */
+ uint32_t comp_cfg;
+ /* [0x58] Completion controller configuration for the queue */
+ uint32_t comp_cfg_2;
+ /* [0x5c] Packet handler configuration */
+ uint32_t pkt_cfg;
+ /* [0x60] Queue QoS configuration */
+ uint32_t qos_cfg;
+ /* [0x64] DMB software control */
+ uint32_t q_sw_ctrl;
+ /* [0x68] Number of S2M Rx packets after completion */
+ uint32_t q_rx_pkt;
+ uint32_t rsrvd[997];
+};
+
+struct udma_s2m_regs {
+ uint32_t rsrvd_0[64];
+ struct udma_axi_s2m axi_s2m; /* [0x100] */
+ struct udma_s2m s2m; /* [0x200] */
+ struct udma_s2m_rd s2m_rd; /* [0x300] */
+ struct udma_s2m_wr s2m_wr; /* [0x340] */
+ struct udma_s2m_comp s2m_comp; /* [0x380] */
+ uint32_t rsrvd_1[80];
+ struct udma_s2m_stat s2m_stat; /* [0x500] */
+ struct udma_s2m_feature s2m_feature; /* [0x600] */
+ uint32_t rsrvd_2[576];
+ struct udma_s2m_q s2m_q[4]; /* [0x1000] */
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** data_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** data_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_DATA_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** desc_rd_cfg_4 register ****/
+/* AXI read ID (ARID) */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_4_ARBURST_SHIFT 24
+
+/**** desc_rd_cfg_5 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_DESC_RD_CFG_5_ARPROT_SHIFT 28
+
+/**** comp_wr_cfg_1 register ****/
+/* AXI write ID (AWID) */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK 0x000000FF
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_SHIFT 0
+/* Cache Type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK 0x000F0000
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT 16
+/* Burst type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK 0x03000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT 24
+
+/**** comp_wr_cfg_2 register ****/
+/* User extension */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK 0x000FFFFF
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_SHIFT 0
+/* Bus size, 128-bit */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK 0x00700000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT 20
+/*
+ * AXI Master QoS.
+ * Used for arbitration between AXI masters
+ */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK 0x07000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT 24
+/* Protection Type */
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK 0x70000000
+#define UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT 28
+
+/**** data_wr_cfg register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DATA_WR_CFG_MAX_AXI_BEATS_SHIFT 0
+
+/**** desc_rd_cfg_3 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Enables breaking descriptor read request.
+ * Aligned to max_AXI_beats when the total read size is less than max_AXI_beats.
+ */
+#define UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY (1 << 16)
+
+/**** desc_wr_cfg_1 register ****/
+/*
+ * Defines the maximum number of AXI beats for a single AXI burst. This value is
+ * used for the burst split decision.
+ */
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK 0x000000FF
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT 0
+/*
+ * Minimum burst for writing completion descriptors.
+ * (AXI beats).
+ * Value must be aligned to cache lines (64 bytes).
+ * Default value is 2 cache lines, 8 beats.
+ */
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK 0x00FF0000
+#define UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT 16
+
+/**** ostand_cfg_rd register ****/
+/*
+ * Maximum number of outstanding descriptor reads to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK 0x0000003F
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding stream acknowledges. */
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK 0x001F0000
+#define UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT 16
+
+/**** ostand_cfg_wr register ****/
+/*
+ * Maximum number of outstanding data writes to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK 0x0000003F
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_SHIFT 0
+/*
+ * Maximum number of outstanding data beats for data write to AXI.
+ * (AXI beats).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor writes to the AXI.
+ * (AXI transactions).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats).
+ */
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** state register ****/
+
+#define UDMA_S2M_STATE_COMP_CTRL_MASK 0x00000003
+#define UDMA_S2M_STATE_COMP_CTRL_SHIFT 0
+
+#define UDMA_S2M_STATE_STREAM_IF_MASK 0x00000030
+#define UDMA_S2M_STATE_STREAM_IF_SHIFT 4
+
+#define UDMA_S2M_STATE_DATA_WR_CTRL_MASK 0x00000300
+#define UDMA_S2M_STATE_DATA_WR_CTRL_SHIFT 8
+
+#define UDMA_S2M_STATE_DESC_PREF_MASK 0x00003000
+#define UDMA_S2M_STATE_DESC_PREF_SHIFT 12
+
+#define UDMA_S2M_STATE_AXI_WR_DATA_MASK 0x00030000
+#define UDMA_S2M_STATE_AXI_WR_DATA_SHIFT 16
+
+/**** change_state register ****/
+/* Start normal operation */
+#define UDMA_S2M_CHANGE_STATE_NORMAL (1 << 0)
+/* Stop normal operation */
+#define UDMA_S2M_CHANGE_STATE_DIS (1 << 1)
+/*
+ * Stop all machines.
+ * (Prefetch, scheduling, completion and stream interface)
+ */
+#define UDMA_S2M_CHANGE_STATE_ABORT (1 << 2)
+
+/**** clear_err_log register ****/
+/* Clear error log */
+#define UDMA_S2M_CLEAR_ERR_LOG_CLEAR (1 << 0)
+
+/**** s_data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_S_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_S_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** s_header_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_S_HEADER_FIFO_STATUS_FULL (1 << 28)
+
+/**** axi_data_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_AXI_DATA_FIFO_STATUS_FULL (1 << 28)
+
+/**** unack_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_UNACK_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_UNACK_FIFO_STATUS_FULL (1 << 28)
+
+/**** indirect_ctrl register ****/
+/* Selected queue for status read */
+#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_MASK 0x00000FFF
+#define UDMA_S2M_INDIRECT_CTRL_Q_NUM_SHIFT 0
+
+/**** sel_pref_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_USED_SHIFT 0
+/* FIFO empty indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_SEL_PREF_FIFO_STATUS_FULL (1 << 28)
+
+/**** sel_comp_fifo_status register ****/
+/* FIFO used indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_MASK 0x0000FFFF
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_USED_SHIFT 0
+/* Coalescing ACTIVE FSM state indication. */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_MASK 0x00300000
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_COAL_ACTIVE_STATE_SHIFT 20
+/* FIFO empty indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_EMPTY (1 << 24)
+/* FIFO full indication */
+#define UDMA_S2M_SEL_COMP_FIFO_STATUS_FULL (1 << 28)
+
+/**** stream_cfg register ****/
+/*
+ * Disables the stream interface operation.
+ * Changing to 1 stops at the end of packet reception.
+ */
+#define UDMA_S2M_STREAM_CFG_DISABLE (1 << 0)
+/*
+ * Flush the stream interface operation.
+ * Changing to 1 stops at the end of packet reception and assert ready to the
+ * stream I/F.
+ */
+#define UDMA_S2M_STREAM_CFG_FLUSH (1 << 4)
+/* Stop descriptor prefetch when the stream is disabled and the S2M is idle. */
+#define UDMA_S2M_STREAM_CFG_STOP_PREFETCH (1 << 8)
+
+/**** desc_pref_cfg_1 register ****/
+/*
+ * Size of the descriptor prefetch FIFO.
+ * (descriptors)
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT 0
+
+/**** desc_pref_cfg_2 register ****/
+/* Enable promotion of the current queue in progress */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION (1 << 0)
+/* Force promotion of the current queue in progress */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION (1 << 1)
+/* Enable prefetch prediction of next packet in line. */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION (1 << 2)
+/*
+ * Threshold for queue promotion.
+ * Queue is promoted for prefetch if there are less descriptors in the prefetch
+ * FIFO than the threshold
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK 0x0000FF00
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT 8
+/*
+ * Force RR arbitration in the prefetch arbiter.
+ * 0 - Standard arbitration based on queue QoS
+ * 1 - Force round robin arbitration
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR (1 << 16)
+
+/**** desc_pref_cfg_3 register ****/
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is below the
+ * descriptor prefetch threshold
+ * (must be 1)
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK 0x0000000F
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT 0
+/*
+ * Minimum descriptor burst size when prefetch FIFO level is above the
+ * descriptor prefetch threshold
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK 0x000000F0
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT 4
+/*
+ * Descriptor fetch threshold.
+ * Used as a threshold to determine the allowed minimum descriptor burst size.
+ * (Must be at least "max_desc_per_pkt")
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK 0x0000FF00
+#define UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT 8
+
+/**** desc_pref_cfg_4 register ****/
+/*
+ * Used as a threshold for generating almost FULL indication to the application
+ */
+#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK 0x000000FF
+#define UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_SHIFT 0
+
+/**** data_cfg_1 register ****/
+/*
+ * Maximum number of data beats in the data write FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 512B → 32 beats)
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data write FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT 16
+/*
+ * Internal use
+ * Data FIFO margin
+ */
+#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK 0xFF000000
+#define UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT 24
+
+/**** data_cfg_2 register ****/
+/*
+ * Drop timer.
+ * Waiting time for the host to write new descriptor to the queue
+ * (for the current packet in process)
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK 0x00FFFFFF
+#define UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT 0
+/*
+ * Drop enable.
+ * Enable packet drop if there are no available descriptors in the system for
+ * this queue
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC (1 << 27)
+/*
+ * Lack of descriptors hint.
+ * Generate interrupt when a packet is waiting but there are no available
+ * descriptors in the queue
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC (1 << 28)
+/*
+ * Drop conditions
+ * Wait until a descriptor is available in the prefetch FIFO or the host before
+ * dropping packet.
+ * 1 - Drop if a descriptor is not available in the prefetch.
+ * 0 - Drop if a descriptor is not available in the system
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF (1 << 29)
+/*
+ * DRAM write optimization
+ * 0 - Data write with byte enable
+ * 1 - Data write is always in Full AXI bus width (128 bit)
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE (1 << 30)
+/*
+ * Direct data write address
+ * 1 - Use buffer 1 instead of buffer 2 when direct data placement is used with
+ * header split.
+ * 0 - Use buffer 2 for the header.
+ */
+#define UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1 (1 << 31)
+
+/**** cfg_1c register ****/
+/*
+ * Completion descriptor size.
+ * (words)
+ */
+#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK 0x0000000F
+#define UDMA_S2M_COMP_CFG_1C_DESC_SIZE_SHIFT 0
+/*
+ * Completion queue counter configuration.
+ * Completion FIFO in use counter measured in words or descriptors
+ * 1 - Words
+ * 0 - Descriptors
+ */
+#define UDMA_S2M_COMP_CFG_1C_CNT_WORDS (1 << 8)
+/*
+ * Enable promotion of the current queue in progress in the completion write
+ * scheduler.
+ */
+#define UDMA_S2M_COMP_CFG_1C_Q_PROMOTION (1 << 12)
+/* Force RR arbitration in the completion arbiter */
+#define UDMA_S2M_COMP_CFG_1C_FORCE_RR (1 << 16)
+/* Minimum number of free completion entries to qualify for promotion */
+#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK 0xF0000000
+#define UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT 28
+
+/**** cfg_2c register ****/
+/*
+ * Completion FIFO size.
+ * (words per queue)
+ */
+#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK 0x00000FFF
+#define UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
+#define UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT 16
+
+/**** reg_1 register ****/
+/*
+ * Descriptor prefetch FIFO size
+ * (descriptors)
+ */
+#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_MASK 0x000000FF
+#define UDMA_S2M_FEATURE_REG_1_DESC_PREFERCH_FIFO_DEPTH_SHIFT 0
+
+/**** reg_3 register ****/
+/*
+ * Maximum number of data beats in the data write FIFO.
+ * Defined based on data FIFO size
+ * (default FIFO size 512B → 32 beats)
+ */
+#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_MASK 0x000003FF
+#define UDMA_S2M_FEATURE_REG_3_DATA_FIFO_DEPTH_SHIFT 0
+/*
+ * Maximum number of packets in the data write FIFO.
+ * Defined based on header FIFO size
+ */
+#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_MASK 0x00FF0000
+#define UDMA_S2M_FEATURE_REG_3_DATA_WR_MAX_PKT_LIMIT_SHIFT 16
+
+/**** reg_4 register ****/
+/*
+ * Completion FIFO size.
+ * (words per queue)
+ */
+#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_MASK 0x00000FFF
+#define UDMA_S2M_FEATURE_REG_4_COMP_FIFO_DEPTH_SHIFT 0
+/*
+ * Unacknowledged FIFO size.
+ * (descriptors)
+ */
+#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_MASK 0x0FFF0000
+#define UDMA_S2M_FEATURE_REG_4_COMP_UNACK_FIFO_DEPTH_SHIFT 16
+
+/**** reg_5 register ****/
+/* Maximum number of outstanding data writes to the AXI */
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_MASK 0x0000003F
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_WR_OSTAND_SHIFT 0
+/*
+ * Maximum number of outstanding data beats for data write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_MASK 0x0000FF00
+#define UDMA_S2M_FEATURE_REG_5_MAX_DATA_BEATS_WR_OSTAND_SHIFT 8
+/*
+ * Maximum number of outstanding descriptor reads to the AXI.
+ * (AXI transactions)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_MASK 0x003F0000
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_REQ_SHIFT 16
+/*
+ * Maximum number of outstanding data beats for descriptor write to AXI.
+ * (AXI beats)
+ */
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_MASK 0xFF000000
+#define UDMA_S2M_FEATURE_REG_5_MAX_COMP_DATA_WR_OSTAND_SHIFT 24
+
+/**** reg_6 register ****/
+/* Maximum number of outstanding descriptor reads to the AXI */
+#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_MASK 0x0000003F
+#define UDMA_S2M_FEATURE_REG_6_MAX_DESC_RD_OSTAND_SHIFT 0
+/* Maximum number of outstanding stream acknowledges */
+#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_MASK 0x001F0000
+#define UDMA_S2M_FEATURE_REG_6_MAX_STREAM_ACK_SHIFT 16
+
+/**** cfg register ****/
+/*
+ * Configure the AXI AWCACHE
+ * for header write.
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_MASK 0x0000000F
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_HDR_SHIFT 0
+/*
+ * Configure the AXI AWCACHE
+ * for data write.
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_MASK 0x000000F0
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_DATA_SHIFT 4
+/*
+ * Enable operation of this queue.
+ * Start prefetch.
+ */
+#define UDMA_S2M_Q_CFG_EN_PREF (1 << 16)
+/* Enables the reception of packets from the stream to this queue */
+#define UDMA_S2M_Q_CFG_EN_STREAM (1 << 17)
+/* Allow prefetch of less than minimum prefetch burst size. */
+#define UDMA_S2M_Q_CFG_ALLOW_LT_MIN_PREF (1 << 20)
+/*
+ * Configure the AXI AWCACHE
+ * for completion descriptor write
+ */
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_MASK 0x0F000000
+#define UDMA_S2M_Q_CFG_AXI_AWCACHE_COMP_SHIFT 24
+/*
+ * AXI QoS
+ * This value is used in AXI transactions associated with this queue and the
+ * prefetch and completion arbiters.
+ */
+#define UDMA_S2M_Q_CFG_AXI_QOS_MASK 0x70000000
+#define UDMA_S2M_Q_CFG_AXI_QOS_SHIFT 28
+
+/**** status register ****/
+/* Indicates how many entries are used in the Queue */
+#define UDMA_S2M_Q_STATUS_Q_USED_MASK 0x01FFFFFF
+#define UDMA_S2M_Q_STATUS_Q_USED_SHIFT 0
+/*
+ * prefetch status
+ * 0 – prefetch operation is stopped
+ * 1 – prefetch is operational
+ */
+#define UDMA_S2M_Q_STATUS_PREFETCH (1 << 28)
+/*
+ * Queue receive status
+ * 0 - queue RX operation is stopped
+ * 1 – RX queue is active and processing packets
+ */
+#define UDMA_S2M_Q_STATUS_RX (1 << 29)
+/*
+ * Indicates if the queue is full.
+ * (Used by the host when head pointer equals tail pointer)
+ */
+#define UDMA_S2M_Q_STATUS_Q_FULL (1 << 31)
+/*
+ * S2M Descriptor Ring Base address [31:4].
+ * Value of the base address of the S2M descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] should be 0 for 4KB alignment)
+ */
+#define UDMA_S2M_Q_RDRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_S2M_Q_RDRBP_LOW_ADDR_SHIFT 4
+
+/**** RDRL register ****/
+/*
+ * Length of the descriptor ring.
+ * (descriptors)
+ * Associated with the ring base address ends at maximum burst size alignment
+ */
+#define UDMA_S2M_Q_RDRL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRL_OFFSET_SHIFT 0
+
+/**** RDRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be read into the
+ * prefetch FIFO.
+ * Incremented when the DMA reads valid descriptors from the host memory to the
+ * prefetch FIFO.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RDRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDRHP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDRHP_RING_ID_SHIFT 30
+
+/**** RDRTP_inc register ****/
+/*
+ * Increments the value in Q_RDRTP with the value written to this field in
+ * number of descriptors.
+ */
+#define UDMA_S2M_Q_RDRTP_INC_VAL_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRTP_INC_VAL_SHIFT 0
+
+/**** RDRTP register ****/
+/*
+ * Relative offset of the next free descriptor in the host memory.
+ * Note that this is the offset in # of descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RDRTP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDRTP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDRTP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDRTP_RING_ID_SHIFT 30
+
+/**** RDCP register ****/
+/* Relative offset of the first descriptor in the prefetch FIFO. */
+#define UDMA_S2M_Q_RDCP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RDCP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RDCP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RDCP_RING_ID_SHIFT 30
+/*
+ * S2M Descriptor Ring Base address [31:4].
+ * Value of the base address of the S2M descriptor ring
+ * [3:0] - 0 - 16B alignment is enforced
+ * ([11:4] Must be 0 for 4KB alignment)
+ * NOTE:
+ * Length of the descriptor ring (in descriptors) associated with the ring base
+ * address ends at maximum burst size alignment
+ */
+#define UDMA_S2M_Q_RCRBP_LOW_ADDR_MASK 0xFFFFFFF0
+#define UDMA_S2M_Q_RCRBP_LOW_ADDR_SHIFT 4
+
+/**** RCRHP register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RCRHP_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RCRHP_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RCRHP_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RCRHP_RING_ID_SHIFT 30
+
+/**** RCRHP_internal register ****/
+/*
+ * Relative offset of the next descriptor that needs to be updated by the
+ * completion controller.
+ * Note: This is in descriptors and not in byte address.
+ */
+#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_MASK 0x00FFFFFF
+#define UDMA_S2M_Q_RCRHP_INTERNAL_OFFSET_SHIFT 0
+/* Ring ID */
+#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_MASK 0xC0000000
+#define UDMA_S2M_Q_RCRHP_INTERNAL_RING_ID_SHIFT 30
+
+/**** comp_cfg register ****/
+/* Enables writing to the completion ring. */
+#define UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE (1 << 0)
+/* Disables the completion coalescing function. */
+#define UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL (1 << 1)
+/* Reserved */
+#define UDMA_S2M_Q_COMP_CFG_FIRST_PKT_PROMOTION (1 << 2)
+/*
+ * Buffer 2 location.
+ * Determines the position of the buffer 2 length in the S2M completion
+ * descriptor.
+ * 0 - WORD 1 [31:16]
+ * 1 - WORD 2 [31:16]
+ */
+#define UDMA_S2M_Q_COMP_CFG_BUF2_LEN_LOCATION (1 << 3)
+
+/**** pkt_cfg register ****/
+/* Header size. (bytes) */
+#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK 0x0000FFFF
+#define UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_SHIFT 0
+/* Force header split */
+#define UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT (1 << 16)
+/* Enable header split. */
+#define UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT (1 << 17)
+
+/**** qos_cfg register ****/
+/* Queue QoS */
+#define UDMA_S2M_QOS_CFG_Q_QOS_MASK 0x000000FF
+#define UDMA_S2M_QOS_CFG_Q_QOS_SHIFT 0
+/* Reset the tail pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_TAIL_PTR (1 << 1)
+/* Reset the head pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_HEAD_PTR (1 << 2)
+/* Reset the current pointer hardware. */
+#define UDMA_S2M_Q_SW_CTRL_RST_CURRENT_PTR (1 << 3)
+/* Reset the prefetch FIFO */
+#define UDMA_S2M_Q_SW_CTRL_RST_PREFETCH (1 << 4)
+/* Reset the queue */
+#define UDMA_S2M_Q_SW_CTRL_RST_Q (1 << 8)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_UDMA_S2M_REG_H */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_hal_unit_adapter_regs.h b/arch/arm/mach-alpine/include/al_hal/al_hal_unit_adapter_regs.h
new file mode 100644
index 0000000..234c2ca
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_hal_unit_adapter_regs.h
@@ -0,0 +1,266 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __AL_HAL_UNIT_ADAPTER_REGS_H__
+#define __AL_HAL_UNIT_ADAPTER_REGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AL_PCI_COMMAND 0x04 /* 16 bits */
+#define AL_PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define AL_PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define AL_PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
+
+#define AL_PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define AL_PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define AL_PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define AL_PCI_BASE_ADDRESS_DEVICE_ID 0x0c
+#define AL_PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+
+#define AL_PCI_BASE_ADDRESS_2 0x18
+#define AL_PCI_BASE_ADDRESS_4 0x20
+
+#define AL_PCI_AXI_CFG_AND_CTR_0 0x110
+#define AL_PCI_AXI_CFG_AND_CTR_1 0x130
+#define AL_PCI_AXI_CFG_AND_CTR_2 0x150
+#define AL_PCI_AXI_CFG_AND_CTR_3 0x170
+
+#define AL_PCI_APP_CONTROL 0x220
+
+#define AL_PCI_SRIOV_TOTAL_AND_INITIAL_VFS 0x30c
+
+#define AL_PCI_VF_BASE_ADDRESS_0 0x324
+
+
+#define AL_PCI_EXP_CAP_BASE 0x40
+#define AL_PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define AL_PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
+#define AL_PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */
+#define AL_PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */
+#define AL_PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */
+#define AL_PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */
+#define AL_PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */
+#define AL_PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */
+#define AL_PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */
+#define AL_PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
+#define AL_PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
+#define AL_PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
+#define AL_PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
+#define AL_PCI_EXP_DEVCTL 8 /* Device Control */
+#define AL_PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
+#define AL_PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
+#define AL_PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
+#define AL_PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
+#define AL_PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
+#define AL_PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define AL_PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define AL_PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
+#define AL_PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
+#define AL_PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
+#define AL_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+#define AL_PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
+#define AL_PCI_EXP_DEVSTA 0xA /* Device Status */
+#define AL_PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
+#define AL_PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
+#define AL_PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */
+#define AL_PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */
+#define AL_PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
+#define AL_PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
+#define AL_PCI_EXP_LNKCAP 0xC /* Link Capabilities */
+#define AL_PCI_EXP_LNKCAP_SLS 0xf /* Supported Link Speeds */
+#define AL_PCI_EXP_LNKCAP_SLS_2_5GB 0x1 /* LNKCAP2 SLS Vector bit 0 (2.5GT/s) */
+#define AL_PCI_EXP_LNKCAP_SLS_5_0GB 0x2 /* LNKCAP2 SLS Vector bit 1 (5.0GT/s) */
+#define AL_PCI_EXP_LNKCAP_MLW 0x3f0 /* Maximum Link Width */
+#define AL_PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */
+#define AL_PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */
+#define AL_PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */
+#define AL_PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */
+#define AL_PCI_EXP_LNKCAP_SDERC 0x80000 /* Surprise Down Error Reporting Capable */
+#define AL_PCI_EXP_LNKCAP_DLLLARC 0x100000 /* Data Link Layer Link Active Reporting Capable */
+#define AL_PCI_EXP_LNKCAP_LBNC 0x200000 /* Link Bandwidth Notification Capability */
+#define AL_PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
+
+#define AL_PCI_EXP_LNKSTA 0x12 /* Link Status */
+#define AL_PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
+#define AL_PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
+#define AL_PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
+#define AL_PCI_EXP_LNKSTA_CLS_8_0GB 0x03 /* Current Link Speed 8.0GT/s */
+#define AL_PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#define AL_PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
+#define AL_PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
+#define AL_PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
+#define AL_PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
+#define AL_PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
+#define AL_PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
+
+#define AL_PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
+
+#define AL_PCI_MSIX_MSGCTRL 0 /* MSIX message control reg */
+#define AL_PCI_MSIX_MSGCTRL_TBL_SIZE 0x7ff /* MSIX table size */
+#define AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT 16 /* MSIX table size shift */
+#define AL_PCI_MSIX_MSGCTRL_EN 0x80000000 /* MSIX enable */
+#define AL_PCI_MSIX_MSGCTRL_MASK 0x40000000 /* MSIX mask */
+
+#define AL_PCI_MSIX_TABLE 0x4 /* MSIX table offset and bar reg */
+#define AL_PCI_MSIX_TABLE_OFFSET 0xfffffff8 /* MSIX table offset */
+#define AL_PCI_MSIX_TABLE_BAR 0x7 /* MSIX table BAR */
+
+#define AL_PCI_MSIX_PBA 0x8 /* MSIX pba offset and bar reg */
+#define AL_PCI_MSIX_PBA_OFFSET 0xfffffff8 /* MSIX pba offset */
+#define AL_PCI_MSIX_PBA_BAR 0x7 /* MSIX pba BAR */
+
+
+/* Adapter power management register 0 */
+#define AL_ADAPTER_PM_0 0x80
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_MASK 0xff00
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_SHIFT 8
+#define AL_ADAPTER_PM_0_PM_NEXT_CAP_VAL_MSIX 0x90
+
+/* Adapter power management register 1 */
+#define AL_ADAPTER_PM_1 0x84
+#define AL_ADAPTER_PM_1_PME_EN 0x100 /* PM enable */
+#define AL_ADAPTER_PM_1_PWR_STATE_MASK 0x3 /* PM state mask */
+#define AL_ADAPTER_PM_1_PWR_STATE_D3 0x3 /* PM D3 state */
+
+/*
+ * Generic control register
+ */
+#define AL_ADAPTER_SMCC 0x110 /* Sub Master Configuration & Control */
+#define AL_ADAPTER_GENERIC_CONTROL_0 0x1E0
+/* Enable clock gating */
+#define AL_ADAPTER_GENERIC_CONTROL_0_CLK_GATE_EN 0x01
+/* When set, all transactions through the PCI conf & mem BARs get timeout */
+#define AL_ADAPTER_GENERIC_CONTROL_0_ADAPTER_DIS 0x40
+#define AL_ADAPTER_GENERIC_CONTROL_11 0x220 /* Generic Control registers */
+
+#define AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC AL_BIT(18)
+#define AL_ADAPTER_GENERIC_CONTROL_0_ETH_RESET_1GMAC_ON_FLR AL_BIT(26)
+
+#define AL_ADAPTER_GENERIC_CONTROL_2 0x1E8
+#define AL_ADAPTER_GENERIC_CONTROL_3 0x1EC
+
+/*
+ * Unit adapter inline functions
+ */
+
+/**
+ * Perform function level reset and takes care for all needed PCIe config space
+ * register save and restore.
+ * Utilizes reading/writing to the pcie config space and for performing the
+ * actual reset.
+ *
+ * @param pcie_read_config_u32
+ * pointer to function that reads register from pcie config space
+ * @param pcie_write_config_u32
+ * pointer to function that writes register to pcie config space
+ *
+ * @param pcie_flr
+ * pointer to function that makes the actual reset.
+ * That function is responsible for performing the post reset
+ * delay.
+ *
+ * @param handle
+ * pointer passes to the above functions as first parameter
+ */
+static inline void al_pcie_perform_flr(int (* pcie_read_config_u32)(void *handle, int where, uint32_t *val),
+	int (* pcie_write_config_u32)(void *handle, int where, uint32_t val),
+	int (* pcie_flr)(void *handle),
+	void *handle)
+{
+	int i;
+	/*
+	 * Shadow copy of the config-space registers clobbered by the FLR.
+	 * Size must match the number of read/write pairs below exactly
+	 * (currently 11); keep the two sequences in the same order.
+	 */
+	uint32_t cfg_reg_store[11];
+
+	/*
+	 * Save phase: capture every register that is not preserved across
+	 * a function level reset.
+	 * NOTE(review): the callbacks' int return values are ignored here -
+	 * assumes config-space accesses cannot meaningfully fail; confirm.
+	 */
+	i = 0;
+	pcie_read_config_u32(handle, AL_PCI_COMMAND,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_DEVICE_ID,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_0,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_2,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_BASE_ADDRESS_4,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_0,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_1,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_2,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_3,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_APP_CONTROL,
+		&cfg_reg_store[i++]);
+	pcie_read_config_u32(handle, AL_PCI_VF_BASE_ADDRESS_0,
+		&cfg_reg_store[i++]);
+
+	/* The actual reset; per the contract above, this callback is also
+	 * responsible for the post-reset delay. */
+	pcie_flr(handle);
+
+	/* Restore phase: write the saved registers back in the exact order
+	 * they were captured, so each cfg_reg_store slot lines up. */
+	i = 0;
+	pcie_write_config_u32(handle, AL_PCI_COMMAND,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_DEVICE_ID,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_0,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_2,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_BASE_ADDRESS_4,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_0,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_1,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_2,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_AXI_CFG_AND_CTR_3,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_APP_CONTROL,
+		cfg_reg_store[i++]);
+	pcie_write_config_u32(handle, AL_PCI_VF_BASE_ADDRESS_0,
+		cfg_reg_store[i++]);
+}
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-alpine/include/al_hal/al_init_ccu_regs.h b/arch/arm/mach-alpine/include/al_hal/al_init_ccu_regs.h
new file mode 100644
index 0000000..4a9f694
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_init_ccu_regs.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __AL_HAL_CCU_H__
+#define __AL_HAL_CCU_H__
+
+
+#define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000
+#define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000
+#define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4
+#define AL_CCU_SECURE_ACCESS_OFFSET 0x8
+
+
+#endif /* __AL_HAL_CCU_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_init_cpu_resume.h b/arch/arm/mach-alpine/include/al_hal/al_init_cpu_resume.h
new file mode 100644
index 0000000..8b9c1db
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_init_cpu_resume.h
@@ -0,0 +1,57 @@
+/*
+ * Annapurna labs resume address.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef AL_CPU_RESUME_H_
+#define AL_CPU_RESUME_H_
+
+struct al_cpu_resume_regs_per_cpu {
+	/* Per-CPU flags (AL_CPU_RESUME_FLG_PERCPU_* values) */
+	uint32_t flags;
+
+	/* Address this CPU jumps to when it resumes */
+	uintptr_t resume_addr;
+};
+
+struct al_cpu_resume_regs {
+	/* Watermark validating this struct (AL_CPU_RESUME_MAGIC_NUM/MIN_VER) */
+	uint32_t watermark;
+
+	/* General flags controlling resume behavior (AL_CPU_RESUME_FLG_*) */
+	uint32_t flags;
+
+	/* Per-CPU regs - C99 flexible array member, one entry per core */
+	struct al_cpu_resume_regs_per_cpu per_cpu[];
+};
+
+/* The expected magic number for validating the resume addresses */
+#define AL_CPU_RESUME_MAGIC_NUM 0xf0e1d200
+#define AL_CPU_RESUME_MAGIC_NUM_MASK 0xffffff00
+
+/* The expected minimal version number for validating the capabilities */
+#define AL_CPU_RESUME_MIN_VER 0x000000c3
+#define AL_CPU_RESUME_MIN_VER_MASK 0x000000ff
+
+/* General resume flags*/
+#define AL_CPU_RESUME_FLG_SWITCH_TO_NS_DIS (1 << 0)
+
+/* Per-cpu resume flags */
+/* Don't init anything outside the cluster */
+#define AL_CPU_RESUME_FLG_PERCPU_EXTERNAL_SKIP (1 << 0)
+/* Don't init anything outside the core */
+#define AL_CPU_RESUME_FLG_PERCPU_CLUSTER_SKIP (2 << 0)
+
+#endif /* AL_CPU_RESUME_H_ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric.h b/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric.h
new file mode 100644
index 0000000..b7d87aa
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * The fabric consists of CCU, SMMU(*2), GIC(*2), and NB registers.
+ *
+ * The fabric hal provides simple api for fabric initialization, and not a
+ * complete coverage of the unit's functionality.
+ * main objective: to keep consistent fabric initialization between
+ * different environments. Only functions used by all environments added.
+ *
+ * GIC and SMMU are only handled when initializing the secure context.
+ * Both internal and external gic are initialized in that aspect.
+ */
+
+#ifndef __AL_HAL_SYS_FABRIC_H__
+#define __AL_HAL_SYS_FABRIC_H__
+
+#include "al_hal_common.h" /* NOTE(review): include target was lost in the patch; al_bool usage suggests al_hal_common.h - confirm */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Initialize NB service regs
+ *
+ * @param nb_regs_address
+ * address of nb service registers
+ * @param dev_ord_relax
+ * optimization: relax ordering between device-access reads and
+ * writes to different addresses.
+ */
+void al_nbservice_init(void __iomem *nb_regs_address,
+ al_bool dev_ord_relax);
+
+/**
+ * Initialize CCU
+ *
+ * @param ccu_address
+ * address of ccu registers
+ * @param iocc
+ * enable I/O cache coherency
+ */
+void al_ccu_init(void __iomem *ccu_address, al_bool iocc);
+
+/**
+ * Clear NB service regs settings
+ *
+ * @param nb_regs_address
+ * address of nb service registers
+ */
+void al_nbservice_clear_settings(void __iomem *nb_regs_address);
+
+/**
+ * Clear ccu settings
+ *
+ * @param ccu_address
+ * address of ccu registers
+ */
+void al_ccu_clear_settings(void __iomem *ccu_address);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_SYS_FABRIC_H__ */
diff --git a/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric_offsets.h b/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric_offsets.h
new file mode 100644
index 0000000..eb624f2
--- /dev/null
+++ b/arch/arm/mach-alpine/include/al_hal/al_init_sys_fabric_offsets.h
@@ -0,0 +1,51 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __AL_HAL_SYS_FABRIC_OFFSETS_H__
+#define __AL_HAL_SYS_FABRIC_OFFSETS_H__
+
+/*
+ * Offsets of System-Fabric sub-units
+ * Inside AL northbridge-PASW
+ */
+
+#define AL_NB_SMMU0_OFFSET 0x30000 /*SMMU for I/O fabric 0 */
+#define AL_NB_SMMU1_OFFSET 0x40000 /*SMMU for I/O fabric 1 */
+#define AL_NB_SERVICE_OFFSET 0x70000
+#define AL_NB_CCU_OFFSET 0x90000
+#define AL_NB_GIC_OFFSET(id) (0 + (id)*0x8000)
+
+
+#endif /* __AL_HAL_SYS_FABRIC_OFFSETS_H__ */
diff --git a/arch/arm/mach-alpine/include/mach/al_fabric.h b/arch/arm/mach-alpine/include/mach/al_fabric.h
new file mode 100644
index 0000000..45640cc
--- /dev/null
+++ b/arch/arm/mach-alpine/include/mach/al_fabric.h
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/arm/mach-alpine/include/mach/al_fabric.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AL_FABRIC_H__
+#define __AL_FABRIC_H__
+
+/*
+ * North Bridge cause interrupt register definitions
+ */
+/* How many SYS Fabric IRQ Group instances exist in the system */
+#define AL_FABRIC_INSTANCE_N 4
+
+/* Number of SYS Fabric IRQ */
+#define AL_FABRIC_IRQ_N 32
+
+/* Cross trigger interrupt */
+#define AL_FABRIC_IRQ_NCTI_0 0
+#define AL_FABRIC_IRQ_NCTI_1 1
+#define AL_FABRIC_IRQ_NCTI_2 2
+#define AL_FABRIC_IRQ_NCTI_3 3
+/* Communications channel receive */
+#define AL_FABRIC_IRQ_COMMRX_0 4
+#define AL_FABRIC_IRQ_COMMRX_1 5
+#define AL_FABRIC_IRQ_COMMRX_2 6
+#define AL_FABRIC_IRQ_COMMRX_3 7
+/* Communication channel transmit */
+#define AL_FABRIC_IRQ_COMMTX_0 8
+#define AL_FABRIC_IRQ_COMMTX_1 9
+#define AL_FABRIC_IRQ_COMMTX_2 10
+#define AL_FABRIC_IRQ_COMMTX_3 11
+/* Write logging FIFO has valid transactions */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_VALID_M0 12
+/* Emulation write fifo log is wrapped */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_A0 12
+/* Write logging FIFO wrap occurred */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_M0 13
+/* Emulation write fifo log is full (new pushes might corrupt data) */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_A0 13
+/* Write logging FIFO is full */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_M0 14
+/* Emulation write fifo log is wrapped */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_WRAP_1_A0 14
+/* Reserved, read undefined must write as zeros. */
+#define AL_FABRIC_IRQ_RESERVED_15_15_M0 15
+/* Emulation write fifo log is full (new pushes might corrupt data) */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_FULL_1_A0 15
+/* Error indicator for AXI write transactions with a BRESP error */
+#define AL_FABRIC_IRQ_CPU_AXIERRIRQ 16
+/* Error indicator for: L2 RAM double-bit ECC error, illegal write */
+#define AL_FABRIC_IRQ_CPU_INTERRIRQ 17
+/* Coherent fabric error summary interrupt */
+#define AL_FABRIC_IRQ_ACF_ERRORIRQ 18
+/* DDR Controller ECC Correctable error summary interrupt */
+#define AL_FABRIC_IRQ_MCTL_ECC_CORR_ERR 19
+/* DDR Controller ECC Uncorrectable error summary interrupt */
+#define AL_FABRIC_IRQ_MCTL_ECC_UNCORR_ERR 20
+/* DRAM parity error interrupt */
+#define AL_FABRIC_IRQ_MCTL_PARITY_ERR 21
+/* Reserved, not functional */
+#define AL_FABRIC_IRQ_MCTL_WDATARAM_PAR 22
+/* Reserved */
+#define AL_FABRIC_IRQ_MCTL_RSVRD 23
+/* SB PoS error */
+#define AL_FABRIC_IRQ_SB_POS_ERR 24
+/* Received msix is not mapped to local GIC or IO-GIC spin */
+#define AL_FABRIC_IRQ_MSIX_ERR_INT 25
+/* Coresight timestamp overflow */
+#define AL_FABRIC_IRQ_CORESIGHT_TS_OVERFLOW 26
+/* Write data parity error from SB channel 0. */
+#define AL_FABRIC_IRQ_SB0_WRDATA_PERR 27
+/* Write data parity error from SB channel 1. */
+#define AL_FABRIC_IRQ_SB1_WRDATA_PERR 28
+/* Read data parity error from SB slaves. */
+#define AL_FABRIC_IRQ_SB_SLV_RDATA_PERR 29
+/* Logged read transaction is received */
+#define AL_FABRIC_IRQ_RD_LOG_VALID 30
+/* Reserved, read undefined must write as zeros. */
+#define AL_FABRIC_IRQ_RESERVED_31_31_M0 31
+/* Write logging FIFO has valid transactions */
+#define AL_FABRIC_IRQ_WR_LOG_FIFO_VALID_A0 31
+
+
+/**
+ * Get SW interrupt index corresponding to a given sys fabric irq index.
+ *
+ * @param idx
+ * The SYS Fabric IRQ Group index
+ * @param irq
+ * The SYS Fabric IRQ index (use AL_FABRIC_IRQ_*)
+ *
+ * @returns Software interrupt index
+ *
+ * Usecase example - inside your module, get the SW irq using the API, and bind
+ * it to your handler:
+ * irq = al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_);
+ * request_irq(irq, irq_handler, ...);
+ */
+int al_fabric_get_cause_irq(unsigned int idx, int irq);
+
+/**
+ * Check if Hardware Cache-Coherency is enabled or not
+ * @return 0 if Hardware Cache-Coherency is not enabled and a positive number
+ * otherwise
+ */
+int al_fabric_hwcc_enabled(void);
+
+int al_fabric_init(void);
+
+#endif /* __AL_FABRIC_H__ */
diff --git a/arch/arm/mach-alpine/include/mach/al_hal_iomap.h b/arch/arm/mach-alpine/include/mach/al_hal_iomap.h
new file mode 100644
index 0000000..79f9bcc
--- /dev/null
+++ b/arch/arm/mach-alpine/include/mach/al_hal_iomap.h
@@ -0,0 +1,176 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * This file contains the default I/O mappings for Annapurna Labs
+ */
+
+#ifndef __AL_HAL_IOMAP_H__
+#define __AL_HAL_IOMAP_H__
+
+/* Primary Windows */
+#define AL_PCIE_0_BASE 0xe0000000
+#define AL_PCIE_0_SIZE SZ_128M
+#define AL_PCIE_1_BASE 0xe8000000
+#define AL_PCIE_1_SIZE SZ_64M
+#define AL_PCIE_2_BASE 0xec000000
+#define AL_PCIE_2_SIZE SZ_64M
+#define AL_NOR_BASE 0xf4000000
+#define AL_SPI_BASE 0xf8000000
+#define AL_NAND_BASE 0xfa100000
+#define AL_SB_BASE 0xfc000000
+#define AL_SB_SIZE SZ_32M
+#define AL_NB_BASE 0xfb000000
+#define AL_NB_SIZE SZ_2M
+#define AL_PCIE_0_ECAM_BASE 0xfb600000
+#define AL_PCIE_0_ECAM_SIZE SZ_2M
+#define AL_PCIE_1_ECAM_BASE 0xfb800000
+#define AL_PCIE_1_ECAM_SIZE SZ_2M
+#define AL_PCIE_2_ECAM_BASE 0xfba00000
+#define AL_PCIE_2_ECAM_SIZE SZ_2M
+#define AL_PCIE_INT_ECAM_BASE 0xfbc00000
+#define AL_PCIE_INT_ECAM_SIZE SZ_1M
+#define AL_PCIE_INT_BASE 0xfe000000
+#define AL_PCIE_INT_SIZE SZ_16M
+
+#define AL_MSIX_SPACE_BASE_LOW 0xfbe00000
+#define AL_MSIX_SPACE_BASE_HIGH 0x0
+
+#define AL_PBS_INT_MEM_BASE 0xfbff0000
+
+#define AL_DRAM_2_HIGH 0x00000001 /* 4GB - 8GB */
+#define AL_DRAM_3_HIGH 0x00000002 /* 3GB - 4GB */
+
+/******************/
+/* SB Sub Windows */
+/******************/
+#define AL_SB_PCIE_BASE(idx) (AL_SB_BASE + 0x01800000 + \
+ ((idx) * 0x20000))
+
+#define AL_SB_PCIE_NUM 3
+
+#define AL_SB_RING_BASE (AL_SB_BASE + 0x01860000)
+
+#define AL_SB_PBS_BASE (AL_SB_BASE + 0x01880000)
+
+#define AL_SB_SERDES_BASE (AL_SB_BASE + 0x018c0000)
+
+#define AL_SB_DFX_BASE (AL_SB_BASE + 0x018e0000)
+
+/******************/
+/* NB Sub Windows */
+/******************/
+
+/* NB main / secondary GICs and their Sub Windows*/
+#define AL_NB_GIC_MAIN 0
+#define AL_NB_GIC_SECONDARY 1
+
+#define AL_NB_GIC_BASE(id) (AL_NB_BASE + (id)*0x8000)
+
+#define AL_NB_GIC_DIST_BASE(id) (AL_NB_GIC_BASE(id) + 0x00001000)
+#define AL_NB_GIC_CPU_BASE(id) (AL_NB_GIC_BASE(id) + 0x00002000)
+
+#define AL_NB_IOMMU_BASE(idx) (AL_NB_BASE + 0x30000 + (idx)*0x10000)
+
+#define AL_NB_IOMMU_NUM 2
+
+/* NB service registers */
+#define AL_NB_SERVICE_BASE (AL_NB_BASE + 0x00070000)
+
+/* DDR Controller */
+#define AL_NB_DDR_CTL_BASE (AL_NB_BASE + 0x00080000)
+
+/* DDR PHY */
+#define AL_NB_DDR_PHY_BASE (AL_NB_BASE + 0x00088000)
+
+/* CCI Controller */
+#define AL_NB_CCI_BASE (AL_NB_BASE + 0x00090000)
+
+/* SB PBS Sub Windows */
+#define AL_I2C_PLD_BASE (AL_SB_PBS_BASE + 0x00000000)
+#define AL_SPI_SLAVE_BASE (AL_SB_PBS_BASE + 0x00001000)
+#define AL_SPI_MASTER_BASE (AL_SB_PBS_BASE + 0x00002000)
+
+#define AL_UART_BASE(idx) (AL_SB_PBS_BASE + 0x00003000 \
+ + ((idx) * 0x1000))
+#define AL_UART_NUM 4
+
+#define AL_GPIO_BASE(idx) (AL_SB_PBS_BASE + \
+ ((idx != 5) ? 0x00007000 + ((idx) * 0x1000) : 0x17000))
+#define AL_GPIO_NUM 6
+
+#define AL_WD_BASE(idx) (AL_SB_PBS_BASE + 0x0000c000 \
+ + ((idx) * 0x1000))
+#define AL_WD_NUM 4
+
+#define AL_TIMER_BASE(idx, sub_idx) \
+ (AL_SB_PBS_BASE + 0x00010000 \
+ + ((idx) * 0x1000) + ((sub_idx) * 0x20))
+
+#define AL_TIMER_NUM 4
+#define AL_TIMER_SUB_TIMERS_NUM 2
+
+#define AL_I2C_GEN_BASE (AL_SB_PBS_BASE + 0x00014000)
+#define AL_PBS_UFC_WRAP_BASE (AL_SB_PBS_BASE + 0x00015000)
+#define AL_PBS_UFC_CNTL_BASE (AL_SB_PBS_BASE + 0x00015800)
+#define AL_PBS_OTP_BASE (AL_SB_PBS_BASE + 0x00016000)
+#define AL_PBS_BOOT_ROM_BASE (AL_SB_PBS_BASE + 0x00020000)
+#define AL_PBS_SRAM_BASE (AL_SB_PBS_BASE + 0x00024000)
+#define AL_PBS_REGFILE_BASE (AL_SB_PBS_BASE + 0x00028000)
+
+/* SB Ring Sub Windows */
+#define AL_CMOS_NUM_GROUPS 10
+
+#define AL_CMOS_GROUP_BASE(idx) (AL_SB_RING_BASE + (idx) * 0x100)
+
+#define AL_TEMP_SENSOR_BASE (AL_SB_RING_BASE + 0xa00)
+
+#define AL_PLL_SB 0
+#define AL_PLL_NB 1
+#define AL_PLL_CPU 2
+
+#define AL_PLL_BASE(id) (AL_SB_RING_BASE + 0xb00 + (id) * 0x100)
+
+/* SB DFX Sub Windows */
+#define AL_DFX_CTRL_BASE (AL_SB_DFX_BASE + 0x0)
+#define AL_DAP2JTAG_BASE (AL_SB_DFX_BASE + 0x8000)
+
+/***************************/
+/* PBS int mem sub windows */
+/***************************/
+#define AL_PBS_INT_MEM_BOOT_ROM_BASE (AL_PBS_INT_MEM_BASE + 0x0)
+#define AL_PBS_INT_MEM_SRAM_BASE (AL_PBS_INT_MEM_BASE + 0x4000)
+
+#endif
diff --git a/arch/arm/mach-alpine/include/mach/alpine_machine.h b/arch/arm/mach-alpine/include/mach/alpine_machine.h
new file mode 100644
index 0000000..a0b0c21
--- /dev/null
+++ b/arch/arm/mach-alpine/include/mach/alpine_machine.h
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/arm/mach-alpine/include/mach/alpine_machine.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ALPINE_MACHINE_H__
+#define __ALPINE_MACHINE_H__
+
+/* Get SerDes group regs base, to be used in relevant Alpine drivers.
+ * Valid group is 0..3.
+ * Returns virtual base address of the group regs base. */
+void __iomem *alpine_serdes_resource_get(u32 group);
+
+/* SerDes ETH mode */
+enum alpine_serdes_eth_mode {
+ ALPINE_SERDES_ETH_MODE_SGMII,
+ ALPINE_SERDES_ETH_MODE_KR,
+};
+
+/*
+ * Set SerDes ETH mode for an entire group, unless already set
+ * Valid group is 0..3.
+ * Returns 0 upon success.
+ */
+int alpine_serdes_eth_mode_set(
+ u32 group,
+ enum alpine_serdes_eth_mode mode);
+
+/* Lock the given SerDes group when using common registers */
+void alpine_serdes_eth_group_lock(u32 group);
+
+/* Unlock the given SerDes group when using common registers */
+void alpine_serdes_eth_group_unlock(u32 group);
+
+/* Alpine CPU Power Management Services Initialization */
+void __init alpine_cpu_pm_init(void);
+
+/* Determine whether Alpine CPU PM services are available */
+int alpine_cpu_suspend_wakeup_supported(void);
+
+/* Wake-up a CPU */
+void alpine_cpu_wakeup(unsigned int cpu, uintptr_t resume_addr);
+
+/* Power-off a CPU permanently */
+void alpine_cpu_die(unsigned int cpu);
+
+/* Suspend a CPU temporarily */
+void alpine_cpu_suspend(void);
+
+#endif /* __ALPINE_MACHINE_H__ */
diff --git a/arch/arm/mach-alpine/include/mach/timex.h b/arch/arm/mach-alpine/include/mach/timex.h
new file mode 100644
index 0000000..c0442eb
--- /dev/null
+++ b/arch/arm/mach-alpine/include/mach/timex.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/arm/mach-alpine/include/mach/timex.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-alpine/include/mach/uncompress.h b/arch/arm/mach-alpine/include/mach/uncompress.h
new file mode 100644
index 0000000..d75db4d
--- /dev/null
+++ b/arch/arm/mach-alpine/include/mach/uncompress.h
@@ -0,0 +1,59 @@
+/*
+ * linux/arch/arm/mach-alpine/include/mach/uncompress.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+
+#define get_uart_base() (AL_UART_BASE(0))
+
+#define SERIAL_BASE get_uart_base()
+
+static void putc(const char c)
+{
+ unsigned char *base = (unsigned char *)SERIAL_BASE;
+ int i;
+
+ for (i = 0; i < 0x1000; i++) {
+ if (base[UART_LSR << 2] & UART_LSR_THRE)
+ break;
+ barrier();
+ }
+
+ base[UART_TX << 2] = c;
+}
+
+static void flush(void)
+{
+ unsigned char *base = (unsigned char *)SERIAL_BASE;
+ unsigned char mask;
+ int i;
+
+ mask = UART_LSR_TEMT | UART_LSR_THRE;
+
+ for (i = 0; i < 0x1000; i++) {
+ if ((base[UART_LSR << 2] & mask) == mask)
+ break;
+ barrier();
+ }
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
diff --git a/arch/arm/mach-alpine/msix.c b/arch/arm/mach-alpine/msix.c
new file mode 100644
index 0000000..1b1d2c2
--- /dev/null
+++ b/arch/arm/mach-alpine/msix.c
@@ -0,0 +1,227 @@
+/*
+ * arch/arm/mach-alpine/msix.c
+ *
+ * Annapurna Labs MSIX support services
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/*
+ * The IRQ range currently supported overlaps legacy interrupts of units
+ * capable of using MSIX. The assumption is that either all MSIX capable units
+ * will use MSIX, or none of them.
+ *
+ * The services below currently support only the primary GIC and not the
+ * secondary GIC.
+ */
+
+static u32 al_irq_msi_addr_high;
+static u32 al_irq_msi_addr_low;
+static int al_irq_msi_first;
+static int al_irq_msi_last;
+static int al_irq_num_msi_irqs;
+
+static DECLARE_BITMAP(msi_irq_in_use, 1000);
+
+/*
+ * Dynamic irq allocate and deallocation
+ */
+static int al_msix_create_irq(void)
+{
+ int irq, pos;
+
+again:
+ pos = find_first_zero_bit(msi_irq_in_use, al_irq_num_msi_irqs);
+ if (pos >= al_irq_num_msi_irqs)
+ return -ENOSPC;
+
+ irq = al_irq_msi_first + pos;
+
+ /* test_and_set_bit operates on 32-bits at a time */
+ if (test_and_set_bit(pos, msi_irq_in_use))
+ goto again;
+
+ return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+ clear_bit(irq - al_irq_msi_first, msi_irq_in_use);
+}
+
+static void al_msix_irq_mask(struct irq_data *d)
+{
+ if (d->msi_desc)
+ mask_msi_irq(d);
+}
+
+static void al_msix_irq_unmask(struct irq_data *d)
+{
+ if (d->msi_desc)
+ unmask_msi_irq(d);
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ struct irq_desc *irq_desc;
+
+ pr_debug("%s(%d)\n", __func__, irq);
+
+ irq_desc = irq_to_desc(irq);
+
+ if (irq_desc)
+ mask_msi_irq(&irq_desc->irq_data);
+ else
+ pr_err("%s: irq_to_desc failed!\n", __func__);
+
+ destroy_irq(irq);
+}
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_desc *irq_desc;
+ struct irq_data *irq_data;
+ struct irq_domain *domain;
+ int irq;
+ int sgi;
+ struct msi_msg msg;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ irq = al_msix_create_irq();
+
+ if (irq < 0)
+ return irq;
+
+ irq_set_msi_desc(irq, desc);
+
+ /*get the hwirq from irq using the domain*/
+ irq_data = irq_get_irq_data(irq);
+ domain = irq_data->domain;
+ if (domain->revmap_type) /*revmap type is not legacy*/
+ return -1;
+
+ sgi = irq - domain->revmap_data.legacy.first_irq +
+ domain->revmap_data.legacy.first_hwirq;
+ sgi = sgi - 32;
+
+ /*
+ * MSIX message address format:
+ * [63:20] - MSIx TBAR
+ * Same value as the MSIx Translation Base Address Register
+ * [19] - WFE_EXIT
+ * Once set by MSIx message, an EVENTI is signaled to the CPUs
+ * cluster specified by 'Local GIC Target List'
+ * [18:17] - Target GIC ID
+ * Specifies which IO-GIC (external shared GIC) is targeted
+ * 0: Local GIC, as specified by the Local GIC Target List
+ * 1: IO-GIC 0
+ * 2: Reserved
+ * 3: Reserved
+ * [16:13] - Local GIC Target List
+ * Specifies the Local GICs list targeted by this MSIx
+ * message.
+ * [16] If set, SPIn is set in Cluster 0 local GIC
+ * [15:13] Reserved
+ * [15] If set, SPIn is set in Cluster 1 local GIC
+ * [14] If set, SPIn is set in Cluster 2 local GIC
+ * [13] If set, SPIn is set in Cluster 3 local GIC
+ * [12:3] - SPIn
+ * Specifies the SPI (Shared Peripheral Interrupt) index to
+ * be set in target GICs
+ * Notes:
+ * If targeting any local GIC then only SPI[249:0] are valid
+ * [2] - Function vector
+ * MSI Data vector extension hint
+ * [1:0] - Reserved
+ * Must be set to zero
+ *
+ * In the case below:
+ * Cluster 0 local GIC. 'irq' is subtracted by 32, because the first 32
+ * interrupt IDs are for SGI and PPI.
+ */
+ msg.address_hi = al_irq_msi_addr_high;
+
+ /* Only the PPIs of the main gic are used.
+ * PPIS are hw-irqs 17-31.
+ * first_hwirq will be 16 for main gic and 32 for secondary gic.
+ * */
+ if (domain->revmap_data.legacy.first_hwirq == 16)
+ msg.address_lo = al_irq_msi_addr_low + (1<<16) + (sgi << 3);
+ else if (domain->revmap_data.legacy.first_hwirq == 32)
+ msg.address_lo = al_irq_msi_addr_low + (1<<17) + (sgi << 3);
+ else
+ return -1;
+
+ msg.data = 0;
+
+ write_msi_msg(irq, &msg);
+
+ irq_desc = irq_to_desc(irq);
+
+ if (!irq_desc) {
+ dev_err(dev, "%s: irq_to_desc failed!\n", __func__);
+ return -1;
+ }
+
+ unmask_msi_irq(&irq_desc->irq_data);
+
+ return 0;
+}
+
+int al_msix_init(void)
+{
+ int status = 0;
+ int irq;
+ struct device_node *np;
+ struct resource res;
+
+ /* TODO: do for primary CPU only - what about sync? */
+ np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-msix");
+ BUG_ON(!np);
+
+ if (of_address_to_resource(np, 0, &res))
+ BUG_ON(1);
+
+ al_irq_msi_addr_high = ((u64)res.start) >> 32;
+ al_irq_msi_addr_low = res.start & 0xffffffff;
+ al_irq_msi_first = irq_of_parse_and_map(np, 0);
+ al_irq_msi_last = irq_of_parse_and_map(np, 1);
+ al_irq_num_msi_irqs = al_irq_msi_last - al_irq_msi_first + 1;
+
+ for (irq = al_irq_msi_first; irq <= al_irq_msi_last; irq++) {
+ status = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+
+ if (status < 0) {
+ pr_err("%s: set_irq_type(%d) failed!\n", __func__, irq);
+ break;
+ }
+ }
+
+ gic_arch_extn.irq_mask = al_msix_irq_mask;
+ gic_arch_extn.irq_unmask = al_msix_irq_unmask;
+
+ return status;
+}
diff --git a/arch/arm/mach-alpine/pcie_of.c b/arch/arm/mach-alpine/pcie_of.c
new file mode 100644
index 0000000..e650715
--- /dev/null
+++ b/arch/arm/mach-alpine/pcie_of.c
@@ -0,0 +1,664 @@
+/*
+ * Annapurna Labs PCI host bridge device tree driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * - This driver is for both the internal PCIe bus and for external PCIe ports
+ * (in Root-Complex mode).
+ * - The driver requires PCI_DOMAINS as each port is registered as a pci domain
+ * - for the external PCIe ports, the following applies:
+ * - Configuration access to bus 0 device 0 are routed to the configuration
+ * space header register that found in the host bridge.
+ * - The driver assumes the controller link is initialized by the
+ * bootloader.
+ */
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "al_hal_pcie.h"
+#include "al_hal_iomap.h"
+#include "core.h"
+
+enum al_pci_type {
+ AL_PCI_TYPE_INTERNAL = 0,
+ AL_PCI_TYPE_EXTERNAL = 1,
+};
+
+/* PCI bridge private data */
+struct al_pcie_pd {
+ struct device *dev;
+ enum al_pci_type type;
+ struct resource ecam;
+ struct resource mem;
+ struct resource io;
+ struct resource realio;
+ struct resource regs;
+ struct resource busn;
+
+ void __iomem *ecam_base;
+ void __iomem *regs_base;
+
+ void __iomem *local_bridge_config_space;
+ unsigned int index;
+ /* lock configuration access as we change the target_bus */
+ spinlock_t conf_lock;
+ /*HAL structure*/
+ struct al_pcie_port pcie_port;
+ struct al_pcie_link_status status;
+ u8 target_bus;
+};
+
+
+static inline struct al_pcie_pd *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static int al_pcie_enable_controller(struct al_pcie_pd *pcie)
+{
+ if (pcie->type == AL_PCI_TYPE_INTERNAL)
+ return 0;
+
+ al_pcie_handle_init(&pcie->pcie_port, pcie->regs_base, pcie->index);
+ if (al_pcie_function_type_get(&pcie->pcie_port) != AL_PCIE_FUNCTION_MODE_RC) {
+ dev_err(pcie->dev, "controller is not configured to Root-Complex mode\n");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static bool al_pcie_port_check_link(struct al_pcie_pd *pcie)
+{
+ struct al_pcie_link_status *status = &pcie->status;
+ int rc;
+
+ if (pcie->type == AL_PCI_TYPE_INTERNAL)
+ return true;
+
+ rc = al_pcie_link_status(&pcie->pcie_port, status);
+ if (rc < 0) {
+ dev_err(pcie->dev, "failed to get pcie link status\n");
+ return false;
+ }
+ if (status->link_up == AL_FALSE) {
+ dev_info(pcie->dev, "link %u down\n", pcie->index);
+ return false;
+ }
+ dev_info(pcie->dev, "link up: speed Gen %d width x%x\n",
+ status->speed, status->lanes);
+
+ return true;
+}
+
+/* prepare controller for issuing IO transactions*/
+static int al_pcie_io_prepare(struct al_pcie_pd *pcie)
+{
+ struct al_pcie_port *pcie_port = &pcie->pcie_port;
+ if (pcie->type == AL_PCI_TYPE_INTERNAL) {
+ return 0;
+ } else {
+ struct al_pcie_atu_region io_atu_region = {
+ .enable = AL_TRUE,
+ .direction = al_pcie_atu_dir_outbound,
+ .index = 0,
+ .base_addr = (uint64_t)pcie->io.start,
+ .limit = (uint64_t)pcie->io.start + resource_size(&pcie->io) - 1,
+ .target_addr = (uint64_t)pcie->realio.start, /* the address that matches will be translated to this address + offset */
+ .invert_matching = AL_FALSE,
+ .tlp_type = AL_PCIE_TLP_TYPE_IO, /* pcie tlp type*/
+ .attr = 0, /* pcie frame header attr field*/
+ /* outbound specific params */
+ .msg_code = 0, /* pcie message code */
+ .cfg_shift_mode = AL_FALSE,
+ /* inbound specific params*/
+ };
+
+ dev_dbg(pcie->dev, "%s: base %llx, limit %llx, target %llx\n",
+ __func__, io_atu_region.base_addr,
+ io_atu_region.limit, io_atu_region.target_addr);
+ al_pcie_atu_region_set(pcie_port, &io_atu_region);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_AL_PCIE_RMN_1010
+/* prepare controller for issuing mem transactions */
+static int al_pcie_mem_prepare(struct al_pcie_pd *pcie)
+{
+ struct al_pcie_port *pcie_port = &pcie->pcie_port;
+ if (pcie->type == AL_PCI_TYPE_INTERNAL) {
+ return 0;
+ } else {
+ struct al_pcie_atu_region mem_atu_region;
+
+ /*
+ * This region is meant to ensure all accesses to this section
+ * will be always with type memory (accessing from DMA may
+ * change the type to IO).
+ */
+ mem_atu_region.enable = AL_TRUE;
+ mem_atu_region.direction = al_pcie_atu_dir_outbound;
+ mem_atu_region.index = 1;
+ mem_atu_region.base_addr = pcie->mem.start;
+ mem_atu_region.limit = pcie->mem.end;
+ mem_atu_region.target_addr = pcie->mem.start;
+ mem_atu_region.invert_matching = AL_FALSE;
+ mem_atu_region.tlp_type = AL_PCIE_TLP_TYPE_MEM; /* pcie tlp type*/
+ mem_atu_region.attr = 0; /* pcie frame header attr field*/
+ mem_atu_region.msg_code = 0; /* pcie message code */
+ mem_atu_region.cfg_shift_mode = AL_FALSE;
+ mem_atu_region.bar_number = 0; /* not used */
+ mem_atu_region.match_mode = 0; /* address match mode */
+ mem_atu_region.enable_attr_match_mode = AL_FALSE;
+ mem_atu_region.enable_msg_match_mode = AL_FALSE;
+
+ dev_dbg(pcie->dev, "%s: base %llx, limit %llx, target %llx\n",
+ __func__, mem_atu_region.base_addr,
+ mem_atu_region.limit, mem_atu_region.target_addr);
+
+ al_pcie_atu_region_set(pcie_port, &mem_atu_region);
+ }
+
+ return 0;
+}
+#endif
+
+/* prepare controller for issuing CFG transactions*/
+static int al_pcie_cfg_prepare(struct al_pcie_pd *pcie)
+{
+ struct al_pcie_port *pcie_port = &pcie->pcie_port;
+
+ if (pcie->type == AL_PCI_TYPE_INTERNAL)
+ return 0;
+
+ spin_lock_init(&pcie->conf_lock);
+ pcie->target_bus = 1;
+ /*
+ * force the controller to set the pci bus in the TLP to
+ * pcie->target_bus no matter what the bus portion of the ECAM address
+ * is.
+ */
+ al_pcie_target_bus_set(pcie_port, pcie->target_bus, 0xFF);
+
+ /* the bus connected to the controller is always enumerated as bus 1 */
+ al_pcie_secondary_bus_set(pcie_port, 1);
+ /* set subordinary to max value */
+ al_pcie_subordinary_bus_set(pcie_port, 0xff);
+
+ return 0;
+}
+
+/* Get ECAM address according to bus, device, function, and offset */
+static void __iomem *al_pcie_cfg_addr(struct al_pcie_pd *pcie,
+ struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ void __iomem *ecam_base = pcie->ecam_base;
+ unsigned int busnr = bus->number;
+ int slot = PCI_SLOT(devfn);
+ void __iomem *ret_val;
+
+ /* Trap out illegal values */
+ if (busnr > 255)
+ BUG();
+ if (devfn > 255)
+ BUG();
+
+ ret_val = (ecam_base + ((PCI_SLOT(devfn) << 15) |
+ (PCI_FUNC(devfn) << 12) |
+ offset));
+ if (pcie->type == AL_PCI_TYPE_INTERNAL)
+ return ret_val;
+
+ /* If there is no link, just show the PCI bridge. */
+ if ((pcie->status.link_up == AL_FALSE) && (busnr > 0 || slot > 0))
+ return NULL;
+
+ if (busnr == 0) {
+ if (slot > 0)
+ return NULL;
+ ret_val = pcie->local_bridge_config_space;
+ ret_val += offset;
+ } else {
+ if (busnr != pcie->target_bus) {
+ dev_dbg(pcie->dev, "change target bus number from %d to %d\n",
+ pcie->target_bus, busnr);
+ pcie->target_bus = busnr;
+ al_pcie_target_bus_set(&pcie->pcie_port,
+ pcie->target_bus,
+ 0xFF);
+ }
+ }
+ return ret_val;
+}
+
+/* PCI config space read */
+static int al_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata);
+ int rc = PCIBIOS_SUCCESSFUL;
+ unsigned long flags;
+ void __iomem *addr;
+ u32 v = 0xffffffff;
+
+ dev_dbg(pcie->dev, "read_config from %d size %d dev (domain %d) %d:%d:%d\n",
+ where, size, pcie->index,
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ spin_lock_irqsave(&pcie->conf_lock, flags);
+
+ addr = al_pcie_cfg_addr(pcie, bus, devfn, where);
+ dev_dbg(pcie->dev, " read address %p\n", addr);
+
+ if (addr) {
+ switch (size) {
+ case 1:
+ v = readb(addr);
+ break;
+ case 2:
+ v = readw(addr);
+ break;
+ case 4:
+ v = readl(addr);
+ break;
+ default:
+ rc = PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ } else {
+ rc = PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ spin_unlock_irqrestore(&pcie->conf_lock, flags);
+ *val = v;
+ pr_debug("read_config_byte read %#x\n", *val);
+ return rc;
+}
+
+/* PCI config space write */
+static int al_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata);
+ int rc = PCIBIOS_SUCCESSFUL;
+ unsigned long flags;
+ void __iomem *addr;
+
+ dev_dbg(pcie->dev, "write_config_byte %#x to %d size %d dev (domain %d) %d:%d:%d\n", val,
+ where, size, pcie->index,
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ spin_lock_irqsave(&pcie->conf_lock, flags);
+
+ addr = al_pcie_cfg_addr(pcie, bus, devfn, where);
+ dev_dbg(pcie->dev, " write address %p\n", addr);
+ if (addr) {
+ switch (size) {
+ case 1:
+ writeb((u8)val, addr);
+ break;
+
+ case 2:
+ writew((u16)val, addr);
+ break;
+
+ case 4:
+ writel(val, addr);
+ break;
+ default:
+ rc = PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ } else {
+ rc = PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ spin_unlock_irqrestore(&pcie->conf_lock, flags);
+ return rc;
+}
+
+/* PCI bridge config space read/write operations */
+static struct pci_ops al_pcie_ops = {
+ .read = al_read_config,
+ .write = al_write_config,
+};
+
+/* PCI config space read */
+static int al_internal_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata);
+ void __iomem *addr = al_pcie_cfg_addr(pcie, bus, devfn, where & ~3);
+ u32 v;
+
+ pr_debug("read_config from %d size %d dev %d:%d:%d\n", where, size,
+ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ switch (size) {
+ case 1:
+ v = readl(addr);
+ v = (v >> ((where&0x3)*8)) & 0xff;
+ break;
+ case 2:
+ v = readl(addr);
+ v = (v >> ((where&0x3)*8)) & 0xffff;
+ break;
+ default:
+ v = readl(addr);
+ break;
+ }
+
+ *val = v;
+ pr_debug("read_config_byte read %#x\n", *val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI config space write */
+static int al_internal_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(bus->sysdata);
+ void __iomem *addr = al_pcie_cfg_addr(pcie, bus, devfn, where);
+
+ pr_debug("write_config %#x to %d size %d dev %d:%d:%d\n", val,
+ where, size, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ switch (size) {
+ case 1:
+ writeb((u8)val, addr);
+ break;
+ case 2:
+ writew((u16)val, addr);
+ break;
+ case 4:
+ writel(val, addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI bridge config space read/write operations */
+static struct pci_ops al_internal_pcie_ops = {
+ .read = al_internal_read_config,
+ .write = al_internal_write_config,
+};
+
+static int al_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(sys);
+
+ if (pcie->type == AL_PCI_TYPE_EXTERNAL)
+ pci_add_resource_offset(&sys->resources,
+ &pcie->realio,
+ sys->io_offset);
+
+ pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ return 1;
+}
+
+static int al_pcie_parse_dt(struct al_pcie_pd *pcie)
+{
+ struct device_node *np = pcie->dev->of_node;
+ struct of_pci_range_iter iter;
+ int err;
+ static int index;
+
+ if (pcie->type == AL_PCI_TYPE_EXTERNAL) {
+ /* Get registers resources */
+ err = of_address_to_resource(np, 0, &pcie->regs);
+ if (err < 0) {
+ dev_dbg(pcie->dev, "of_address_to_resource(): %d\n",
+ err);
+ return err;
+ }
+ dev_dbg(pcie->dev, " regs %pR\n", &pcie->regs);
+ pcie->regs_base = devm_request_and_ioremap(pcie->dev,
+ &pcie->regs);
+ if (!pcie->regs_base)
+ return -EADDRNOTAVAIL;
+ /* set the base address of the configuration space of the local
+ * bridge
+ */
+ pcie->local_bridge_config_space = pcie->regs_base + 0x2000;
+ }
+ /* Get the ECAM, I/O and memory ranges from DT */
+ for_each_of_pci_range(&iter, np) {
+ unsigned long restype = iter.flags & IORESOURCE_TYPE_BITS;
+ if (restype == 0) {
+ range_iter_fill_resource(iter, np, &pcie->ecam);
+ pcie->ecam.flags = IORESOURCE_MEM;
+ pcie->ecam.name = "ECAM";
+ }
+ if (restype == IORESOURCE_IO) {
+ range_iter_fill_resource(iter, np, &pcie->io);
+ range_iter_fill_resource(iter, np, &pcie->realio);
+ pcie->realio.start = iter.pci_addr;
+ pcie->realio.end = iter.pci_addr + iter.size - 1;
+ pcie->io.name = "I/O";
+
+ pci_ioremap_io(iter.pci_addr + iter.size, iter.cpu_addr);
+ }
+ if (restype == IORESOURCE_MEM) {
+ range_iter_fill_resource(iter, np, &pcie->mem);
+ pcie->mem.name = "MEM";
+ }
+ }
+
+ /* map ecam space */
+ dev_dbg(pcie->dev, " ecam %pr\n", &pcie->ecam);
+ pcie->ecam_base = devm_request_and_ioremap(pcie->dev, &pcie->ecam);
+ if (!pcie->ecam_base)
+ return -EADDRNOTAVAIL;
+
+ err = of_pci_parse_bus_range(np, &pcie->busn);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse ranges property: %d\n",
+ err);
+ pcie->busn.name = np->name;
+ pcie->busn.start = 1;
+ pcie->busn.end = 0xff;
+ pcie->busn.flags = IORESOURCE_BUS;
+ }
+ pcie->index = index++;
+ return 0;
+}
+
+/* map the specified device/slot/pin to an IRQ */
+static int al_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct of_irq oirq;
+ int ret;
+
+ ret = of_irq_map_pci(dev, &oirq);
+ if (ret)
+ return ret;
+
+ return irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+}
+
+static struct pci_bus *al_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct al_pcie_pd *pcie = sys_to_pcie(sys);
+
+ if (pcie->type == AL_PCI_TYPE_INTERNAL)
+ return pci_scan_root_bus(pcie->dev, sys->busnr,
+ &al_internal_pcie_ops,
+ sys, &sys->resources);
+ else
+ return pci_scan_root_bus(pcie->dev, sys->busnr,
+ &al_pcie_ops,
+ sys, &sys->resources);
+}
+
+
+/*
+ * Fixup function to make sure Max Payload Size and MaxReadReq
+ * are set based on host bridge Max capabilities.
+ */
+
+extern int pcie_bus_configure_set(struct pci_dev *dev, void *data);
+static void al_pci_fixup(struct pci_dev *dev)
+{
+ u8 smpss = 0;
+ pcie_bus_configure_set(dev, &smpss);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, al_pci_fixup);
+
+
+
+static int al_pcie_add_host_bridge(struct al_pcie_pd *pcie)
+{
+ struct hw_pci hw;
+
+ memset(&hw, 0, sizeof(hw));
+
+ hw.nr_controllers = 1;
+ hw.domain = pcie->index;
+ hw.private_data = (void **)&pcie;
+ hw.setup = al_pcie_setup;
+ hw.scan = al_pcie_scan_bus;
+ hw.map_irq = al_pcie_map_irq;
+
+ pci_common_init(&hw);
+
+ return 0;
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+ { .compatible = "annapurna-labs,al-pci", .data = (void *)AL_PCI_TYPE_EXTERNAL },
+ { .compatible = "annapurna-labs,al-internal-pcie", .data = (void *)AL_PCI_TYPE_INTERNAL },
+ { },
+};
+
+extern uint64_t al_pcie_read_addr_start[AL_SB_PCIE_NUM];
+extern uint64_t al_pcie_read_addr_end[AL_SB_PCIE_NUM];
+extern uint64_t al_pcie_write_addr_start[AL_SB_PCIE_NUM];
+extern uint64_t al_pcie_write_addr_end[AL_SB_PCIE_NUM];
+extern bool al_pcie_address_valid[AL_SB_PCIE_NUM];
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+ enum al_pci_type type;
+ const struct of_device_id *of_id;
+ struct al_pcie_pd *pcie;
+ int err;
+
+ of_id = of_match_device(al_pcie_of_match, &pdev->dev);
+ if (of_id)
+ type = (enum al_pci_type) of_id->data;
+ else
+ BUG();
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->type = type;
+ pcie->dev = &pdev->dev;
+
+ err = al_pcie_parse_dt(pcie);
+ if (err < 0)
+ return err;
+
+ err = al_pcie_enable_controller(pcie);
+ if (err)
+ goto err;
+
+ al_pcie_port_check_link(pcie);
+
+ al_pcie_cfg_prepare(pcie);
+
+ al_pcie_io_prepare(pcie);
+
+#ifdef CONFIG_AL_PCIE_RMN_1010
+ al_pcie_mem_prepare(pcie);
+ if (pcie->type != AL_PCI_TYPE_INTERNAL) {
+
+ al_pcie_read_addr_start[pcie->index] =
+ min(pcie->mem.start,
+ pcie->io.start);
+ al_pcie_read_addr_end[pcie->index] =
+ max(pcie->mem.end,
+ pcie->io.end);
+
+ al_pcie_write_addr_start[pcie->index] = pcie->io.start;
+ al_pcie_write_addr_end[pcie->index] = pcie->io.end;
+
+ al_pcie_address_valid[pcie->index] = true;
+
+ dev_info(&pdev->dev, "%s: [pcie %d] use DMA for read from %llx to %llx\n",
+ __func__, pcie->index, al_pcie_read_addr_start[pcie->index],
+ al_pcie_read_addr_end[pcie->index]);
+
+ dev_info(&pdev->dev, "%s: [pcie %d] use DMA for write from %llx to %llx\n",
+ __func__, pcie->index, al_pcie_write_addr_start[pcie->index],
+ al_pcie_write_addr_end[pcie->index]);
+
+ /*
+ * set an axi IO bar to make the accesses to this addresses
+ * with size of 4 bytes. (access from DMA will be 16 Bytes minimum)
+ */
+ al_pcie_axi_io_config(
+ &pcie->pcie_port,
+ al_pcie_read_addr_start[pcie->index],
+ al_pcie_read_addr_end[pcie->index]);
+ }
+#endif
+
+ /* Configure IOCC for external PCIE */
+ if (pcie->type != AL_PCI_TYPE_INTERNAL) {
+ if (pdev->dev.archdata.hwcc) {
+ printk("Configuring PCIE for IOCC\n");
+ al_pcie_port_snoop_config(&pcie->pcie_port, 1);
+ }
+ }
+
+ err = al_pcie_add_host_bridge(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PCIe controller: %d\n",
+ err);
+ goto enable_err;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+ return 0;
+enable_err:
+err:
+ return err;
+}
+
+static struct platform_driver al_pcie_driver = {
+ .driver = {
+ .name = "al-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(al_pcie_of_match),
+ },
+ .probe = al_pcie_probe,
+};
+module_platform_driver(al_pcie_driver);
diff --git a/arch/arm/mach-alpine/platsmp.c b/arch/arm/mach-alpine/platsmp.c
new file mode 100644
index 0000000..1225c13
--- /dev/null
+++ b/arch/arm/mach-alpine/platsmp.c
@@ -0,0 +1,169 @@
+/*
+ * linux/arch/arm/mach-alpine/platsmp.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+extern void secondary_startup(void);
+
+static void ca15x4_init_cpu_map(void)
+{
+ unsigned int i, ncores;
+
+ asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (ncores));
+ ncores = ((ncores >> 24) & 3) + 1;
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+static void ca15x4_smp_enable(unsigned int max_cpus)
+{
+ int i;
+
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void __cpuinit write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit al_boot_secondary(unsigned int cpu
+ , struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /* Check CPU resume regs validity */
+ if (!alpine_cpu_suspend_wakeup_supported()) {
+ WARN(1, "%s: wakeup not supported!\n", __func__);
+ return -ENOSYS;
+ }
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+ * CPUs in the holding pen until we're ready for them. However,
+ * since we haven't sent them a soft interrupt, they shouldn't
+ * be there.
+ */
+ write_pen_release(cpu);
+
+ /* Wake-up secondary CPU */
+ alpine_cpu_wakeup(cpu, virt_to_phys(secondary_startup));
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init al_smp_init_cpus(void)
+{
+ ca15x4_init_cpu_map();
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ alpine_cpu_pm_init();
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ ca15x4_smp_enable(max_cpus);
+}
+
+extern int alpine_suspend_finish(unsigned long);
+
+struct smp_operations __initdata al_smp_ops = {
+ .smp_init_cpus = al_smp_init_cpus,
+ .smp_prepare_cpus = platform_smp_prepare_cpus,
+ .smp_secondary_init = platform_secondary_init,
+ .smp_boot_secondary = al_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = alpine_cpu_die,
+#endif
+};
diff --git a/arch/arm/mach-alpine/sleep-alpine.S b/arch/arm/mach-alpine/sleep-alpine.S
new file mode 100644
index 0000000..ae799c1
--- /dev/null
+++ b/arch/arm/mach-alpine/sleep-alpine.S
@@ -0,0 +1,45 @@
+/*
+ * Annapurna Labs cpu-idle handler.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(alpine_suspend_finish)
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #(1 << 2) @ Disable the C bit
+ mcr p15, 0, r0, c1, c0, 0
+ mov r8, lr @ backup lr
+ bl v7_flush_dcache_louis
+ mov lr, r8
+ mrc p15, 0, r0, c1, c0, 1
+ bic r0, r0, #(1 << 6) @ Disable SMP bit
+ mcr p15, 0, r0, c1, c0, 1
+ isb
+ dsb
+ @ This should shut down
+ wfi
+ @ We didn't shut down; we probably have a pending interrupt.
+ mrc p15, 0, r0, c1, c0, 0
+ orr r0, r0, #(1 << 2) @ Enable the C bit
+ mcr p15, 0, r0, c1, c0, 0
+ mrc p15, 0, r0, c1, c0, 1
+ orr r0, r0, #(1 << 6) @ Enable SMP bit
+ mcr p15, 0, r0, c1, c0, 1
+ isb
+ dsb
+ mov pc, lr
+ENDPROC(alpine_suspend_finish)
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 73a2d90..72de05f 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
if ARCH_IXP4XX
-config ARCH_SUPPORTS_BIG_ENDIAN
- bool
- default y
-
menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 08c9fe9..9ee7426 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -613,7 +613,7 @@ config ARCH_PHYS_ADDR_T_64BIT
def_bool ARM_LPAE
config ARCH_DMA_ADDR_T_64BIT
- bool
+ def_bool ARM_LPAE
config ARM_THUMB
bool "Support Thumb user binaries" if !CPU_THUMBONLY
@@ -862,6 +862,19 @@ config MIGHT_HAVE_CACHE_L2X0
instead of this option, thus preventing the user from
inadvertently configuring a broken kernel.
+config ARM_UNIPROCESSOR_IOCC
+ bool "Enable (IO)Cache Coherency for Uniprocessor Systems"
+ depends on CPU_V7
+ default n
+ help
+ Uniprocessor systems do not support cache coherency by default, including
+ IO-Cache Coherency.
+ This option forces cache coherency support even for uniprocessor systems.
+ For architectures that support it - this allows efficient IO handling,
+ without unneeded SMP-kernel overhead.
+
+ If unsure, say N.
+
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
default MIGHT_HAVE_CACHE_L2X0
@@ -932,3 +945,9 @@ config ARCH_HAS_BARRIERS
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+ bool
+ help
+ This option specifies the architecture can support big endian
+ operation.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be9..224a9cc 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8074199..3815a82 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r3, r3
-#endif
+ ARM_BE8(rev r3, r3)
+
do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b..9240364 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
#include
#include
#include
+#include
#include "fault.h"
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2 = 0;
fault = probe_kernel_address(ptr + 1, tinst2);
- instr = (tinstr << 16) | tinst2;
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
}
- } else
+ } else {
fault = probe_kernel_address(instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
if (fault) {
type = TYPE_FAULT;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef3e0f3..9674476 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 32aa586..c9e37aa 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include "mm.h"
@@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* coherent with the kernels mapping.
*/
if (!PageHighMem(page)) {
- __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+ size_t page_size = PAGE_SIZE << compound_order(page);
+ __cpuc_flush_dcache_area(page_address(page), page_size);
} else {
- void *addr;
-
+ unsigned long i;
if (cache_is_vipt_nonaliasing()) {
- addr = kmap_atomic(page);
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_atomic(addr);
- } else {
- addr = kmap_high_get(page);
- if (addr) {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
+ void *addr = kmap_atomic(page);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high(page);
+ kunmap_atomic(addr);
+ }
+ } else {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
+ void *addr = kmap_high_get(page);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high(page);
+ }
}
}
}
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
index 05a4e94..ab4409a 100644
--- a/arch/arm/mm/fsr-3level.c
+++ b/arch/arm/mm/fsr-3level.c
@@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = {
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
new file mode 100644
index 0000000..3d1e4a2
--- /dev/null
+++ b/arch/arm/mm/hugetlbpage.c
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
+ * of type casting from pmd_t * to pte_t *.
+ */
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
+
+ return (pte_t *)pmd;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud)
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+ return pte;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+{
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ return page;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9a..c1319e6 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -82,7 +82,7 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_limit (PHYS_MASK)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ce..2db5d2d 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -204,13 +204,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index daf336f..555fc3a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -472,7 +472,11 @@ static void __init build_mem_type_table(void)
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif
+#ifndef CONFIG_ARM_UNIPROCESSOR_IOCC
if (is_smp()) {
+#else
+ {
+#endif
/*
* Mark memory with the "shared" attribute
* for SMP systems
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 919405e..f2e43e8 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -218,9 +218,7 @@ __v6_setup:
#endif /* CONFIG_MMU */
adr r5, v6_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5fbccee..990d48c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -185,9 +185,13 @@ __v7_ca7mp_setup:
__v7_ca15mp_setup:
mov r10, #0
1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_ARM_UNIPROCESSOR_IOCC
+ mrc p15, 0, r0, c1, c0, 1
+#elif defined(CONFIG_SMP)
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
+#endif
+#if defined (CONFIG_ARM_UNIPROCESSOR_IOCC) || defined(CONFIG_SMP)
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
@@ -352,9 +356,7 @@ __v7_setup:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
- orr r6, r6, #1 << 25 @ big-endian page tables
-#endif
+ ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
orr r5, r5, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 81edd31..211687d 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -222,10 +222,10 @@ static int __init xen_guest_init(void)
}
if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
return 0;
- xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+ xen_hvm_resume_frames = res.start;
xen_events_irq = irq_of_parse_and_map(node, 0);
pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
- version, xen_events_irq, xen_hvm_resume_frames);
+ version, xen_events_irq, xen_hvm_resume_frames >> PAGE_SHIFT);
xen_domain_type = XEN_HVM_DOMAIN;
xen_setup_features();
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 4a92bac..2223e39 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -78,7 +78,7 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
if (failb == disks-1) {
if (faila == disks-2) {
/* P+Q failure. Just rebuild the syndrome. */
- init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
} else {
struct page *blocks[disks];
@@ -95,21 +95,21 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
blocks[count++] = ptrs[i];
}
dest = ptrs[faila];
- init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, addr_conv);
tx = async_xor(dest, blocks, 0, count, bytes, &submit);
- init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
+ init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
}
} else {
if (failb == disks-2) {
/* data+P failure. */
- init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, addr_conv);
tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
} else {
/* data+data failure. */
- init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL, NULL, NULL, addr_conv);
tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
}
}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5823735..761a6bc 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1561,14 +1561,17 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
static int alg_test_crc32c(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
+#ifndef CONFIG_CRYPTO_DEV_AL_AHASH_CRC
struct crypto_shash *tfm;
u32 val;
+#endif
int err;
err = alg_test_hash(desc, driver, type, mask);
if (err)
goto out;
+#ifndef CONFIG_CRYPTO_DEV_AL_AHASH_CRC
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
@@ -1602,6 +1605,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
} while (0);
crypto_free_shash(tfm);
+#endif
out:
return err;
@@ -1763,6 +1767,18 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha384),cbc(aes))",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = hmac_sha384_aes_cbc_enc_tv_template,
+ .count = HMAC_SHA384_AES_CBC_ENC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 1e701bc..dd34de0 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14106,6 +14106,300 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = {
},
};
+#define HMAC_SHA384_AES_CBC_ENC_TEST_VECTORS 7
+
+static struct aead_testvec hmac_sha384_aes_cbc_enc_tv_template[] = {
+ { /* RFC 3602 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+ .klen = 8 + 48 + 16,
+ .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+ "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ .input = "Single block msg",
+ .ilen = 16,
+ .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ "\x27\x08\x94\x2d\xbe\x77\x18\x1a"
+ "\x79\x1c\xf1\x22\x95\x80\xe0\x60"
+ "\x7f\xf9\x92\x60\x83\xbd\x60\x9c"
+ "\xf6\x62\x8b\xa9\x7d\x56\xe2\xaf"
+ "\x80\x43\xbc\x41\x4a\x63\x0b\xa0"
+ "\x16\x25\xe2\xfe\x0a\x96\xf6\xa5"
+ "\x6c\x0b\xc2\x53\xb4\x27\xd9\x42",
+ .rlen = 16 + 48,
+ }, { /* RFC 3602 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
+ "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
+ .klen = 8 + 48 + 16,
+ .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+ "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .ilen = 32,
+ .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
+ "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
+ "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
+ "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
+ "\x4e\x5b\xa8\x65\x51\xc6\x58\xaf"
+ "\x31\x57\x50\x3d\x01\xa1\xa4\x3f"
+ "\x42\xd1\xd7\x31\x76\x8d\xf8\xc8"
+ "\xe4\xd2\x7e\xc5\x23\xe7\xc6\x2e"
+ "\x2d\xfd\x9d\xc1\xac\x50\x1e\xcf"
+ "\xa0\x10\xeb\x1a\x9c\xb7\xe1\xca",
+ .rlen = 32 + 48,
+ }, { /* RFC 3602 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x6c\x3e\xa0\x47\x76\x30\xce\x21"
+ "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd",
+ .klen = 8 + 48 + 16,
+ .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+ "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+ .input = "This is a 48-byte message (exactly 3 AES blocks)",
+ .ilen = 48,
+ .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
+ "\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
+ "\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
+ "\x50\x69\x39\x27\x67\x72\xf8\xd5"
+ "\x02\x1c\x19\x21\x6b\xad\x52\x5c"
+ "\x85\x79\x69\x5d\x83\xba\x26\x84"
+ "\xa1\x52\xe7\xda\xf7\x05\xb6\xca"
+ "\xad\x0f\x51\xed\x5a\xd3\x0f\xdf"
+ "\xde\xeb\x3f\x31\xed\x3a\x43\x93"
+ "\x3b\xb7\xca\xc8\x1b\xe7\x3b\x61"
+ "\x6a\x05\xfd\x2d\x6a\x5c\xb1\x0d"
+ "\x6e\x7a\xeb\x1c\x84\xec\xdb\xde",
+ .rlen = 48 + 48,
+ }, { /* RFC 3602 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x56\xe4\x7a\x38\xc5\x59\x89\x74"
+ "\xbc\x46\x90\x3d\xba\x29\x03\x49",
+ .klen = 8 + 48 + 16,
+ .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+ "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+ .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
+ .ilen = 64,
+ .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
+ "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
+ "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
+ "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
+ "\x35\x90\x7a\xa6\x32\xc3\xff\xdf"
+ "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad"
+ "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d"
+ "\x49\xa5\x3e\x87\xf4\xc3\xda\x55"
+ "\x85\x7b\x91\xe0\x29\xeb\xd3\x59"
+ "\x7c\xe3\x67\x14\xbe\x71\x2a\xd2"
+ "\x8a\x1a\xd2\x35\x78\x6b\x69\xba"
+ "\x64\xa5\x04\x00\x19\xc3\x4c\xae"
+ "\x71\xff\x76\x9f\xbb\xc3\x29\x22"
+ "\xc2\xc6\x51\xf1\xe6\x29\x5e\xa5",
+ .rlen = 64 + 48,
+ }, { /* RFC 3602 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x90\xd3\x82\xb4\x10\xee\xba\x7a"
+ "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf",
+ .klen = 8 + 48 + 16,
+ .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+ "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+ .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01",
+ .alen = 8,
+ .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
+ "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x01\x02\x03\x04\x05\x06\x07\x08"
+ "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
+ .ilen = 80,
+ .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
+ "\xa9\x45\x3e\x19\x4e\x12\x08\x49"
+ "\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
+ "\x33\x00\x13\xb4\x89\x8d\xc8\x56"
+ "\xa4\x69\x9e\x52\x3a\x55\xdb\x08"
+ "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52"
+ "\x77\x5b\x07\xd1\xdb\x34\xed\x9c"
+ "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a"
+ "\xa2\x69\xad\xd0\x47\xad\x2d\x59"
+ "\x13\xac\x19\xb7\xcf\xba\xd4\xa6"
+ "\x57\x5f\xb4\xd7\x74\x6f\x18\x97"
+ "\xb7\xde\xfc\xf3\x4e\x0d\x29\x4d"
+ "\xa0\xff\x39\x9e\x2d\xbf\x27\xac"
+ "\x54\xb9\x8a\x3e\xab\x3b\xac\xd3"
+ "\x36\x43\x74\xfc\xc2\x64\x81\x8a"
+ "\x2c\x15\x72\xdf\x3f\x9d\x5b\xa4",
+ .rlen = 80 + 48,
+ }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x18" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 8 + 48 + 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ilen = 64,
+ .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
+ "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
+ "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
+ "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
+ "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
+ "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
+ "\x08\xb0\xe2\x79\x88\x59\x88\x81"
+ "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd"
+ "\x29\x9b\x42\x47\x0b\xbf\xf3\x54"
+ "\x54\x95\xb0\x89\xd5\xa0\xc3\x78"
+ "\x60\x6c\x18\x39\x6d\xc9\xfb\x2a"
+ "\x34\x1c\xed\x95\x10\x1e\x43\x0a"
+ "\x72\xce\x26\xbc\x74\xd9\x6f\xa2"
+ "\xf1\xd9\xd0\xb1\xdf\x3d\x93\x14",
+ .rlen = 64 + 48,
+ }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x20" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 8 + 48 + 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ilen = 64,
+ .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+ "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
+ "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
+ "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
+ "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
+ "\xa5\x30\xe2\x63\x04\x23\x14\x61"
+ "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
+ "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b"
+ "\x9f\x50\xce\x64\xd9\xa3\xc9\x7a"
+ "\x15\x3a\x3d\x46\x9a\x90\xf3\x06"
+ "\x22\xad\xc5\x24\x77\x50\xb8\xfe"
+ "\xbe\x37\x16\x86\x34\x5f\xaf\x97"
+ "\x00\x9d\x86\xc8\x32\x4f\x72\x2f"
+ "\x48\x97\xad\xb6\xb9\x77\x33\xbc",
+ .rlen = 64 + 48,
+ },
+};
+
+
static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_template[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd..9a87e10 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_ATA) += libata.o
+KBUILD_CPPFLAGS += -Iarch/arm/mach-alpine/include/al_hal/
+
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b112625..ed23df8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -43,10 +43,16 @@
#include
#include
#include
+#include
+#include
#include
#include
#include
#include "ahci.h"
+#ifdef CONFIG_ARCH_ALPINE
+#include "al_hal_iofic.h"
+#include "al_hal_iofic_regs.h"
+#endif
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
@@ -72,6 +78,7 @@ enum board_ids {
board_ahci_sb600,
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
+ board_ahci_alpine,
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
@@ -104,6 +111,14 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
+ssize_t al_ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+static struct ata_port_operations ahci_al_ops = {
+ .inherits = &ahci_ops,
+ .transmit_led_message = al_ahci_transmit_led_message,
+};
+
static const struct ata_port_info ahci_port_info[] = {
/* by features */
[board_ahci] = {
@@ -187,6 +202,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
+ [board_ahci_alpine] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_MSIX),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_al_ops,
+ },
};
static const struct pci_device_id ahci_pci_tbl[] = {
@@ -309,6 +331,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* Annapurna Labs */
+ { PCI_VDEVICE(ANNAPURNA_LABS, 0x0031), board_ahci_alpine }, /* 0031 */
+
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
@@ -431,6 +456,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(0x1b4b, 0x9235),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9235 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -1091,11 +1118,165 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
+#ifdef CONFIG_ARCH_ALPINE
+#define al_ahci_iofic_base(base) ((base) + 0x2000)
+
+static ssize_t al_ahci_show_msix_moder(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ ssize_t rc = 0;
+
+ rc = sprintf(buf, "%d\n", hpriv->int_moderation);
+
+ return rc;
+}
+
+static ssize_t al_ahci_store_msix_moder(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ unsigned long interval;
+ int err;
+ int i;
+
+ err = kstrtoul(buf, 10, &interval);
+ if (err < 0)
+ return err;
+
+
+ for (i = 0; i < ahci_nr_ports(hpriv->cap); i++)
+ al_iofic_msix_moder_interval_config(
+ al_ahci_iofic_base(hpriv->mmio),
+ 1 /*GROUP_B*/,
+ i,
+ interval);
+
+ hpriv->int_moderation = interval;
+
+ return len;
+}
+
+
+static struct device_attribute dev_attr_moder = {
+ .attr = {.name = "msix_moder", .mode = (S_IRUGO | S_IWUSR)},
+ .show = al_ahci_show_msix_moder,
+ .store = al_ahci_store_msix_moder,
+};
+
+int al_ahci_sysfs_init(
+ struct device *dev)
+{
+ if (device_create_file(dev, &dev_attr_moder))
+ dev_err(dev, "failed to create msix interrupt moderation sysfs entry");
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+void al_ahci_sysfs_terminate(
+ struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_moder);
+}
+
+
+irqreturn_t ahci_hw_port_interrupt_handler(int irq, void *dev_instance)
+{
+ struct ata_port *ap_this = dev_instance;
+ struct ata_host *host = ap_this->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *iofic_base = al_ahci_iofic_base(hpriv->mmio);
+ VPRINTK("ENTER\n");
+
+ spin_lock(ap_this->lock);
+ ahci_port_intr(ap_this);
+
+ spin_unlock(ap_this->lock);
+
+ spin_lock(&host->lock);
+ /* clean host cause */
+ writel(1 << ap_this->port_no, hpriv->mmio + HOST_IRQ_STAT);
+
+ /* unmask the interrupt in the iofic (auto-masked) */
+ al_iofic_unmask(iofic_base, 1, 1 << ap_this->port_no);
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_HANDLED;
+}
+
+int al_ahci_init_msix(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+ unsigned int msix_vecs = ahci_nr_ports(readl(hpriv->mmio + HOST_CAP));
+ int i;
+ int rc;
+ void __iomem *iofic_base = al_ahci_iofic_base(hpriv->mmio);
+
+ hpriv->msix_entries = NULL;
+
+ dev_info(&pdev->dev, "use MSIX for ahci controller. vectors: %u\n",
+ msix_vecs);
+ hpriv->msix_entries = kcalloc(msix_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!hpriv->msix_entries) {
+ dev_err(&pdev->dev, "failed to allocate msix_entries, vectors %d\n",
+ msix_vecs);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < msix_vecs; i++) {
+ hpriv->msix_entries[i].entry = 3 + i;
+ hpriv->msix_entries[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(pdev, hpriv->msix_entries, msix_vecs);
+
+ if (rc) {
+ dev_info(&pdev->dev,"failed to enable MSIX, vectors %d rc %d\n",
+ msix_vecs, rc);
+ kfree(hpriv->msix_entries);
+ hpriv->msix_entries = NULL;
+ /* maybe we should fall back to INTx */
+ return -EPERM;
+ }
+
+ /* we use only group B */
+ al_iofic_config(iofic_base, 1 /*GROUP_B*/,
+ INT_CONTROL_GRP_SET_ON_POSEDGE |
+ INT_CONTROL_GRP_AUTO_CLEAR |
+ INT_CONTROL_GRP_AUTO_MASK |
+ INT_CONTROL_GRP_CLEAR_ON_READ);
+
+ al_iofic_moder_res_config(iofic_base, 1, 15);
+
+ al_iofic_unmask(iofic_base, 1, (1 << msix_vecs) - 1);
+
+ hpriv->msix_vecs = msix_vecs;
+
+ al_ahci_sysfs_init(&pdev->dev);
+
+ return 0;
+}
+#endif
+
int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
int rc;
unsigned int maxvec;
+#ifdef CONFIG_ARCH_ALPINE
+ if (hpriv->flags & AHCI_HFLAG_MSIX) {
+ if (!al_ahci_init_msix(pdev, hpriv))
+ return hpriv->msix_vecs;
+ }
+#endif
+
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
rc = pci_enable_msi_block_auto(pdev, &maxvec);
if (rc > 0) {
@@ -1136,6 +1317,8 @@ int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
{
int i, rc;
+ struct ahci_host_priv *hpriv = host->private_data;
+ int port_irq;
/* Sharing Last Message among several ports is not supported */
if (n_msis < host->n_ports)
@@ -1146,15 +1329,43 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
return rc;
for (i = 0; i < host->n_ports; i++) {
- rc = devm_request_threaded_irq(host->dev,
- irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
- dev_driver_string(host->dev), host->ports[i]);
+ struct ata_port *ap = host->ports[i];
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (hpriv->msix_entries) {
+#ifdef CONFIG_ARCH_ALPINE
+ port_irq = hpriv->msix_entries[i].vector;
+ snprintf(pp->msix_name, sizeof(pp->msix_name), "ahci_%u",
+ ap->port_no);
+ rc = devm_request_irq(host->dev,
+ port_irq, ahci_hw_port_interrupt_handler,
+ 0, pp->msix_name, ap);
+#else
+ BUG();
+#endif
+ } else {
+ port_irq = irq + i;
+ rc = devm_request_threaded_irq(host->dev,
+ port_irq, ahci_hw_interrupt,
+ ahci_thread_fn, IRQF_SHARED,
+ dev_driver_string(host->dev), host->ports[i]);
+ }
+
if (rc)
goto out_free_irqs;
}
- for (i = 0; i < host->n_ports; i++)
- ata_port_desc(host->ports[i], "irq %d", irq + i);
+ for (i = 0; i < host->n_ports; i++) {
+ if (hpriv->msix_entries)
+#ifdef CONFIG_ARCH_ALPINE
+ port_irq = hpriv->msix_entries[i].vector;
+#else
+ BUG();
+#endif
+ else
+ port_irq = irq + i;
+ ata_port_desc(host->ports[i], "irq %d", port_irq);
+ }
rc = ata_host_register(host, &ahci_sht);
if (rc)
@@ -1165,9 +1376,17 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
out_free_all_irqs:
i = host->n_ports;
out_free_irqs:
- for (i--; i >= 0; i--)
- devm_free_irq(host->dev, irq + i, host->ports[i]);
-
+ for (i--; i >= 0; i--) {
+ if (hpriv->msix_entries)
+#ifdef CONFIG_ARCH_ALPINE
+ port_irq = hpriv->msix_entries[i].vector;
+#else
+ BUG();
+#endif
+ else
+ port_irq = irq + i;
+ devm_free_irq(host->dev, port_irq, host->ports[i]);
+ }
return rc;
}
@@ -1179,6 +1398,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
+#ifdef CONFIG_ARCH_ALPINE
+ struct device_node *np;
+#endif
int n_ports, n_msis, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
@@ -1293,6 +1515,63 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_set_em_messages(hpriv, &pi);
+#ifdef CONFIG_ARCH_ALPINE
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ hpriv->led_gpio[i] = -1;
+
+ np = of_find_compatible_node(NULL, NULL, "annapurna-labs,al-sata-sw-leds");
+ if (np) {
+ int err;
+ struct device_node *child;
+ u32 domain;
+ u32 pci_bus;
+ u32 pci_dev;
+ u32 port;
+
+ for_each_child_of_node(np, child) {
+ err = of_property_read_u32(child, "pci_domain", &domain);
+ if (err)
+ continue;
+
+ if (domain != pci_domain_nr(pdev->bus))
+ continue;
+
+ err = of_property_read_u32(child, "pci_bus", &pci_bus);
+ if (err)
+ continue;
+
+ if (pci_bus != pdev->bus->number)
+ continue;
+
+ err = of_property_read_u32(child, "pci_dev", &pci_dev);
+ if (err)
+ continue;
+
+ if (pci_dev != PCI_SLOT(pdev->devfn))
+ continue;
+
+ err = of_property_read_u32(child, "port", &port);
+ if (err)
+ continue;
+
+ err = of_get_named_gpio(child, "gpios", 0);
+ if (err < 0)
+ continue;
+
+ hpriv->led_gpio[port] = err;
+ err = gpio_request(hpriv->led_gpio[port], "sata led gpio");
+ if (err) {
+ dev_err(&pdev->dev, "al ahci gpio_request %d failed: %d\n",
+ hpriv->led_gpio[port], err);
+ continue;
+ }
+ gpio_direction_output(hpriv->led_gpio[port], 1);
+ hpriv->em_msg_type = EM_MSG_TYPE_LED;
+ pi.flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
+ }
+ }
+#endif
+
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
dev_info(&pdev->dev,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 10b14d4..2c27bce 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -232,6 +232,7 @@ enum {
port start (wait until
error-handling stage) */
AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
+ AHCI_HFLAG_MSIX = (1 << 17), /* MSIX */
/* ap->flags bits */
@@ -306,6 +307,7 @@ struct ahci_port_priv {
int fbs_last_dev; /* save FBS.DEV of last FIS */
/* enclosure management info per PM slot */
struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+ char msix_name[16];
};
struct ahci_host_priv {
@@ -321,6 +323,11 @@ struct ahci_host_priv {
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
struct clk *clk; /* Only for platforms supporting clk */
+
+ struct msix_entry *msix_entries;
+ unsigned int msix_vecs;
+ unsigned int int_moderation;
+ int led_gpio[AHCI_MAX_PORTS];
};
extern int ahci_ignore_sss;
@@ -362,6 +369,8 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+void ahci_hw_port_interrupt(struct ata_port *ap);
+void ahci_port_intr(struct ata_port *ap);
irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 7b9bdd8..96656b6 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -43,6 +43,7 @@
#include
#include
#include
+#include
#include
#include "ahci.h"
#include "libata.h"
@@ -173,6 +174,7 @@ struct ata_port_operations ahci_ops = {
.em_store = ahci_led_store,
.sw_activity_show = ahci_activity_show,
.sw_activity_store = ahci_activity_store,
+ .transmit_led_message = ahci_transmit_led_message,
#ifdef CONFIG_PM
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
@@ -774,7 +776,7 @@ static void ahci_start_port(struct ata_port *ap)
/* EM Transmit bit maybe busy during init */
for (i = 0; i < EM_MAX_RETRY; i++) {
- rc = ahci_transmit_led_message(ap,
+ rc = ap->ops->transmit_led_message(ap,
emp->led_state,
4);
if (rc == -EBUSY)
@@ -911,11 +913,13 @@ static void ahci_sw_activity_blink(unsigned long arg)
} else {
/* switch to idle */
led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
- if (emp->blink_policy == BLINK_OFF)
+
+ if ((ata_phys_link_online(link)) || (emp->blink_policy == BLINK_OFF))
led_message |= (1 << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(500));
}
spin_unlock_irqrestore(ap->lock, flags);
- ahci_transmit_led_message(ap, led_message, 4);
+ ap->ops->transmit_led_message(ap, led_message, 4);
}
static void ahci_init_sw_activity(struct ata_link *link)
@@ -926,6 +930,7 @@ static void ahci_init_sw_activity(struct ata_link *link)
/* init activity stats, setup timer */
emp->saved_activity = emp->activity = 0;
+ emp->blink_policy = BLINK_ON;
setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
/* check our blink policy and set flag for link if it's enabled */
@@ -948,6 +953,40 @@ int ahci_reset_em(struct ata_host *host)
}
EXPORT_SYMBOL_GPL(ahci_reset_em);
+ssize_t al_ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ unsigned long flags;
+ int led_val = 0;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ if (hpriv->led_gpio[ap->port_no] == -1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+
+ if(state & EM_MSG_LED_VALUE_ON)
+ led_val = 1;
+
+ gpio_set_value(hpriv->led_gpio[ap->port_no], led_val);
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+ return size;
+}
+
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
@@ -1044,7 +1083,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
if (emp->blink_policy)
state &= ~EM_MSG_LED_VALUE_ACTIVITY;
- return ahci_transmit_led_message(ap, state, size);
+ return ap->ops->transmit_led_message(ap, state, size);
}
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
@@ -1063,7 +1102,7 @@ static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
/* set the LED to OFF */
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
- ahci_transmit_led_message(ap, port_led_state, 4);
+ ap->ops->transmit_led_message(ap, port_led_state, 4);
} else {
link->flags |= ATA_LFLAG_SW_ACTIVITY;
if (val == BLINK_OFF) {
@@ -1071,7 +1110,7 @@ static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
- ahci_transmit_led_message(ap, port_led_state, 4);
+ ap->ops->transmit_led_message(ap, port_led_state, 4);
}
}
emp->blink_policy = val;
@@ -1784,6 +1823,8 @@ void ahci_hw_port_interrupt(struct ata_port *ap)
pp->intr_status |= status;
}
+EXPORT_SYMBOL_GPL(ahci_hw_port_interrupt);
+
irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
{
struct ata_port *ap_this = dev_instance;
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c4cc27e..d48366a 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,4 +39,12 @@ config CPU_IDLE_CALXEDA
help
Select this to enable cpuidle on Calxeda processors.
+config CPU_IDLE_ALPINE
+ bool "CPU Idle Driver for Annapurna Labs Alpine SOC"
+ depends on ARCH_ALPINE
+ default y
+ help
+ Select this to enable cpuidle on Annapurna Labs Alpine SOC.
+ If unsure say Y.
+
endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0d8bd55..0bbafcb 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -1,9 +1,13 @@
#
# Makefile for cpuidle.
#
+ccflags-$(CONFIG_CPU_IDLE_ALPINE) := \
+ -I$(srctree)/arch/arm/mach-alpine/include/al_hal
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
+obj-$(CONFIG_CPU_IDLE_ALPINE) += cpuidle-alpine.o
diff --git a/drivers/cpuidle/cpuidle-alpine.c b/drivers/cpuidle/cpuidle-alpine.c
new file mode 100644
index 0000000..99743a0
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-alpine.c
@@ -0,0 +1,106 @@
+/*
+ * Annapurna labs cpu-idle handler.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "cpuidle.h"
+
+int alpine_cpu_suspend_wakeup_supported(void);
+void alpine_cpu_suspend(void);
+
+static int alpine_enter_lowpower(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
+
+static struct cpuidle_state alpine_cpuidle_set[] __initdata = {
+ [0] = ARM_CPUIDLE_WFI_STATE_PWR(250),
+ [1] = {
+ .enter = alpine_enter_lowpower,
+ .exit_latency = 10,
+ .power_usage = 125,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, alpine_cpuidle_device);
+
+static struct cpuidle_driver alpine_idle_driver = {
+ .name = "alpine_idle",
+ .owner = THIS_MODULE,
+};
+
+static int alpine_enter_lowpower(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ alpine_cpu_suspend();
+ return index;
+}
+
+static int __init alpine_init_cpuidle(void)
+{
+ int i, max_cpuidle_state, cpu_id;
+ struct cpuidle_device *device;
+ struct cpuidle_driver *drv = &alpine_idle_driver;
+
+ if (cpuidle_disabled())
+ return -ENOENT;
+
+ if (!(alpine_cpu_suspend_wakeup_supported())) {
+ pr_err("Annapurna Labs CPUidle components not found\n");
+ return -ENOENT;
+ }
+
+ /* Setup cpuidle driver */
+ drv->state_count = (sizeof(alpine_cpuidle_set) /
+ sizeof(struct cpuidle_state));
+ max_cpuidle_state = drv->state_count;
+ for (i = 0; i < max_cpuidle_state; i++) {
+ memcpy(&drv->states[i], &alpine_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+ drv->safe_state_index = 0;
+ cpuidle_register_driver(&alpine_idle_driver);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ device = &per_cpu(alpine_cpuidle_device, cpu_id);
+ device->cpu = cpu_id;
+ device->state_count = alpine_idle_driver.state_count;
+ if (cpuidle_register_device(device)) {
+ pr_err("CPUidle device registration failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+device_initcall(alpine_init_cpuidle);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dffb855..e71bc55 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -386,4 +386,6 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+source drivers/crypto/al/Kconfig
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 38ce13d..deaecc3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_AL_CRYPTO) += al/
diff --git a/drivers/crypto/al/al_crypto.h b/drivers/crypto/al/al_crypto.h
new file mode 100644
index 0000000..1b5620b
--- /dev/null
+++ b/drivers/crypto/al/al_crypto.h
@@ -0,0 +1,527 @@
+/*
+ * drivers/crypto/al_crypto.h
+ *
+ * Annapurna Labs Crypto driver - header file
+ *
+ * Copyright (C) 2012 Annapurna Labs Ltd.
+ *
+ * Chained scatter/gather lists handling based on caam driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __AL_CRYPTO_H__
+#define __AL_CRYPTO_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "linux/scatterlist.h"
+#include "linux/crypto.h"
+#include
+
+#include "al_hal_ssm_crypto.h"
+#include "al_hal_ssm_crc_memcpy.h"
+
+#define AL_CRYPTO_VERSION "0.01"
+
+#ifndef CONFIG_ALPINE_VP_WA
+#define AL_CRYPTO_TX_CDESC_SIZE 8
+#define AL_CRYPTO_RX_CDESC_SIZE 8
+#else
+/* Currently in VP it is always 16 bytes */
+#define AL_CRYPTO_TX_CDESC_SIZE 16
+#define AL_CRYPTO_RX_CDESC_SIZE 16
+#endif
+
+#define AL_CRYPTO_DMA_MAX_CHANNELS 4
+
+/* 4 interrupts for the 4 queues and 1 for group D */
+#define AL_CRYPTO_MSIX_INTERRUPTS (AL_CRYPTO_DMA_MAX_CHANNELS + 1)
+
+#define AL_CRYPTO_SW_RING_MIN_ORDER 4
+#define AL_CRYPTO_SW_RING_MAX_ORDER 16
+
+/*
+ * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) -
+ * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27
+ * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) -
+ * 1(next_enc_iv_out) - 1(auth_sign_out) = 27
+ */
+#define AL_CRYPTO_OP_MAX_BUFS 27
+#define AL_CRYPTO_HASH_HMAC_IPAD 0x36
+#define AL_CRYPTO_HASH_HMAC_OPAD 0x5c
+
+#define AL_CRYPTO_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE,
+ DES3_EDE_BLOCK_SIZE */
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+#define AL_CRYPTO_STATS_INC(var, incval) (var) += (incval)
+#define AL_CRYPTO_STATS_DEC(var, decval) (var) -= (decval)
+#define AL_CRYPTO_STATS_SET(var, val) (var) = (val)
+#define AL_CRYPTO_STATS_LOCK(lock) \
+ spin_lock_bh(lock)
+#define AL_CRYPTO_STATS_UNLOCK(lock) \
+ spin_unlock_bh(lock)
+#define AL_CRYPTO_STATS_INIT_LOCK(lock) \
+ spin_lock_init(lock)
+#else
+#define AL_CRYPTO_STATS_INC(var, incval)
+#define AL_CRYPTO_STATS_DEC(var, decval)
+#define AL_CRYPTO_STATS_SET(var, val)
+#define AL_CRYPTO_STATS_LOCK(lock)
+
+#define AL_CRYPTO_STATS_UNLOCK(lock)
+#define AL_CRYPTO_STATS_INIT_LOCK(lock)
+#endif
+
+#define AL_CRYPTO_IRQNAME_SIZE 40
+
+#define AL_CRYPTO_INT_MODER_RES 1
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+#define MAX_CACHE_ENTRIES_PER_CHANNEL CACHED_SAD_SIZE
+
+enum al_crypto_req_type {
+ AL_CRYPTO_REQ_ABLKCIPHER,
+ AL_CRYPTO_REQ_AEAD,
+ AL_CRYPTO_REQ_AHASH,
+ AL_CRYPTO_REQ_CRC,
+};
+
+/* software descriptor structure
+ */
+struct al_crypto_sw_desc {
+ union {
+ struct al_crypto_transaction hal_xaction;
+ struct al_crc_transaction hal_crc_xaction;
+ };
+
+ struct al_buf src_bufs[AL_SSM_MAX_SRC_DESCS];
+ struct al_buf dst_bufs[AL_SSM_MAX_SRC_DESCS];
+
+ void *req;
+ int req_type;
+ int src_nents;
+ int assoc_nents;
+ int dst_nents;
+};
+
+/**
+ * cache entry in lru list
+ */
+struct al_crypto_cache_lru_entry {
+ struct list_head list;
+ struct al_crypto_cache_state *ctx;
+ u32 cache_idx;
+};
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+/**
+ * struct al_crypto_chan_stats_gen - Crypto DMA channel statistics - general
+ * @ablkcipher_tfms - active ablkcipher tfms
+ * @aead_tfms - active aead tfms
+ * @ahash_tfms - active ahash tfms
+ */
+struct al_crypto_chan_stats_gen {
+ uint64_t ablkcipher_tfms;
+ uint64_t aead_tfms;
+ uint64_t ahash_tfms;
+ uint64_t crc_tfms;
+};
+
+/**
+ * struct al_crypto_chan_stats_prep - Crypto DMA channel statistics -
+ * preparation
+ * @ablkcipher_encrypt_reqs - ablkcipher encrypt requests
+ * @ablkcipher_encrypt_bytes - ablkcipher encrypted bytes
+ * @ablkcipher_decrypt_reqs - ablkcipher decrypt requests
+ * @ablkcipher_decrypt_bytes - ablkcipher decrypted bytes
+ * @aead_encrypt_hash_reqs - aead combined encrypt+hash requests
+ * @aead_encrypt_bytes - aead encrypted bytes
+ * @aead_hash_bytes - aead hashed bytes
+ * @aead_decrypt_validate_reqs - aead combined decrypt+validate requests
+ * @aead_decrypt_bytes - aead decrypted bytes
+ * @aead_validate_bytes - aead validate bytes
+ * @ahash_reqs - ahash requests
+ * @ahash_bytes - ahash hashed bytes
+ * @cache_misses - SA cache misses
+ * @ablkcipher_reqs_le512 - ablkcipher requests up to 512 bytes
+ * @ablkcipher_reqs_512_2048 - ablkcipher requests between 512 and 2048 bytes
+ * @ablkcipher_reqs_2048_4096 - ablkcipher requests between 2048 and 4096 bytes
+ * @ablkcipher_reqs_gt4096 - ablkcipher requests greater than 4096 bytes
+ * @aead_reqs_le512 - aead requests up to 512 bytes
+ * @aead_reqs_512_2048 - aead requests between 512 and 2048 bytes
+ * @aead_reqs_2048_4096 - aead requests between 2048 and 4096 bytes
+ * @aead_reqs_gt4096 - aead requests greater than 4096 bytes
+ * @ahash_reqs_le512 - ahash requests up to 512 bytes
+ * @ahash_reqs_512_2048 - ahash requests between 512 and 2048 bytes
+ * @ahash_reqs_2048_4096 - ahash requests between 2048 and 4096 bytes
+ * @ahash_reqs_gt4096 - ahash requests greater than 4096 bytes
+ */
+struct al_crypto_chan_stats_prep {
+ uint64_t ablkcipher_encrypt_reqs;
+ uint64_t ablkcipher_encrypt_bytes;
+ uint64_t ablkcipher_decrypt_reqs;
+ uint64_t ablkcipher_decrypt_bytes;
+ uint64_t aead_encrypt_hash_reqs;
+ uint64_t aead_encrypt_bytes;
+ uint64_t aead_hash_bytes;
+ uint64_t aead_decrypt_validate_reqs;
+ uint64_t aead_decrypt_bytes;
+ uint64_t aead_validate_bytes;
+ uint64_t ahash_reqs;
+ uint64_t ahash_bytes;
+ uint64_t crc_reqs;
+ uint64_t crc_bytes;
+ uint64_t cache_misses;
+ uint64_t ablkcipher_reqs_le512;
+ uint64_t ablkcipher_reqs_512_2048;
+ uint64_t ablkcipher_reqs_2048_4096;
+ uint64_t ablkcipher_reqs_gt4096;
+ uint64_t aead_reqs_le512;
+ uint64_t aead_reqs_512_2048;
+ uint64_t aead_reqs_2048_4096;
+ uint64_t aead_reqs_gt4096;
+ uint64_t ahash_reqs_le512;
+ uint64_t ahash_reqs_512_2048;
+ uint64_t ahash_reqs_2048_4096;
+ uint64_t ahash_reqs_gt4096;
+ uint64_t crc_reqs_le512;
+ uint64_t crc_reqs_512_2048;
+ uint64_t crc_reqs_2048_4096;
+ uint64_t crc_reqs_gt4096;
+};
+
+/**
+ * struct al_crypto_chan_stats_comp - Crypto DMA channel statistics -
+ * completion
+ * @redundant_int_cnt - redundant interrupts (interrupts without completions)
+ */
+struct al_crypto_chan_stats_comp {
+ uint64_t redundant_int_cnt;
+ uint64_t max_active_descs;
+};
+#endif
+
+/* internal representation of a DMA channel
+ */
+struct al_crypto_chan {
+ struct al_ssm_dma *hal_crypto;
+
+ int idx;
+ enum al_ssm_q_type type;
+ cpumask_t affinity_hint_mask;
+
+ /* Tx UDMA hw ring */
+ int tx_descs_num; /* number of descriptors in Tx queue */
+ void *tx_dma_desc_virt; /* Tx descriptors ring */
+ dma_addr_t tx_dma_desc;
+
+ /* Rx UDMA hw ring */
+ int rx_descs_num; /* number of descriptors in Rx queue */
+ void *rx_dma_desc_virt; /* Rx descriptors ring */
+ dma_addr_t rx_dma_desc;
+ void *rx_dma_cdesc_virt; /* Rx completion descriptors ring */
+ dma_addr_t rx_dma_cdesc;
+
+ /* SW descriptors ring */
+ u16 alloc_order;
+ struct al_crypto_sw_desc **sw_ring;
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+ struct al_crypto_chan_stats_gen stats_gen;
+ spinlock_t stats_gen_lock; /* locked during access of general stats */
+#endif
+
+ /* Frequently accessed prep */
+ spinlock_t prep_lock ____cacheline_aligned; /* locked during
+ xaction preparation and
+ cache management changes */
+ u16 head;
+ int sw_desc_num_locked; /* num of sw descriptors locked during xaction
+ preparation */
+ u32 tx_desc_produced; /* num of hw descriptors generated by HAL */
+ struct crypto_queue sw_queue; /* sw queue for backlog */
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+ struct al_crypto_chan_stats_prep stats_prep;
+#endif
+
+ /* LRU cache management */
+ int cache_entries_num;
+ struct list_head cache_lru_list;
+ int cache_lru_count;
+ struct al_crypto_cache_lru_entry cache_lru_entries[
+ MAX_CACHE_ENTRIES_PER_CHANNEL];
+
+ /* Frequently accessed cleanup */
+ spinlock_t cleanup_lock ____cacheline_aligned_in_smp; /* locked during
+ cleanup */
+ u16 tail;
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+ struct al_crypto_chan_stats_comp stats_comp;
+#endif
+
+ struct al_crypto_device *device;
+ struct tasklet_struct cleanup_task;
+ struct kobject kobj;
+};
+
+#define to_dev(al_crypto_chan) (&(al_crypto_chan)->device->pdev->dev)
+
+/* internal structure for AL Crypto IRQ
+ */
+struct al_crypto_irq {
+ char name[AL_CRYPTO_IRQNAME_SIZE];
+};
+
+/* internal structure for AL Crypto device
+ */
+struct al_crypto_device {
+ struct pci_dev *pdev;
+
+ struct al_ssm_dma_params ssm_dma_params;
+ void __iomem *udma_regs_base;
+ void __iomem *crypto_regs_base;
+
+ struct al_ssm_dma hal_crypto;
+
+ struct msix_entry msix_entries[AL_CRYPTO_MSIX_INTERRUPTS];
+ struct al_crypto_irq irq_tbl[AL_CRYPTO_MSIX_INTERRUPTS];
+ struct al_crypto_chan *channels[AL_CRYPTO_DMA_MAX_CHANNELS];
+ int num_channels;
+ int max_channels;
+ int crc_channels;
+ struct kset *channels_kset;
+ struct tasklet_struct cleanup_task;
+ int int_moderation;
+ int num_irq_used;
+
+ struct kmem_cache *cache; /* descriptors cache */
+ atomic_t tfm_count; /* used to allocate the dma
+ channel for current tfm */
+ atomic_t crc_tfm_count; /* used to allocate the dma
+ channel for current crc tfm */
+ struct list_head alg_list; /* list of registered crypto algorithms */
+ struct list_head hash_list; /* list of registered hash algorithms */
+ struct list_head crc_list; /* list of registered crc/csum algorithms */
+};
+
+struct al_crypto_cache_state {
+ bool cached;
+ int idx;
+};
+
+/* context structure
+ */
+struct al_crypto_ctx {
+ struct al_crypto_chan *chan;
+ struct al_crypto_cache_state cache_state;
+ struct al_crypto_sa sa;
+ struct al_crypto_hw_sa *hw_sa;
+ dma_addr_t hw_sa_dma_addr;
+ struct crypto_shash *sw_hash; /* for HMAC key hashing */
+ u8 *iv;
+ dma_addr_t iv_dma_addr;
+};
+
+/* DMA ring management inline functions */
+static inline u16 al_crypto_ring_size(struct al_crypto_chan *chan)
+{
+ return 1 << chan->alloc_order;
+}
+
+/* count of transactions in flight with the engine */
+static inline u16 al_crypto_ring_active(struct al_crypto_chan *chan)
+{
+ return CIRC_CNT(chan->head, chan->tail, al_crypto_ring_size(chan));
+}
+static inline u16 al_crypto_ring_space(struct al_crypto_chan *chan)
+{
+ return CIRC_SPACE(chan->head, chan->tail, al_crypto_ring_size(chan));
+}
+
+static inline struct al_crypto_sw_desc *
+al_crypto_get_ring_ent(struct al_crypto_chan *chan, u16 idx)
+{
+ return chan->sw_ring[idx & (al_crypto_ring_size(chan) - 1)];
+}
+
+int al_crypto_get_sw_desc(struct al_crypto_chan *chan, int num);
+
+void al_crypto_tx_submit(struct al_crypto_chan *chan);
+
+#ifdef DEBUG
+#define set_desc_id(desc, i) ((desc)->id = (i))
+#define desc_id(desc) ((desc)->id)
+#else
+#define set_desc_id(desc, i)
+#define desc_id(desc) (0)
+#endif
+
+#ifdef DEBUG
+static inline void hexdump(unsigned char *buf, unsigned int len)
+{
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ buf, len, false);
+}
+#else
+static inline void hexdump(unsigned char *buf, unsigned int len) {}
+#endif
+
+/* Count number of elements in scatterlist and check if there's a scatterlist
+ * chain
+ */
+static inline int sg_count(struct scatterlist *sg, int nbytes)
+{
+ int nents = 0;
+
+ while ((nbytes > 0) && sg) {
+ nbytes -= sg_dma_len(sg);
+ sg = sg_next(sg);
+ nents++;
+ }
+ BUG_ON(nbytes > 0);
+
+ return nents;
+}
+
+static inline void sg_map_to_xaction_buffers(
+ struct scatterlist *sg_in,
+ struct al_buf* bufs,
+ unsigned int length,
+ int *buf_idx)
+{
+ struct scatterlist *sg, *next_sg;
+ unsigned int remain;
+ bool contig;
+
+ sg = sg_in;
+ remain = length;
+ if (remain) {
+ bufs[*buf_idx].addr = sg_dma_address(sg);
+ bufs[*buf_idx].len = 0;
+ while (remain > sg_dma_len(sg)) {
+ bufs[*buf_idx].len += sg_dma_len(sg);
+ remain -= sg_dma_len(sg);
+ next_sg = sg_next(sg);
+ contig = (sg_dma_address(sg) + sg_dma_len(sg) ==
+ sg_dma_address(next_sg));
+ if (!contig) {
+ (*buf_idx)++;
+ bufs[*buf_idx].addr =
+ sg_dma_address(next_sg);
+ bufs[*buf_idx].len = 0;
+ }
+ sg = next_sg;
+ }
+ /* last sg */
+ bufs[*buf_idx].len += remain;
+ (*buf_idx)++;
+ }
+
+}
+
+/* SA cache management using LRU */
+void al_crypto_cache_update_lru(struct al_crypto_chan *chan,
+ struct al_crypto_cache_state *ctx);
+
+u32 al_crypto_cache_replace_lru(struct al_crypto_chan *chan,
+ struct al_crypto_cache_state *ctx,
+ struct al_crypto_cache_state **old_ctx);
+
+void al_crypto_cache_remove_lru(struct al_crypto_chan *chan,
+ struct al_crypto_cache_state *ctx);
+
+/* Core APIs */
+int al_crypto_core_init(
+ struct al_crypto_device *device,
+ void __iomem *iobase_udma,
+ void __iomem *iobase_app);
+
+int al_crypto_core_terminate(
+ struct al_crypto_device *device);
+
+int al_crypto_cleanup_fn(
+ struct al_crypto_chan *chan,
+ int from_tasklet);
+
+void al_crypto_set_int_moderation(
+ struct al_crypto_device *device,
+ int usec);
+
+int al_crypto_get_int_moderation(
+ struct al_crypto_device *device);
+
+/* ablkcipher related functions */
+void al_crypto_cleanup_single_ablkcipher(
+ struct al_crypto_chan *chan,
+ struct al_crypto_sw_desc *desc,
+ u32 comp_status);
+
+int ablkcipher_process_queue(struct al_crypto_chan *chan);
+
+/* aead related functions */
+void al_crypto_cleanup_single_aead(
+ struct al_crypto_chan *chan,
+ struct al_crypto_sw_desc *desc,
+ u32 comp_status);
+
+/* ahash related functions */
+void al_crypto_cleanup_single_ahash(
+ struct al_crypto_chan *chan,
+ struct al_crypto_sw_desc *desc,
+ u32 comp_status);
+
+/* crc related functions */
+void al_crypto_cleanup_single_crc(
+ struct al_crypto_chan *chan,
+ struct al_crypto_sw_desc *desc,
+ u32 comp_status);
+
+int hmac_setkey(struct al_crypto_ctx *ctx,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned int sw_hash_interm_offset,
+ unsigned int sw_hash_interm_size);
+
+/* sysfs */
+void al_crypto_free_channel(struct al_crypto_chan *chan);
+
+int al_crypto_sysfs_init(struct al_crypto_device *device);
+
+void al_crypto_sysfs_terminate(struct al_crypto_device *device);
+
+/* al_crypto_alg APIs */
+int al_crypto_alg_init(struct al_crypto_device *device);
+
+void al_crypto_alg_terminate(struct al_crypto_device *device);
+
+/* al_crypto_hash APIs */
+int al_crypto_hash_init(struct al_crypto_device *device);
+
+void al_crypto_hash_terminate(struct al_crypto_device *device);
+
+/* al_crypto_crc APIs */
+int al_crypto_crc_init(struct al_crypto_device *device);
+
+void al_crypto_crc_terminate(struct al_crypto_device *device);
+
+#endif /* __AL_CRYPTO_H__ */
diff --git a/drivers/crypto/al/al_crypto_alg.c b/drivers/crypto/al/al_crypto_alg.c
new file mode 100644
index 0000000..d7c80a2
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_alg.c
@@ -0,0 +1,1530 @@
+/*
+ * drivers/crypto/al_crypto_alg.c
+ *
+ * Annapurna Labs Crypto driver - ablckcipher/aead algorithms
+ *
+ * Copyright (C) 2012 Annapurna Labs Ltd.
+ *
+ * Algorithm registration code and chained scatter/gather lists
+ * handling based on caam driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+#ifndef DEBUG
+#define DEBUG
+#endif
+*/
+
+/* NOTE(review): the angle-bracket include names were garbled in this copy of
+ * the patch; the list below is reconstructed from the symbols used in this
+ * file (dma_map_sg, get_random_bytes, rtattr/RTA_OK, des_ekey, sha*_state,
+ * crypto_alloc_shash, crypto_authenc_key_param) — verify against the
+ * original driver source. */
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/authenc.h>
+
+#include "al_crypto.h"
+#include "al_hal_ssm_crypto.h"
+
+#define AL_CRYPTO_CRA_PRIORITY 300
+
+
+
+
+/* Forward declarations for the ablkcipher/aead ops referenced by the
+ * driver_algs[] template table below. */
+static int ablkcipher_setkey_des(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen);
+
+static int ablkcipher_setkey_aes(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen);
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req);
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req);
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen);
+
+static int aead_setauthsize(struct crypto_aead *aead, unsigned int authsize);
+
+static int aead_encrypt(struct aead_request *req);
+
+static int aead_decrypt(struct aead_request *req);
+
+static int aead_givencrypt(struct aead_givcrypt_request *req);
+
+/* Per-request context for ablkcipher: only the crypt direction. */
+struct al_crypto_ablkcipher_req_ctx {
+ enum al_crypto_dir dir;
+};
+
+/* Per-request context for aead: IV staging buffer, cacheline aligned
+ * (it is handed to the DMA engine). */
+struct al_crypto_aead_req_ctx {
+ u8 iv[AL_CRYPTO_MAX_IV_LENGTH] ____cacheline_aligned;
+};
+
+/* Static description of one algorithm this driver registers.
+ * The template_u member matching .type is copied into the crypto_alg
+ * at registration time; the enc/auth fields parameterize the HW SA. */
+struct al_crypto_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ uint32_t type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
+ enum al_crypto_sa_enc_type enc_type;
+ enum al_crypto_sa_op sa_op;
+ enum al_crypto_sa_auth_type auth_type;
+ enum al_crypto_sa_sha2_mode sha2_mode;
+ /* SW shash used for HMAC key preprocessing; empty for enc-only algs */
+ char sw_hash_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int sw_hash_interm_offset;
+ unsigned int sw_hash_interm_size;
+};
+
+/* Table of algorithms registered by this driver.
+ * Fix: the ecb(des3_ede) entry declared .blocksize = DES3_EDE_KEY_SIZE (24);
+ * the 3DES cipher block is 8 bytes (same as DES), as used by the
+ * cbc(des3_ede) entry below. */
+static struct al_crypto_alg_template driver_algs[] = {
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_aes,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CBC,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_aes,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_ECB,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_aes,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CTR,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-al",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_des,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .enc_type = AL_CRYPT_DES_CBC,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-al",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_des,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .enc_type = AL_CRYPT_DES_ECB,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-des3-ede-al",
+ /* was DES3_EDE_KEY_SIZE (24): 3DES block size is 8 bytes */
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_des,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .enc_type = AL_CRYPT_TRIPDES_ECB,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-des3-ede-al",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_u.ablkcipher = {
+ .setkey = ablkcipher_setkey_des,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .enc_type = AL_CRYPT_TRIPDES_CBC,
+ .sa_op = AL_CRYPT_ENC_ONLY,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_u.aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CBC,
+ .sa_op = AL_CRYPT_ENC_AUTH,
+ .auth_type = AL_CRYPT_AUTH_SHA1,
+ .sha2_mode = 0,
+ .sw_hash_name = "sha1",
+ .sw_hash_interm_offset = offsetof(struct sha1_state, state),
+ .sw_hash_interm_size = sizeof(
+ ((struct sha1_state *)0)->state),
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_u.aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CBC,
+ .sa_op = AL_CRYPT_ENC_AUTH,
+ .auth_type = AL_CRYPT_AUTH_SHA2,
+ .sha2_mode = AL_CRYPT_SHA2_256,
+ .sw_hash_name = "sha256",
+ .sw_hash_interm_offset = offsetof(struct sha256_state, state),
+ .sw_hash_interm_size = sizeof(
+ ((struct sha256_state *)0)->state),
+ },
+ {
+ .name = "authenc(hmac(sha384),cbc(aes))",
+ .driver_name = "authenc-hmac-sha384-cbc-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_u.aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CBC,
+ .sa_op = AL_CRYPT_ENC_AUTH,
+ .auth_type = AL_CRYPT_AUTH_SHA2,
+ .sha2_mode = AL_CRYPT_SHA2_384,
+ .sw_hash_name = "sha384",
+ /* sha384 uses the sha512 state layout */
+ .sw_hash_interm_offset = offsetof(struct sha512_state, state),
+ .sw_hash_interm_size = sizeof(
+ ((struct sha512_state *)0)->state),
+ },
+ {
+ .name = "authenc(hmac(sha512),cbc(aes))",
+ .driver_name = "authenc-hmac-sha512-cbc-aes-al",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_u.aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .enc_type = AL_CRYPT_AES_CBC,
+ .sa_op = AL_CRYPT_ENC_AUTH,
+ .auth_type = AL_CRYPT_AUTH_SHA2,
+ .sha2_mode = AL_CRYPT_SHA2_512,
+ .sw_hash_name = "sha512",
+ .sw_hash_interm_offset = offsetof(struct sha512_state, state),
+ .sw_hash_interm_size = sizeof(
+ ((struct sha512_state *)0)->state),
+ },
+};
+
+/* Runtime registration record: one per registered crypto_alg, linked on
+ * the device's alg list; carries the SA parameters copied to each tfm. */
+struct al_crypto_alg {
+ struct list_head entry;
+ struct al_crypto_device *device;
+ enum al_crypto_sa_enc_type enc_type;
+ enum al_crypto_sa_op sa_op;
+ enum al_crypto_sa_auth_type auth_type;
+ enum al_crypto_sa_sha2_mode sha2_mode;
+ char sw_hash_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int sw_hash_interm_offset;
+ unsigned int sw_hash_interm_size;
+ struct crypto_alg crypto_alg;
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Common tfm init: bind the tfm to a channel (round-robin over the
+ * non-CRC channels), seed the SA from the alg template and allocate the
+ * DMA-coherent HW SA.
+ * Fixes: the dma_alloc_coherent() result was not checked (a NULL hw_sa
+ * would later be dereferenced by al_crypto_hw_sa_init()), and ctx->sa was
+ * memset twice (it is embedded in ctx, which is already zeroed). */
+static int al_crypto_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct al_crypto_alg *al_crypto_alg =
+ container_of(alg, struct al_crypto_alg, crypto_alg);
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct al_crypto_device *device = al_crypto_alg->device;
+ int chan_idx = atomic_inc_return(&device->tfm_count) %
+ (device->num_channels - device->crc_channels);
+
+ dev_dbg(&device->pdev->dev, "%s\n", __func__);
+
+ /* zeroes the whole ctx, including the embedded sa */
+ memset(ctx, 0, sizeof(struct al_crypto_ctx));
+
+ ctx->chan = device->channels[chan_idx];
+
+ ctx->sa.enc_type = al_crypto_alg->enc_type;
+ ctx->sa.sa_op = al_crypto_alg->sa_op;
+ ctx->hw_sa = dma_alloc_coherent(&device->pdev->dev,
+ sizeof(struct al_crypto_hw_sa),
+ &ctx->hw_sa_dma_addr,
+ GFP_KERNEL);
+ if (!ctx->hw_sa)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* ablkcipher tfm init: common init, then set the per-request context size
+ * and bump the tfm counter.
+ * Fix: the return value of al_crypto_cra_init() was ignored, so an
+ * allocation failure there was silently reported as success. */
+static int al_crypto_cra_init_ablkcipher(struct crypto_tfm *tfm)
+{
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ int rc;
+
+ rc = al_crypto_cra_init(tfm);
+ if (rc)
+ return rc;
+
+ tfm->crt_ablkcipher.reqsize =
+ sizeof(struct al_crypto_ablkcipher_req_ctx);
+
+ AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+ AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.ablkcipher_tfms, 1);
+ AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead tfm init: common init, per-request context size, optional SW shash
+ * for HMAC key preprocessing, and a DMA-coherent IV buffer seeded with
+ * random bytes (first IV for givencrypt).
+ * Fixes: propagate al_crypto_cra_init() failure; check the
+ * dma_alloc_coherent() result before get_random_bytes() dereferences it
+ * (and release the shash on that path); complete the truncated dev_err
+ * message. */
+static int al_crypto_cra_init_aead(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct al_crypto_alg *al_crypto_alg =
+ container_of(alg, struct al_crypto_alg, crypto_alg);
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct al_crypto_device *device = al_crypto_alg->device;
+ struct crypto_shash *sw_hash = NULL;
+ int rc;
+
+ dev_dbg(&device->pdev->dev, "%s\n", __func__);
+
+ rc = al_crypto_cra_init(tfm);
+ if (rc)
+ return rc;
+
+ tfm->crt_aead.reqsize =
+ sizeof(struct al_crypto_aead_req_ctx);
+
+ AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+ AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.aead_tfms, 1);
+ AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+ ctx->sa.auth_type = al_crypto_alg->auth_type;
+ ctx->sa.sha2_mode = al_crypto_alg->sha2_mode;
+
+ /* Allocate SW hash for hmac long key hashing and key XOR ipad/opad
+ * intermediate calculations
+ */
+ if (strlen(al_crypto_alg->sw_hash_name)) {
+ /* TODO: is CRYPTO_ALG_NEED_FALLBACK needed here? */
+ sw_hash = crypto_alloc_shash(al_crypto_alg->sw_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(sw_hash)) {
+ dev_err(to_dev(ctx->chan),
+ "Failed to allocate SW hash %s\n",
+ al_crypto_alg->sw_hash_name);
+ return PTR_ERR(sw_hash);
+ }
+ }
+ ctx->sw_hash = sw_hash;
+
+ ctx->iv = dma_alloc_coherent(&device->pdev->dev,
+ AL_CRYPTO_MAX_IV_LENGTH,
+ &ctx->iv_dma_addr,
+ GFP_KERNEL);
+ if (!ctx->iv) {
+ if (ctx->sw_hash) {
+ crypto_free_shash(ctx->sw_hash);
+ ctx->sw_hash = NULL;
+ }
+ return -ENOMEM;
+ }
+
+ /* random first IV */
+ get_random_bytes(ctx->iv, AL_CRYPTO_MAX_IV_LENGTH);
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Common tfm teardown: evict this ctx's SA from the channel cache and
+ * free the DMA-coherent HW SA allocated in al_crypto_cra_init(). */
+static void al_crypto_cra_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct al_crypto_alg *al_crypto_alg =
+ container_of(alg, struct al_crypto_alg, crypto_alg);
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct al_crypto_device *device = al_crypto_alg->device;
+
+ dev_dbg(&device->pdev->dev, "%s\n", __func__);
+
+ /* LRU list access has to be protected */
+ spin_lock_bh(&ctx->chan->prep_lock);
+ if (ctx->cache_state.cached)
+ al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+ spin_unlock_bh(&ctx->chan->prep_lock);
+
+ /* hw_sa_dma_addr is zero if the init-time allocation failed */
+ if (ctx->hw_sa_dma_addr)
+ dma_free_coherent(&device->pdev->dev,
+ sizeof(struct al_crypto_hw_sa),
+ ctx->hw_sa,
+ ctx->hw_sa_dma_addr);
+
+ return;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* ablkcipher tfm teardown: common exit plus tfm counter decrement. */
+static void al_crypto_cra_exit_ablkcipher(struct crypto_tfm *tfm)
+{
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ al_crypto_cra_exit(tfm);
+
+ AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+ AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.ablkcipher_tfms, 1);
+ AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead tfm teardown: common exit, counter decrement, then release the
+ * IV DMA buffer and the SW shash allocated in al_crypto_cra_init_aead(). */
+static void al_crypto_cra_exit_aead(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct al_crypto_alg *al_crypto_alg =
+ container_of(alg, struct al_crypto_alg, crypto_alg);
+ struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct al_crypto_device *device = al_crypto_alg->device;
+
+ dev_dbg(&device->pdev->dev, "%s\n", __func__);
+
+ al_crypto_cra_exit(tfm);
+
+ AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+ AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.aead_tfms, 1);
+ AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+ if (ctx->iv_dma_addr)
+ dma_free_coherent(&device->pdev->dev,
+ AL_CRYPTO_MAX_IV_LENGTH,
+ ctx->iv,
+ ctx->iv_dma_addr);
+
+ if (ctx->sw_hash)
+ crypto_free_shash(ctx->sw_hash);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* setkey for DES/3DES modes: validates key length per mode, rejects weak
+ * single-DES keys when the tfm requests it, copies the key into the SA,
+ * regenerates the HW SA image, and invalidates the cached SA entry. */
+static int ablkcipher_setkey_des(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES_EXPKEY_WORDS];
+ struct al_crypto_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ u32 *flags = &ablkcipher->base.crt_flags;
+ int ret;
+
+ dev_dbg(to_dev(ctx->chan), "%s\n", __func__);
+
+ if ((ctx->sa.enc_type == AL_CRYPT_TRIPDES_CBC) ||
+ (ctx->sa.enc_type == AL_CRYPT_TRIPDES_ECB)) {
+ ctx->sa.tripdes_m = AL_CRYPT_TRIPDES_EDE;
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ } else {
+ ctx->sa.tripdes_m = 0;
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ /* check for weak keys. */
+ /* Weak keys are keys that cause the encryption mode of DES
+ * to act identically to the decryption mode of DES */
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+ }
+
+ /* TODO: optimize HAL to hold ptrs to save this memcpy */
+ /* copy the key to the sa */
+ memcpy(&ctx->sa.enc_key, key, keylen);
+
+ al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+ /* mark the sa as not cached, will update in next xaction */
+ spin_lock_bh(&ctx->chan->prep_lock);
+ if (ctx->cache_state.cached)
+ al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+ spin_unlock_bh(&ctx->chan->prep_lock);
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* setkey for AES modes: maps the key length to the HW key-size enum,
+ * copies the key into the SA, regenerates the HW SA image, and
+ * invalidates the cached SA entry. */
+static int ablkcipher_setkey_aes(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct al_crypto_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+
+ dev_dbg(to_dev(ctx->chan), "%s\n", __func__);
+
+ switch (keylen) {
+ case 16: /* 128 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_128;
+ break;
+ case 24: /* 192 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_192;
+ break;
+ case 32: /* 256 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_256;
+ break;
+ default: /* Invalid key size */
+ return -EINVAL;
+ break;
+ }
+
+ /* As for now we don't support GCM or CCM modes */
+ if ((ctx->sa.enc_type == AL_CRYPT_AES_GCM) ||
+ (ctx->sa.enc_type == AL_CRYPT_AES_CCM)) {
+ BUG();
+ }
+
+ /* TODO: optimize HAL to hold ptrs to save this memcpy */
+ /* copy the key to the sa */
+ memcpy(&ctx->sa.enc_key, key, keylen);
+
+ /* Sets the counter increment to 128 bit to be aligned with the
+ * linux implementation. We know it contradicts the NIST spec.
+ * If and when the linux will be aligned with the spec we should fix it
+ * too.
+ * This variable is relevant only for CTR, GCM and CCM modes*/
+ ctx->sa.cntr_size = AL_CRYPT_CNTR_128_BIT;
+
+ al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+ /* mark the sa as not cached, will update in next xaction */
+ spin_lock_bh(&ctx->chan->prep_lock);
+ if (ctx->cache_state.cached)
+ al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+ spin_unlock_bh(&ctx->chan->prep_lock);
+
+ return 0;
+}
+
+
+/******************************************************************************
+ *****************************************************************************/
+/* DMA unmap buffers for ablkcipher request
+ */
+/* Undo the DMA mappings made in ablkcipher_do_crypt(): the src/dst
+ * scatterlists (bidirectional when in-place) and, when desc is non-NULL
+ * and an IV was mapped, the single-mapped IV buffer. desc may be NULL on
+ * early error paths where no descriptor/IV mapping exists yet. */
+static inline void al_crypto_dma_unmap_ablkcipher(struct al_crypto_chan *chan,
+ struct ablkcipher_request *req,
+ int src_nents, int dst_nents,
+ struct al_crypto_sw_desc *desc)
+{
+
+ if (likely(req->src == req->dst)) {
+ dma_unmap_sg(to_dev(chan), req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(to_dev(chan), req->src, src_nents,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(to_dev(chan), req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
+
+ if (desc && desc->hal_xaction.enc_iv_in.len)
+ dma_unmap_single(to_dev(chan),
+ desc->hal_xaction.enc_iv_in.addr,
+ desc->hal_xaction.enc_iv_in.len,
+ DMA_TO_DEVICE);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup single ablkcipher request - invoked from cleanup tasklet (interrupt
+ * handler)
+ */
+/* Cleanup single ablkcipher request - invoked from cleanup tasklet
+ * (interrupt handler): unmap DMA and complete the request.
+ * NOTE(review): comp_status is ignored here and completion is always
+ * reported as 0, unlike the aead path which checks AL_CRYPT_AUTH_ERROR —
+ * confirm no error bits apply to enc-only transactions. */
+void al_crypto_cleanup_single_ablkcipher(
+ struct al_crypto_chan *chan,
+ struct al_crypto_sw_desc *desc,
+ uint32_t comp_status)
+{
+ struct ablkcipher_request *req =
+ (struct ablkcipher_request *)desc->req;
+
+ al_crypto_dma_unmap_ablkcipher(chan, req, desc->src_nents,
+ desc->dst_nents, desc);
+
+ req->base.complete(&req->base, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Per-channel stats bookkeeping for an ablkcipher transaction: direction
+ * counters/bytes plus a request-size histogram (<=512, 512-2048,
+ * 2048-4096, >4096). Caller holds the prep lock context. */
+static inline void ablkcipher_update_stats(
+ struct al_crypto_transaction *xaction,
+ struct al_crypto_chan *chan)
+{
+ if (xaction->dir == AL_CRYPT_ENCRYPT) {
+ AL_CRYPTO_STATS_INC(chan->stats_prep.ablkcipher_encrypt_reqs,
+ 1);
+ AL_CRYPTO_STATS_INC(chan->stats_prep.ablkcipher_encrypt_bytes,
+ xaction->enc_in_len);
+ } else {
+ AL_CRYPTO_STATS_INC(chan->stats_prep.ablkcipher_decrypt_reqs,
+ 1);
+ AL_CRYPTO_STATS_INC(chan->stats_prep.ablkcipher_decrypt_bytes,
+ xaction->enc_in_len);
+ }
+
+ if (xaction->enc_in_len <= 512)
+ AL_CRYPTO_STATS_INC(
+ chan->stats_prep.ablkcipher_reqs_le512, 1);
+ else if ((xaction->enc_in_len > 512) && (xaction->enc_in_len <= 2048))
+ AL_CRYPTO_STATS_INC(
+ chan->stats_prep.ablkcipher_reqs_512_2048, 1);
+ else if ((xaction->enc_in_len > 2048) && (xaction->enc_in_len <= 4096))
+ AL_CRYPTO_STATS_INC(
+ chan->stats_prep.ablkcipher_reqs_2048_4096, 1);
+ else
+ AL_CRYPTO_STATS_INC(
+ chan->stats_prep.ablkcipher_reqs_gt4096, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Translate the request's src/dst scatterlists into the HAL transaction's
+ * buffer arrays; for in-place requests the dst array aliases the src
+ * mappings instead of walking the list twice. */
+static inline void ablkcipher_prepare_xaction_buffers(
+ struct ablkcipher_request *req,
+ struct al_crypto_sw_desc *desc)
+{
+ int i;
+ int src_idx, dst_idx;
+ struct al_crypto_transaction *xaction = &desc->hal_xaction;
+
+ src_idx = 0;
+ dst_idx = 0;
+
+ sg_map_to_xaction_buffers(req->src, desc->src_bufs, req->nbytes,
+ &src_idx);
+ if (likely(req->src == req->dst)) {
+ for (i = 0; i < src_idx; i++)
+ desc->dst_bufs[i] = desc->src_bufs[i];
+ dst_idx = src_idx;
+ } else
+ sg_map_to_xaction_buffers(req->dst, desc->dst_bufs,
+ req->nbytes, &dst_idx);
+
+ xaction->src_size = xaction->enc_in_len = req->nbytes;
+ xaction->src.bufs = &desc->src_bufs[0];
+ xaction->src.num = src_idx;
+ xaction->dst.bufs = &desc->dst_bufs[0];
+ xaction->dst.num = dst_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Prepare crypto transaction to be processed by HAL and submit to HAL
+ * Grabs and releases producer lock for relevant sw ring
+ */
+/* Prepare crypto transaction to be processed by HAL and submit to HAL
+ * Grabs and releases producer lock for relevant sw ring
+ *
+ * Flow: DMA-map src/dst, reserve a sw descriptor (queueing the request in
+ * sw_queue when the ring is full), build the HAL transaction (buffers,
+ * IV mapping for non-ECB modes, SA cache placement), then submit.
+ * Returns -EINPROGRESS on successful submission; lock=false is used by
+ * ablkcipher_process_queue() which already holds prep_lock.
+ * NOTE(review): dma_map_sg() return values are not checked — confirm
+ * mapping failure is impossible on this platform. */
+static int ablkcipher_do_crypt(struct ablkcipher_request *req, bool lock)
+{
+ int idx, rc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct al_crypto_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct al_crypto_ablkcipher_req_ctx *rctx = ablkcipher_request_ctx(req);
+ enum al_crypto_dir dir = rctx->dir;
+ struct al_crypto_chan *chan = ctx->chan;
+ struct al_crypto_transaction *xaction;
+ int src_nents = 0, dst_nents = 0;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct al_crypto_sw_desc *desc;
+
+ src_nents = sg_count(req->src, req->nbytes);
+
+ if (req->dst != req->src)
+ dst_nents = sg_count(req->dst, req->nbytes);
+ else
+ dst_nents = src_nents;
+
+ /* Currently supported max sg chain length is
+ * AL_CRYPTO_OP_MAX_DATA_BUFS(12) which is minimum of descriptors left
+ * for data in a transaction:
+ * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) -
+ * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27
+ * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) -
+ * 1(next_enc_iv_out) - 1(auth_sign_out) = 27
+ */
+ BUG_ON((src_nents > AL_CRYPTO_OP_MAX_BUFS) ||
+ (dst_nents > AL_CRYPTO_OP_MAX_BUFS));
+
+ if (likely(req->src == req->dst)) {
+ dma_map_sg(to_dev(chan), req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(to_dev(chan), req->src, src_nents,
+ DMA_TO_DEVICE);
+ dma_map_sg(to_dev(chan), req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
+
+ if (likely(lock))
+ spin_lock_bh(&chan->prep_lock);
+
+ if (likely(al_crypto_get_sw_desc(chan, 1) == 0))
+ idx = chan->head;
+ else {
+ /* ring full: park the request on the backlog queue and undo
+ * the mappings; it will be resubmitted from the cleanup path */
+ rc = ablkcipher_enqueue_request(&chan->sw_queue, req);
+
+ al_crypto_dma_unmap_ablkcipher(chan, req, src_nents, dst_nents,
+ NULL);
+
+ if (likely(lock))
+ spin_unlock_bh(&chan->prep_lock);
+
+ dev_dbg(
+ to_dev(chan),
+ "%s: al_crypto_get_sw_desc failed!\n",
+ __func__);
+
+ return rc;
+ }
+
+ chan->sw_desc_num_locked = 1;
+ chan->tx_desc_produced = 0;
+
+ desc = al_crypto_get_ring_ent(chan, idx);
+ desc->req = (void *)req;
+ desc->req_type = AL_CRYPTO_REQ_ABLKCIPHER;
+ desc->src_nents = src_nents;
+ desc->dst_nents = dst_nents;
+
+ /* prepare hal transaction */
+ xaction = &desc->hal_xaction;
+ memset(xaction, 0, sizeof(struct al_crypto_transaction));
+ xaction->dir = dir;
+
+ ablkcipher_prepare_xaction_buffers(req, desc);
+
+ /* ECB modes carry no IV; all others map req->info for the engine */
+ if ((ctx->sa.enc_type != AL_CRYPT_AES_ECB) &&
+ (ctx->sa.enc_type != AL_CRYPT_DES_ECB) &&
+ (ctx->sa.enc_type != AL_CRYPT_TRIPDES_ECB)) {
+ xaction->enc_iv_in.addr = dma_map_single(to_dev(chan),
+ req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(to_dev(chan), xaction->enc_iv_in.addr)) {
+ dev_err(to_dev(chan),
+ "dma_map_single failed!\n");
+
+ al_crypto_dma_unmap_ablkcipher(chan, req, src_nents, dst_nents,
+ desc);
+
+ if (likely(lock))
+ spin_unlock_bh(&chan->prep_lock);
+ return -ENOMEM;
+ }
+ xaction->enc_iv_in.len = ivsize;
+ }
+
+ /* place the SA in the channel cache, or refresh its LRU slot */
+ if (!ctx->cache_state.cached) {
+ xaction->sa_indx = al_crypto_cache_replace_lru(chan,
+ &ctx->cache_state, NULL);
+ xaction->sa_in.addr = ctx->hw_sa_dma_addr;
+ xaction->sa_in.len = sizeof(struct al_crypto_hw_sa);
+ } else {
+ al_crypto_cache_update_lru(chan, &ctx->cache_state);
+ xaction->sa_indx = ctx->cache_state.idx;
+ }
+
+ xaction->flags = AL_SSM_INTERRUPT;
+
+ ablkcipher_update_stats(xaction, chan);
+
+ /* send crypto transaction to engine */
+ rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx,
+ &desc->hal_xaction);
+ if (unlikely(rc != 0)) {
+ dev_err(to_dev(chan),
+ "al_crypto_dma_prepare failed!\n");
+
+ al_crypto_dma_unmap_ablkcipher(chan, req, src_nents, dst_nents,
+ desc);
+
+ if (likely(lock))
+ spin_unlock_bh(&chan->prep_lock);
+ return rc;
+ }
+
+ chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+ al_crypto_tx_submit(chan);
+
+ if (likely(lock))
+ spin_unlock_bh(&chan->prep_lock);
+
+ return -EINPROGRESS;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Drain the channel's backlog queue while ring space is available,
+ * resubmitting each parked request via ablkcipher_do_crypt(lock=false)
+ * under the prep lock held here. */
+int ablkcipher_process_queue(struct al_crypto_chan *chan)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct ablkcipher_request *req;
+ int err = 0;
+
+ spin_lock_bh(&chan->prep_lock);
+
+ while (al_crypto_ring_space(chan) > 0) {
+ backlog = crypto_get_backlog(&chan->sw_queue);
+ async_req = crypto_dequeue_request(&chan->sw_queue);
+
+ if (!async_req)
+ break;
+
+ /* notify backlogged submitter its request is now in flight */
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = container_of(async_req, struct ablkcipher_request, base);
+
+ err = ablkcipher_do_crypt(req, false);
+ if (err != -EINPROGRESS)
+ break;
+ }
+
+ spin_unlock_bh(&chan->prep_lock);
+
+ return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* ablkcipher .encrypt entry point: record direction and submit. */
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct al_crypto_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct al_crypto_ablkcipher_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct al_crypto_chan *chan = ctx->chan;
+
+ dev_dbg(to_dev(chan), "ablkcipher_encrypt %p\n", req);
+
+ rctx->dir = AL_CRYPT_ENCRYPT;
+ return ablkcipher_do_crypt(req, true);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* ablkcipher .decrypt entry point: record direction and submit. */
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct al_crypto_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct al_crypto_ablkcipher_req_ctx *rctx = ablkcipher_request_ctx(req);
+ struct al_crypto_chan *chan = ctx->chan;
+
+ dev_dbg(to_dev(chan), "ablkcipher_decrypt %p\n", req);
+
+ rctx->dir = AL_CRYPT_DECRYPT;
+ return ablkcipher_do_crypt(req, true);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set the authentication half of an authenc key: delegates HMAC
+ * ipad/opad precompute to hmac_setkey() using the alg's SW hash state
+ * layout. No-op (success) when the alg has no SW hash. */
+static inline int aead_auth_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ struct al_crypto_alg *al_crypto_alg =
+ container_of(alg, struct al_crypto_alg, crypto_alg);
+ struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (!ctx->sw_hash)
+ return 0;
+
+ return hmac_setkey(ctx, key, keylen,
+ al_crypto_alg->sw_hash_interm_offset,
+ al_crypto_alg->sw_hash_interm_size);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead .setkey: parses the rtattr-encoded authenc() key blob
+ * (auth key || enc key, with enckeylen in the leading param), programs
+ * the AES key size and both keys into the SA, rebuilds the HW SA and
+ * invalidates its cache slot. */
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ int rc = 0;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ /* step past the rtattr header; key now points at auth||enc keys */
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ if ((ctx->sa.enc_type == AL_CRYPT_AES_CBC) ||
+ (ctx->sa.enc_type == AL_CRYPT_AES_ECB) ||
+ (ctx->sa.enc_type == AL_CRYPT_AES_CTR)) {
+ switch (enckeylen) {
+ case 16: /* 128 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_128;
+ break;
+ case 24: /* 192 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_192;
+ break;
+ case 32: /* 256 bit */
+ ctx->sa.aes_ksize = AL_CRYPT_AES_256;
+ break;
+ default: /* Invalid key size */
+ return -EINVAL;
+ break;
+ }
+ } else {
+ /* Currently only AES is supported */
+ BUG();
+ }
+
+ rc = aead_auth_setkey(aead, key, authkeylen);
+
+ if (!rc) {
+ /* TODO: optimize HAL to hold ptrs to save this memcpy */
+ /* copy the key to the sa */
+ memcpy(&ctx->sa.enc_key, key + authkeylen, enckeylen);
+
+ /* encrypt-then-sign on encrypt; verify handled by HW on dec */
+ ctx->sa.sign_after_enc = true;
+ ctx->sa.auth_after_dec = false;
+
+ /* Sets the counter increment to 128 bit to be aligned with the
+ * linux implementation. We know it contradicts the NIST spec.
+ * If and when the linux will be aligned with the spec we should fix it
+ * too.
+ * This variable is relevant only for CTR, GCM and CCM modes*/
+ ctx->sa.cntr_size = AL_CRYPT_CNTR_128_BIT;
+
+ al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+ /* mark the sa as not cached, will update in next xaction */
+ spin_lock_bh(&ctx->chan->prep_lock);
+ if (ctx->cache_state.cached)
+ al_crypto_cache_remove_lru(ctx->chan,
+ &ctx->cache_state);
+ spin_unlock_bh(&ctx->chan->prep_lock);
+ }
+
+ return rc;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead .setauthsize: authsize must be a non-zero multiple of 4 no larger
+ * than the alg's maxauthsize; the SA encodes it as (words - 1). Rebuilds
+ * the HW SA and invalidates its cache slot. */
+static int aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+{
+ struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+ int max = crypto_aead_alg(aead)->maxauthsize;
+ int signature_size = (authsize >> 2) - 1;
+
+ if (signature_size < 0 || authsize > max || (authsize & 3))
+ return -EINVAL;
+
+ ctx->sa.signature_size = signature_size;
+ ctx->sa.auth_signature_msb = true;
+
+ al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+ /* mark the sa as not cached, will update in next xaction */
+ spin_lock_bh(&ctx->chan->prep_lock);
+ if (ctx->cache_state.cached)
+ al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+ spin_unlock_bh(&ctx->chan->prep_lock);
+
+ return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* DMA unmap buffers for aead request
+ */
+/* Undo the DMA mappings of an aead request: src/dst scatterlists
+ * (bidirectional when in-place), the associated-data scatterlist, and —
+ * when desc is non-NULL with a mapped IV — the single-mapped IV buffer. */
+static inline void al_crypto_dma_unmap_aead(struct al_crypto_chan *chan,
+ struct aead_request *req,
+ int src_nents, int assoc_nents, int dst_nents,
+ struct al_crypto_sw_desc *desc)
+{
+ if (likely(req->src == req->dst)) {
+ dma_unmap_sg(to_dev(chan), req->src,
+ src_nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(to_dev(chan), req->src, src_nents,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(to_dev(chan), req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
+
+ dma_unmap_sg(to_dev(chan), req->assoc, assoc_nents,
+ DMA_BIDIRECTIONAL);
+
+ if (desc && desc->hal_xaction.enc_iv_in.len)
+ dma_unmap_single(to_dev(chan),
+ desc->hal_xaction.enc_iv_in.addr,
+ desc->hal_xaction.enc_iv_in.len,
+ DMA_TO_DEVICE);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup single aead request - invoked from cleanup tasklet (interrupt
+ * handler)
+ *
+ * Unmaps all DMA buffers of the completed request and invokes the
+ * crypto-API completion callback.  An authentication failure flagged by
+ * the HW completion status is translated to -EBADMSG, as the aead API
+ * expects for a bad tag.
+ */
+void al_crypto_cleanup_single_aead(
+	struct al_crypto_chan *chan,
+	struct al_crypto_sw_desc *desc,
+	uint32_t comp_status)
+{
+	struct aead_request *req =
+		(struct aead_request *)desc->req;
+	int err = 0;
+
+	al_crypto_dma_unmap_aead(chan, req, desc->src_nents, desc->assoc_nents,
+			desc->dst_nents, desc);
+
+	if (comp_status & AL_CRYPT_AUTH_ERROR)
+		err = -EBADMSG;
+
+	req->base.complete(&req->base, err);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Update per-channel statistics for one prepared aead transaction.
+ *
+ * Counts requests and byte totals per direction, plus a request-size
+ * histogram keyed on the total authenticated length.
+ */
+static inline void aead_update_stats(struct al_crypto_transaction *xaction,
+		struct al_crypto_chan *chan)
+{
+	if (xaction->dir == AL_CRYPT_ENCRYPT) {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_encrypt_hash_reqs, 1);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_encrypt_bytes,
+				xaction->enc_in_len);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_hash_bytes,
+				xaction->auth_in_len);
+	} else {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_decrypt_validate_reqs,
+				1);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_decrypt_bytes,
+				xaction->enc_in_len);
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_validate_bytes,
+				xaction->auth_in_len);
+	}
+
+	/* size histogram; the else-if chain already guarantees the lower
+	 * bound of each bucket, so the original's redundant ">" tests
+	 * were dropped */
+	if (xaction->auth_in_len <= 512)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_le512, 1);
+	else if (xaction->auth_in_len <= 2048)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_512_2048, 1);
+	else if (xaction->auth_in_len <= 4096)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_2048_4096, 1);
+	else
+		AL_CRYPTO_STATS_INC(chan->stats_prep.aead_reqs_gt4096, 1);
+}
+
+/* Build the HAL src/dst buffer lists for an aead transaction.
+ *
+ * Buffer layout, in order: assoc data, a DMA-mapped copy of the IV,
+ * then the crypt payload.  assoc and IV are placed on both src and dst
+ * lists so the HW authenticates them; only the payload is en/decrypted.
+ * The IV mapping is undone in al_crypto_dma_unmap_aead().
+ * Returns 0 on success, -ENOMEM if the IV could not be DMA-mapped.
+ */
+static inline int aead_prepare_xaction_buffers(
+	struct aead_request *req,
+	struct al_crypto_sw_desc *desc,
+	u8 *iv)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+	struct al_crypto_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	int i;
+	int src_idx, src_idx_old, dst_idx;
+	struct scatterlist *sg;
+	int ivsize = crypto_aead_ivsize(aead);
+	struct al_crypto_transaction *xaction = &desc->hal_xaction;
+	int rc = 0;
+
+	src_idx = 0;
+	dst_idx = 0;
+	/* add assoc buffers */
+	sg = req->assoc;
+	for (i = 0; i < desc->assoc_nents; i++) {
+		desc->src_bufs[src_idx].addr = desc->dst_bufs[dst_idx].addr =
+			sg_dma_address(sg);
+		desc->src_bufs[src_idx].len = desc->dst_bufs[dst_idx].len =
+			sg_dma_len(sg);
+		/* assoc bytes shift the encryption offset and count towards
+		 * the authenticated length */
+		xaction->enc_in_off += desc->src_bufs[src_idx].len;
+		xaction->auth_in_len += desc->src_bufs[src_idx].len;
+		sg = sg_next(sg);
+		src_idx++;
+		dst_idx++;
+	}
+
+	/* IV might be allocated on stack, copy for DMA */
+	memcpy(rctx->iv, iv, ivsize);
+	/* map and add IV */
+	desc->src_bufs[src_idx].addr = desc->dst_bufs[dst_idx].addr =
+		xaction->enc_iv_in.addr =
+		dma_map_single(to_dev(chan), rctx->iv, ivsize,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(to_dev(chan), xaction->enc_iv_in.addr)) {
+		dev_err(to_dev(chan),
+			"dma_map_single failed!\n");
+		return -ENOMEM;
+	}
+	desc->src_bufs[src_idx].len = desc->dst_bufs[dst_idx].len =
+		xaction->enc_iv_in.len =
+		ivsize;
+
+	xaction->enc_in_off += desc->src_bufs[src_idx].len;
+	xaction->auth_in_len += desc->src_bufs[src_idx].len;
+	src_idx++;
+	dst_idx++;
+
+	src_idx_old = src_idx;
+	sg_map_to_xaction_buffers(req->src, desc->src_bufs, req->cryptlen,
+			&src_idx);
+	if (likely(req->src == req->dst)) {
+		/* in-place operation: dst entries mirror src */
+		for (i = src_idx_old; i < src_idx; i++)
+			desc->dst_bufs[i] = desc->src_bufs[i];
+		dst_idx = src_idx;
+	} else
+		sg_map_to_xaction_buffers(req->dst, desc->dst_bufs,
+				req->cryptlen, &dst_idx);
+
+	/* add enc+auth data */
+	xaction->auth_in_len += req->cryptlen;
+	xaction->enc_in_len = req->cryptlen;
+	xaction->src_size = xaction->auth_in_len;
+	xaction->src.bufs = &desc->src_bufs[0];
+	xaction->src.num = src_idx;
+	xaction->dst.bufs = &desc->dst_bufs[0];
+	xaction->dst.num = dst_idx;
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Prepare encryption+auth transaction to be processed by HAL
+ *
+ * Fills the HAL transaction from the request: data buffers, the
+ * signature (auth tag) buffer and the SA cache slot.  On encrypt the
+ * tag is written after the ciphertext at the tail of req->dst; on
+ * decrypt it is read from the tail of req->src.
+ */
+static inline int aead_prepare_xaction(enum al_crypto_dir dir,
+		struct aead_request *req,
+		struct al_crypto_sw_desc *desc,
+		u8 *iv)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+	struct al_crypto_chan *chan = ctx->chan;
+	struct al_crypto_transaction *xaction;
+	struct scatterlist *sg;
+	int ivsize = crypto_aead_ivsize(aead);
+	int authsize = crypto_aead_authsize(aead);
+	int rc = 0;
+
+	xaction = &desc->hal_xaction;
+	memset(xaction, 0, sizeof(struct al_crypto_transaction));
+	xaction->dir = dir;
+
+	rc = aead_prepare_xaction_buffers(req, desc, iv);
+	if (unlikely(rc != 0)) {
+		dev_err(to_dev(chan),
+			"aead_prepare_xaction_buffers failed!\n");
+		return rc;
+	}
+
+	if (dir == AL_CRYPT_ENCRYPT) {
+		/* set signature buffer for auth.
+		 * Advance to the last sg entry; the original discarded the
+		 * return value of sg_next() and would spin forever on any
+		 * multi-entry scatterlist. */
+		sg = req->dst;
+		while (!sg_is_last(sg))
+			sg = sg_next(sg);
+
+		/* assume that auth result is not scattered */
+		BUG_ON(sg_dma_len(sg) < authsize);
+		xaction->auth_sign_out.addr =
+			sg_dma_address(sg) + sg_dma_len(sg) - authsize;
+		xaction->auth_sign_out.len = authsize;
+
+		/* get next iv for iv generation */
+		xaction->enc_next_iv_out.addr = ctx->iv_dma_addr;
+		xaction->enc_next_iv_out.len = ivsize;
+	} else {
+		sg = req->src;
+		while (!sg_is_last(sg))
+			sg = sg_next(sg);
+
+		/* assume that auth result is not scattered */
+		BUG_ON(sg_dma_len(sg) < authsize);
+		xaction->auth_sign_in.addr =
+			sg_dma_address(sg) + sg_dma_len(sg) - authsize;
+		xaction->auth_sign_in.len = authsize;
+	}
+
+	if (!ctx->cache_state.cached) {
+		/* SA not resident in the HW cache: evict an LRU slot and DMA
+		 * the SA in as part of this transaction */
+		xaction->sa_indx = al_crypto_cache_replace_lru(chan,
+				&ctx->cache_state, NULL);
+		xaction->sa_in.addr = ctx->hw_sa_dma_addr;
+		xaction->sa_in.len = sizeof(struct al_crypto_hw_sa);
+	} else {
+		al_crypto_cache_update_lru(chan, &ctx->cache_state);
+		xaction->sa_indx = ctx->cache_state.idx;
+	}
+
+	xaction->flags = AL_SSM_INTERRUPT;
+
+	aead_update_stats(xaction, chan);
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Prepare aead encryption and auth dma, call hal transaction preparation
+ * function and submit the request to HAL.
+ * Grabs and releases producer lock for relevant sw ring
+ *
+ * Returns -EINPROGRESS on successful submission (completion arrives via
+ * the cleanup tasklet), -EBUSY when no sw descriptor is free, or a
+ * negative error from the preparation steps.
+ */
+static int aead_perform(enum al_crypto_dir dir, struct aead_request *req,
+		u8 *iv)
+{
+	int idx, rc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+	struct al_crypto_chan *chan = ctx->chan;
+	int src_nents = 0, assoc_nents = 0, dst_nents = 0;
+	struct al_crypto_sw_desc *desc;
+	int authsize = crypto_aead_authsize(aead);
+
+	src_nents = sg_count(req->src, req->cryptlen + authsize);
+	if (req->assoc)
+		assoc_nents = sg_count(req->assoc, req->assoclen);
+
+	if (req->dst != req->src)
+		dst_nents = sg_count(req->dst, req->cryptlen + authsize);
+	else
+		dst_nents = src_nents;
+
+	/* Currently supported max sg chain length is
+	 * AL_CRYPTO_OP_MAX_DATA_BUFS(12) which is minimum of descriptors left
+	 * for data in a transaction:
+	 * tx: 31(supported by HW) - 1(metadata) - 1(sa_in) -
+	 * 1(enc_iv_in|auth_iv_in) - 1(auth_sign_in) = 27
+	 * rx: 31(supported by HW) - 1(sa_out) - 1(enc_iv_out|auth_iv_out) -
+	 * 1(next_enc_iv_out) - 1(auth_sign_out) = 27
+	 */
+	BUG_ON((src_nents + assoc_nents + 1 > AL_CRYPTO_OP_MAX_BUFS) ||
+		(dst_nents + assoc_nents + 1 > AL_CRYPTO_OP_MAX_BUFS));
+
+	/* NOTE(review): dma_map_sg() return values are ignored here; a
+	 * failed or partial mapping would go undetected - TODO confirm
+	 * and handle */
+	if (likely(req->src == req->dst)) {
+		dma_map_sg(to_dev(chan), req->src, src_nents,
+			DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(to_dev(chan), req->src, src_nents,
+			DMA_TO_DEVICE);
+		dma_map_sg(to_dev(chan), req->dst, dst_nents,
+			DMA_FROM_DEVICE);
+	}
+
+	if (assoc_nents)
+		dma_map_sg(to_dev(chan), req->assoc, assoc_nents,
+			DMA_BIDIRECTIONAL);
+
+	spin_lock_bh(&chan->prep_lock);
+	if (likely(al_crypto_get_sw_desc(chan, 1) == 0))
+		idx = chan->head;
+	else {
+		/* ring full - unmap and let the caller retry later */
+		dev_dbg(
+			to_dev(chan),
+			"%s: al_crypto_get_sw_desc failed!\n",
+			__func__);
+
+		al_crypto_dma_unmap_aead(chan, req, src_nents, assoc_nents,
+				dst_nents, NULL);
+
+		spin_unlock_bh(&chan->prep_lock);
+
+		return -EBUSY;
+	}
+
+	chan->sw_desc_num_locked = 1;
+	chan->tx_desc_produced = 0;
+
+	desc = al_crypto_get_ring_ent(chan, idx);
+	desc->req = (void *)req;
+	desc->req_type = AL_CRYPTO_REQ_AEAD;
+	desc->src_nents = src_nents;
+	desc->assoc_nents = assoc_nents;
+	desc->dst_nents = dst_nents;
+
+	rc = aead_prepare_xaction(dir, req, desc, iv);
+	if (unlikely(rc != 0)) {
+		dev_err(to_dev(chan),
+			"aead_prepare_xaction failed!\n");
+
+		al_crypto_dma_unmap_aead(chan, req, src_nents, assoc_nents,
+				dst_nents, desc);
+
+		spin_unlock_bh(&chan->prep_lock);
+		return rc;
+	}
+
+	/* send crypto transaction to engine */
+	rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx,
+				&desc->hal_xaction);
+	if (unlikely(rc != 0)) {
+		dev_err(to_dev(chan),
+			"al_crypto_dma_prepare failed!\n");
+
+		al_crypto_dma_unmap_aead(chan, req, src_nents, assoc_nents,
+				dst_nents, desc);
+
+		spin_unlock_bh(&chan->prep_lock);
+		return rc;
+	}
+
+	chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+	al_crypto_tx_submit(chan);
+
+	spin_unlock_bh(&chan->prep_lock);
+
+	return -EINPROGRESS;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead encrypt entry point - encrypt using the caller-supplied IV in
+ * req->iv. */
+static int aead_encrypt(struct aead_request *req)
+{
+	return aead_perform(AL_CRYPT_ENCRYPT, req, req->iv);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead decrypt entry point.
+ *
+ * Strips the auth tag length from cryptlen before submitting: on
+ * decrypt req->cryptlen covers ciphertext + tag.
+ */
+static int aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int authsize = crypto_aead_authsize(aead);
+
+	/* req->cryptlen includes the authsize when decrypting.
+	 * cryptlen is unsigned, so the check must precede the subtraction:
+	 * the original's post-subtraction "< 0" test could never fire. */
+	BUG_ON(req->cryptlen < authsize);
+	req->cryptlen -= authsize;
+
+	return aead_perform(AL_CRYPT_DECRYPT, req, req->iv);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* aead encrypt with driver-generated IV.
+ *
+ * Copies the per-tfm IV (ctx->iv, initialized elsewhere - not visible
+ * here) into req->giv and XORs in the request sequence number so
+ * consecutive packets do not share an IV.
+ * NOTE(review): assumes ivsize >= 8 for the 64-bit XOR - confirm for
+ * all registered aead templates.
+ */
+static int aead_givencrypt(struct aead_givcrypt_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(&req->areq);
+	struct al_crypto_ctx *ctx = crypto_aead_ctx(aead);
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(aead));
+	/* avoid consecutive packets going out with same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+
+	return aead_perform(AL_CRYPT_ENCRYPT, &req->areq, req->giv);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate and populate a driver alg descriptor from a template.
+ *
+ * Returns the new descriptor or ERR_PTR(-ENOMEM).  The caller owns the
+ * memory and kfree()s it after unregistering the alg.
+ */
+static struct al_crypto_alg *al_crypto_alg_alloc(
+	struct al_crypto_device *device,
+	struct al_crypto_alg_template *template)
+{
+	struct al_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(struct al_crypto_alg), GFP_KERNEL);
+	if (!t_alg) {
+		dev_err(&device->pdev->dev, "failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_priority = AL_CRYPTO_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct al_crypto_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+		template->type;
+
+	/* NOTE(review): no default case - a template with any other type
+	 * would leave cra_init/cra_exit/cra_type unset; confirm all
+	 * entries in driver_algs use one of the two types below */
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_init = al_crypto_cra_init_ablkcipher;
+		alg->cra_exit = al_crypto_cra_exit_ablkcipher;
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_u.ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg->cra_init = al_crypto_cra_init_aead;
+		alg->cra_exit = al_crypto_cra_exit_aead;
+		alg->cra_type = &crypto_aead_type;
+		alg->cra_aead = template->template_u.aead;
+		/* aead algs use a sw hash to precompute the intermediate
+		 * hash state for HMAC */
+		snprintf(t_alg->sw_hash_name, CRYPTO_MAX_ALG_NAME, "%s",
+			template->sw_hash_name);
+		t_alg->sw_hash_interm_offset = template->sw_hash_interm_offset;
+		t_alg->sw_hash_interm_size = template->sw_hash_interm_size;
+		break;
+	}
+
+	t_alg->enc_type = template->enc_type;
+	t_alg->auth_type = template->auth_type;
+	t_alg->sha2_mode = template->sha2_mode;
+	t_alg->sa_op = template->sa_op;
+	t_alg->device = device;
+
+	return t_alg;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Register all supported crypto algorithms with the crypto API.
+ *
+ * Registration is best-effort: a failing alg is skipped (freed) and the
+ * loop continues.  Returns the status of the LAST registration attempt,
+ * so earlier failures may be masked - callers should rely on alg_list
+ * contents rather than the return code for partial success.
+ */
+int al_crypto_alg_init(struct al_crypto_device *device)
+{
+	int i;
+	int err = 0;
+
+	INIT_LIST_HEAD(&device->alg_list);
+
+	/* -1 so the first tfm gets index 0 */
+	atomic_set(&device->tfm_count, -1);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct al_crypto_alg *t_alg;
+
+		t_alg = al_crypto_alg_alloc(device, &driver_algs[i]);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(&device->pdev->dev,
+				"%s alg allocation failed\n",
+				driver_algs[i].driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			dev_warn(&device->pdev->dev,
+				"%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &device->alg_list);
+	}
+
+	if (!list_empty(&device->alg_list))
+		dev_info(&device->pdev->dev,
+			"algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Unregister and free every algorithm registered by al_crypto_alg_init().
+ *
+ * The alg_list.next NULL check guards against being called before the
+ * list head was ever initialized.
+ */
+void al_crypto_alg_terminate(struct al_crypto_device *device)
+{
+	struct al_crypto_alg *t_alg, *n;
+
+	if (!device->alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &device->alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
diff --git a/drivers/crypto/al/al_crypto_core.c b/drivers/crypto/al/al_crypto_core.c
new file mode 100644
index 0000000..18cdcfd
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_core.c
@@ -0,0 +1,1411 @@
+/*
+ * drivers/crypto/al_crypto_core.c
+ *
+ * Annapurna Labs Crypto driver - core
+ *
+ * Copyright (C) 2012 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+#ifndef DEBUG
+#define DEBUG
+#endif
+*/
+
+/* NOTE(review): the angle-bracketed header names were lost in extraction;
+ * the includes below are reconstructed - verify against the original patch. */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "al_crypto.h"
+#include "al_hal_ssm_crypto.h"
+#include "al_hal_udma_iofic.h"
+#include "al_crypto_module_params.h"
+#include "al_hal_udma_config.h"
+
+
+static void al_crypto_free_chan_resources(
+ struct al_crypto_chan *chan);
+
+static int al_crypto_alloc_chan_resources(
+ struct al_crypto_chan *chan);
+
+static void al_crypto_free_channels(
+ struct al_crypto_device *device);
+
+static int al_crypto_alloc_channels(
+ struct al_crypto_device *device);
+
+static int al_crypto_setup_interrupts(
+ struct al_crypto_device *device);
+
+static irqreturn_t al_crypto_do_interrupt_msix(
+ int irq,
+ void *data);
+
+static irqreturn_t al_crypto_do_interrupt_group_d(
+ int irq,
+ void *data);
+
+static irqreturn_t al_crypto_do_interrupt_msix_rx(
+ int irq,
+ void *data);
+
+static irqreturn_t al_crypto_do_interrupt_legacy(
+ int irq,
+ void *data);
+
+static int al_crypto_init_channels(
+ struct al_crypto_device *device,
+ int max_channels);
+
+static void al_crypto_init_channel(
+ struct al_crypto_device *device,
+ struct al_crypto_chan *chan,
+ int idx);
+
+static struct al_crypto_sw_desc **al_crypto_alloc_sw_ring(
+ struct al_crypto_chan *chan,
+ int order,
+ gfp_t flags);
+
+static void al_crypto_free_sw_ring(
+ struct al_crypto_sw_desc **ring,
+ struct al_crypto_chan *chan,
+ int size);
+
+static struct al_crypto_sw_desc *al_crypto_alloc_ring_ent(
+ struct al_crypto_chan *chan,
+ gfp_t flags);
+
+static void al_crypto_free_ring_ent(
+ struct al_crypto_sw_desc *desc,
+ struct al_crypto_chan *chan);
+
+static int al_crypto_iofic_config(struct al_crypto_device *device,
+ bool single_msix);
+
+static void al_crypto_cleanup_tasklet(
+ unsigned long data);
+
+static void al_crypto_cleanup_tasklet_msix_rx(
+ unsigned long data);
+
+static void al_crypto_cleanup_tasklet_legacy(
+ unsigned long data);
+
+static void al_crypto_unmask_interrupts(struct al_crypto_device *device,
+ bool single_interrupt);
+
+static void al_crypto_group_d_errors_handler(struct al_crypto_device *device);
+/******************************************************************************
+ *****************************************************************************/
+/* Initialize the crypto core: sw descriptor cache, HAL DMA, channels
+ * and interrupts.
+ *
+ * Returns 0 on success, negative errno on failure.  All resources
+ * acquired here are released on every error path (the original leaked
+ * the kmem_cache on failure and ignored al_ssm_dma_state_set()'s
+ * return value).
+ */
+int al_crypto_core_init(
+	struct al_crypto_device *device,
+	void __iomem *iobase_udma,
+	void __iomem *iobase_app)
+{
+	int32_t rc;
+	int err;
+	int max_channels;
+	int crc_channels;
+
+	dev_dbg(
+		&device->pdev->dev,
+		"%s(%p, %p)\n",
+		__func__,
+		device,
+		iobase_udma);
+
+	device->cache = kmem_cache_create(
+		"al_crypto",
+		sizeof(struct al_crypto_sw_desc),
+		0,
+		SLAB_HWCACHE_ALIGN,
+		NULL);
+	if (!device->cache)
+		return -ENOMEM;
+
+	max_channels = device->max_channels;
+	crc_channels = device->crc_channels;
+
+	if ((crc_channels > max_channels) || (max_channels > DMA_MAX_Q)) {
+		dev_err(&device->pdev->dev, "invalid number of channels\n");
+		err = -EINVAL;
+		goto err_cache;
+	}
+
+	device->udma_regs_base = iobase_udma;
+	/* The crypto regs exists only for the PF.
+	 * The VF uses the same configs/ error reporting as the PF */
+	device->crypto_regs_base = iobase_app ? iobase_app +
+		AL_CRYPTO_APP_REGS_BASE_OFFSET : NULL;
+
+	device->ssm_dma_params.udma_regs_base = device->udma_regs_base;
+
+	device->ssm_dma_params.name =
+		kmalloc(strlen(dev_name(&device->pdev->dev)) + 1, GFP_KERNEL);
+	if (device->ssm_dma_params.name == NULL) {
+		dev_err(&device->pdev->dev, "kmalloc failed\n");
+		err = -ENOMEM;
+		goto err_cache;
+	}
+
+	strcpy(device->ssm_dma_params.name, dev_name(&device->pdev->dev));
+
+	device->ssm_dma_params.num_of_queues = max_channels;
+
+	err = al_ssm_dma_init(&device->hal_crypto,
+			&device->ssm_dma_params);
+	if (err) {
+		dev_err(&device->pdev->dev, "al_crypto_dma_init failed\n");
+		goto err_no_chan;
+	}
+
+	/* enumerate and initialize channels (queues) */
+	al_crypto_init_channels(device, max_channels);
+
+	err = al_crypto_alloc_channels(device);
+	if (err) {
+		dev_err(&device->pdev->dev,
+			"failed to alloc channel resources\n");
+		goto err_no_irq;
+	}
+
+	/* enable Crypto DMA engine */
+	rc = al_ssm_dma_state_set(&device->hal_crypto, UDMA_NORMAL);
+	if (rc) {
+		dev_err(&device->pdev->dev,
+			"al_ssm_dma_state_set failed\n");
+		err = rc;
+		goto err_no_irq;
+	}
+
+	err = al_crypto_setup_interrupts(device);
+	if (err) {
+		dev_err(&device->pdev->dev, "failed to setup interrupts\n");
+		goto err_no_irq;
+	}
+
+	return 0;
+
+err_no_irq:
+	al_crypto_free_channels(device);
+err_no_chan:
+	kfree(device->ssm_dma_params.name);
+err_cache:
+	kmem_cache_destroy(device->cache);
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Tear down the crypto core: free all channels, the DMA name string and
+ * the sw descriptor cache.  Always returns 0.
+ */
+int al_crypto_core_terminate(
+	struct al_crypto_device *device)
+{
+	int status = 0;
+
+	dev_dbg(
+		&device->pdev->dev,
+		"%s(%p)\n",
+		__func__,
+		device);
+
+	al_crypto_free_channels(device);
+
+	kfree(device->ssm_dma_params.name);
+
+	kmem_cache_destroy(device->cache);
+
+	return status;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate and initialize up to max_channels channel structures.
+ *
+ * Returns the number of channels actually initialized and records it in
+ * device->num_channels.  On kzalloc failure the loop stops early, so a
+ * partial count is possible - callers use num_channels as the usable
+ * count rather than treating this as a hard error.
+ */
+static int al_crypto_init_channels(
+	struct al_crypto_device *device,
+	int max_channels)
+{
+	int i;
+	struct al_crypto_chan *chan;
+
+	for (i = 0; i < max_channels; i++) {
+		chan = kzalloc(sizeof(struct al_crypto_chan), GFP_KERNEL);
+		if (!chan)
+			break;
+
+		al_crypto_init_channel(device, chan, i);
+	}
+	device->num_channels = i;
+	return i;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Initialize one channel structure: locks, SA-cache LRU list, cleanup
+ * tasklet and backlog queue, then publish it in device->channels[idx].
+ */
+static void al_crypto_init_channel(struct al_crypto_device *device,
+	struct al_crypto_chan *chan, int idx)
+{
+	unsigned long data = (unsigned long)chan;
+
+	dev_dbg(
+		&device->pdev->dev,
+		"%s(%p, %p, %d)\n",
+		__func__,
+		device,
+		chan,
+		idx);
+
+	chan->device = device;
+	chan->idx = idx;
+	chan->hal_crypto = &device->hal_crypto;
+
+	AL_CRYPTO_STATS_INIT_LOCK(&chan->stats_gen_lock);
+	spin_lock_init(&chan->prep_lock);
+	spin_lock_init(&chan->cleanup_lock);
+
+	device->channels[idx] = chan;
+
+	INIT_LIST_HEAD(&chan->cache_lru_list);
+	chan->cache_lru_count = 0;
+
+	tasklet_init(&chan->cleanup_task, al_crypto_cleanup_tasklet, data);
+
+	/* sw backlog queue of depth 1 */
+	crypto_init_queue(&chan->sw_queue, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Unmask the UDMA primary-level interrupt groups.
+ *
+ * Group B carries per-Rx-queue completions (one bit per channel);
+ * group D carries error summaries.  When a single interrupt vector is
+ * used, group B is routed through the group A summary as well.
+ */
+static void al_crypto_unmask_interrupts(struct al_crypto_device *device,
+	bool single_interrupt)
+{
+	/* enable group D summary */
+	u32 group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM;
+	u32 group_b_mask = (1 << device->num_channels) - 1; /* bit per Rx q*/
+	u32 group_d_mask = AL_INT_GROUP_D_ALL;
+
+	struct unit_regs __iomem *regs_base =
+		(struct unit_regs __iomem *)device->udma_regs_base;
+
+	if (single_interrupt)
+		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM;
+
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, group_a_mask);
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, group_b_mask);
+	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, group_d_mask);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Configure the crypto application-level IOFIC (error interrupts).
+ *
+ * No-op on the VF, which has no app register window
+ * (crypto_regs_base == NULL) and shares the PF's configuration.
+ */
+static void al_crypto_config_crypto_app_interrupts
+	(struct al_crypto_device *device)
+{
+	if (!device->crypto_regs_base)
+		return;
+
+	al_iofic_clear_cause(
+		device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET,
+		AL_INT_GROUP_A,
+		AL_CRYPTO_APP_INT_A_ALL);
+
+	al_iofic_unmask(
+		device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET,
+		AL_INT_GROUP_A,
+		AL_CRYPTO_APP_INT_A_ALL);
+
+	al_iofic_config(
+		device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET,
+		AL_INT_GROUP_A,
+		INT_CONTROL_GRP_CLEAR_ON_READ |
+		INT_CONTROL_GRP_MASK_MSI_X);
+
+	/* Clear the interrupt reg */
+	al_iofic_read_cause(
+		device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET,
+		AL_INT_GROUP_A);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Configure the UDMA IOFIC for MSI-X operation.
+ *
+ * single_msix selects one vector per interrupt group (moderated on
+ * group A) versus one vector per queue (moderated on group B).
+ * Returns 0 on success, -EIO if the HAL configuration fails.
+ */
+static int al_crypto_iofic_config(struct al_crypto_device *device,
+	bool single_msix)
+{
+	enum al_iofic_mode iofic_mode;
+	int int_moderation_group;
+
+	if (single_msix) {
+		iofic_mode = AL_IOFIC_MODE_MSIX_PER_GROUP;
+		int_moderation_group = AL_INT_GROUP_A;
+	} else {
+		iofic_mode = AL_IOFIC_MODE_MSIX_PER_Q;
+		int_moderation_group = AL_INT_GROUP_B;
+	}
+
+	/* NOTE(review): 0x480/0x1E0 are HAL moderation/config magic values
+	 * used identically in the legacy path - meaning not visible here */
+	if (al_udma_iofic_config(
+		(struct unit_regs *)device->udma_regs_base,
+		iofic_mode, 0x480, 0x480, 0x1E0, 0x1E0)) {
+
+		dev_err(&device->pdev->dev, "al_udma_iofic_config failed!.\n");
+		return -EIO;
+	}
+
+	al_iofic_moder_res_config(
+		&((struct unit_regs *)(device->udma_regs_base))->gen.
+		interrupt_regs.main_iofic,
+		int_moderation_group, 15);
+
+	al_crypto_config_crypto_app_interrupts(device);
+	al_crypto_unmask_interrupts(device, single_msix);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set up device interrupts: per-queue MSI-X plus one group-D vector,
+ * falling back to a single MSI-X vector, and finally to legacy INTx.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int al_crypto_setup_interrupts(struct al_crypto_device *device)
+{
+	struct al_crypto_chan *chan;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, msixcnt;
+	unsigned int cpu;
+	int err = -EINVAL;
+	int devm_alloc_num = 0;
+
+	if (al_crypto_get_use_single_msix())
+		goto msix_single_vector;
+
+	/* The number of MSI-X vectors should equal the number of channels + 1
+	 * for group D */
+	msixcnt = device->num_channels + 1;
+
+	for (i = 0; i < device->num_channels; i++)
+		device->msix_entries[i].entry =
+			AL_INT_MSIX_RX_COMPLETION_START + i;
+
+	device->msix_entries[device->num_channels].entry =
+		AL_INT_MSIX_GROUP_A_SUM_D_IDX;
+
+	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+
+	if (err < 0) {
+		dev_err(dev, "pci_enable_msix failed! using intx instead.\n");
+		goto intx;
+	}
+
+	/* positive return: fewer vectors than requested are available */
+	if (err > 0) {
+		dev_err(dev, "pci_enable_msix failed! msix_single_vector.\n");
+		goto msix_single_vector;
+	}
+
+	for (i = 0; i < device->num_channels; i++) {
+		msix = &device->msix_entries[i];
+
+		chan = device->channels[i];
+
+		dev_dbg(dev, "%s: requesting irq %d\n", __func__, msix->vector);
+
+		snprintf(device->irq_tbl[i].name, AL_CRYPTO_IRQNAME_SIZE,
+			"al-crypto-comp-%d@pci:%s", i,
+			pci_name(pdev));
+
+		err = devm_request_irq(
+			dev,
+			msix->vector,
+			al_crypto_do_interrupt_msix,
+			0,
+			device->irq_tbl[i].name,
+			chan);
+
+		if (err) {
+			dev_err(dev, "devm_request_irq failed!.\n");
+			goto err_free_devm;
+		}
+
+		devm_alloc_num++;
+
+		/* spread completion vectors across the online CPUs */
+		cpu = next_cpu((i % num_online_cpus() - 1), *cpu_online_mask);
+		cpumask_set_cpu(cpu, &chan->affinity_hint_mask);
+
+		irq_set_affinity_hint(msix->vector, &chan->affinity_hint_mask);
+	}
+
+	snprintf(device->irq_tbl[device->num_channels].name,
+		AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-interrupt-group-d@pci:%s",
+		pci_name(pdev));
+
+	err = devm_request_irq(
+		dev,
+		device->msix_entries[device->num_channels].vector,
+		al_crypto_do_interrupt_group_d,
+		0,
+		device->irq_tbl[device->num_channels].name,
+		device);
+
+	if (err) {
+		dev_err(dev, "devm_request_irq failed!.\n");
+		goto err_free_devm;
+	}
+
+	err = al_crypto_iofic_config(device, false);
+	if (err)
+		return err;
+
+	goto done;
+
+msix_single_vector:
+	msix = &device->msix_entries[0];
+
+	msix->entry = 0;
+
+	tasklet_init(&device->cleanup_task,
+		al_crypto_cleanup_tasklet_msix_rx,
+		(unsigned long)device);
+
+	err = pci_enable_msix(pdev, device->msix_entries, 1);
+
+	if (err) {
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+
+	snprintf(device->irq_tbl[0].name, AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-msix-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(
+		dev,
+		msix->vector,
+		al_crypto_do_interrupt_msix_rx,
+		0,
+		device->irq_tbl[0].name,
+		device);
+
+	if (err) {
+		dev_err(dev, "devm_request_irq failed!.\n");
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+
+	devm_alloc_num = 1;
+
+	err = al_crypto_iofic_config(device, true);
+	if (err)
+		return err;
+	goto done;
+
+intx:
+	tasklet_init(&device->cleanup_task,
+		al_crypto_cleanup_tasklet_legacy,
+		(unsigned long)device);
+
+	snprintf(device->irq_tbl[0].name, AL_CRYPTO_IRQNAME_SIZE,
+		"al-crypto-intx-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(dev, pdev->irq, al_crypto_do_interrupt_legacy,
+		IRQF_SHARED, device->irq_tbl[0].name, device);
+	if (err)
+		goto err_no_irq;
+
+	if (al_udma_iofic_config(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_IOFIC_MODE_LEGACY, 0x480, 0x480, 0x1E0, 0x1E0)) {
+		dev_err(dev, "al_udma_iofic_config failed!.\n");
+		return -EIO;
+	}
+
+	al_crypto_config_crypto_app_interrupts(device);
+	al_crypto_unmask_interrupts(device, true);
+
+done:
+	device->num_irq_used = devm_alloc_num;
+	return 0;
+
+err_free_devm:
+	/* release every vector requested so far; the original returned
+	 * from inside this loop, freeing only the first IRQ */
+	for (i = 0; i < devm_alloc_num; i++) {
+		msix = &device->msix_entries[i];
+		chan = device->channels[i];
+		irq_set_affinity_hint(msix->vector, NULL);
+		devm_free_irq(dev, msix->vector, chan);
+	}
+
+err_no_irq:
+	/* Disable all interrupt generation */
+
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free tx and rx descriptor rings for all channels
+ *
+ * Also drops the IRQ affinity hints installed by
+ * al_crypto_setup_interrupts() for each used MSI-X vector.
+ */
+static void al_crypto_free_channels(struct al_crypto_device *device)
+{
+	int i;
+
+	for (i = 0; i < device->num_channels; i++) {
+		al_crypto_free_chan_resources(device->channels[i]);
+		al_crypto_free_channel(device->channels[i]);
+	}
+
+	for (i = 0; i < device->num_irq_used; i++)
+		irq_set_affinity_hint(device->msix_entries[i].vector, NULL);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate/initialize tx and rx descriptor rings for all channels
+ *
+ * On failure, resources of the channels already set up are freed and
+ * the error is returned; channel structs themselves remain allocated
+ * (freed later by al_crypto_free_channels()).
+ */
+static int al_crypto_alloc_channels(struct al_crypto_device *device)
+{
+	int i, j;
+	int err = -EINVAL;
+
+	for (i = 0; i < device->num_channels; i++) {
+		err = al_crypto_alloc_chan_resources(device->channels[i]);
+
+		if (err < 0) {
+			dev_err(
+				&device->pdev->dev,
+				"failed to alloc resources for channel %d\n",
+				i);
+
+			for (j = 0; j < i; j++) {
+				al_crypto_free_chan_resources(
+					device->channels[j]);
+			}
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Return true when this channel serves crypt/auth traffic: the last
+ * crc_channels queues of the device are reserved for CRC. */
+static inline bool al_crypto_is_crypt_auth_chan(struct al_crypto_chan *chan)
+{
+	int first_crc_idx =
+		chan->device->num_channels - chan->device->crc_channels;
+
+	return chan->idx < first_crc_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate/initialize tx and rx descriptor rings for one channel.
+ *
+ * Returns the sw ring size (1 << alloc_order) on success, or a negative
+ * errno on failure. If the channel is already set up, the existing ring
+ * size is returned without re-allocating anything.
+ */
+static int al_crypto_alloc_chan_resources(struct al_crypto_chan *chan)
+{
+	struct al_crypto_device *device = chan->device;
+	struct device *dev = to_dev(chan);
+	struct al_crypto_sw_desc **sw_ring;
+	struct al_udma_q_params tx_params;
+	struct al_udma_q_params rx_params;
+	struct al_udma_m2s_pkt_len_conf conf;
+	struct al_udma *crypto_udma;
+
+	int rc = 0;
+	int tx_descs_order;
+	int rx_descs_order;
+	int ring_alloc_order;
+
+	dev_dbg(dev, "al_crypto_alloc_chan_resources: channel %d\n",
+		chan->idx);
+
+	/* have we already been set up? */
+	if (chan->sw_ring)
+		return 1 << chan->alloc_order;
+
+	tx_descs_order = al_crypto_get_tx_descs_order();
+	rx_descs_order = al_crypto_get_rx_descs_order();
+	ring_alloc_order = al_crypto_get_ring_alloc_order();
+
+	chan->tx_descs_num = 1 << tx_descs_order;
+	chan->rx_descs_num = 1 << rx_descs_order;
+
+	/* allocate coherent memory for Tx submission descriptors */
+	chan->tx_dma_desc_virt = dma_alloc_coherent(dev,
+						    chan->tx_descs_num *
+						    sizeof(union al_udma_desc),
+						    &chan->tx_dma_desc,
+						    GFP_KERNEL);
+	if (chan->tx_dma_desc_virt == NULL) {
+		/* the byte count is size_t (int * sizeof()), so print it
+		 * with %zu, not %d */
+		dev_err(dev, "failed to allocate %zu bytes of coherent memory for Tx submission descriptors\n",
+			chan->tx_descs_num * sizeof(union al_udma_desc));
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocated tx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->tx_dma_desc_virt, (u64)chan->tx_dma_desc);
+
+	/* allocate coherent memory for Rx submission descriptors */
+	chan->rx_dma_desc_virt = dma_alloc_coherent(dev,
+						    chan->rx_descs_num *
+						    sizeof(union al_udma_desc),
+						    &chan->rx_dma_desc,
+						    GFP_KERNEL);
+	if (chan->rx_dma_desc_virt == NULL) {
+		dev_err(dev, "failed to allocate %zu bytes of coherent memory for Rx submission descriptors\n",
+			chan->rx_descs_num * sizeof(union al_udma_desc));
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocated rx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_desc_virt, (u64)chan->rx_dma_desc);
+
+	/* allocate coherent memory for Rx completion descriptors */
+	chan->rx_dma_cdesc_virt = dma_alloc_coherent(dev,
+						     chan->rx_descs_num *
+						     AL_CRYPTO_RX_CDESC_SIZE,
+						     &chan->rx_dma_cdesc,
+						     GFP_KERNEL);
+	if (chan->rx_dma_cdesc_virt == NULL) {
+		dev_err(dev, "failed to allocate %d bytes of coherent memory for Rx completion descriptors\n",
+			chan->rx_descs_num * AL_CRYPTO_RX_CDESC_SIZE);
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	/* clear the Rx completion descriptors to avoid false positive */
+	memset(
+		chan->rx_dma_cdesc_virt,
+		0,
+		chan->rx_descs_num * AL_CRYPTO_RX_CDESC_SIZE);
+
+	dev_dbg(
+		dev,
+		"allocated rx completion desc ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_cdesc_virt, (u64)chan->rx_dma_cdesc);
+
+	rc = al_ssm_dma_handle_get(
+		&device->hal_crypto,
+		UDMA_TX,
+		&crypto_udma);
+	if (rc) {
+		dev_err(to_dev(chan), "al_crypto_dma_handle_get failed\n");
+		al_crypto_free_chan_resources(chan);
+		return rc;
+	}
+
+	/* 64K packets are encoded as len 0; max packet size is 1M-1 */
+	conf.encode_64k_as_zero = true;
+	conf.max_pkt_size = 0xfffff;
+	al_udma_m2s_packet_size_cfg_set(crypto_udma, &conf);
+
+	tx_params.size = chan->tx_descs_num;
+	tx_params.desc_base = chan->tx_dma_desc_virt;
+	tx_params.desc_phy_base = chan->tx_dma_desc;
+	tx_params.cdesc_base = NULL; /* don't use Tx completion ring */
+	tx_params.cdesc_phy_base = 0;
+	tx_params.cdesc_size = AL_CRYPTO_TX_CDESC_SIZE; /* size is needed */
+
+	rx_params.size = chan->rx_descs_num;
+	rx_params.desc_base = chan->rx_dma_desc_virt;
+	rx_params.desc_phy_base = chan->rx_dma_desc;
+	rx_params.cdesc_base = chan->rx_dma_cdesc_virt;
+	rx_params.cdesc_phy_base = chan->rx_dma_cdesc;
+	rx_params.cdesc_size = AL_CRYPTO_RX_CDESC_SIZE;
+
+	/* validate the sw ring order before allocating the sw ring */
+	if (ring_alloc_order < AL_CRYPTO_SW_RING_MIN_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d < %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_CRYPTO_SW_RING_MIN_ORDER);
+
+		al_crypto_free_chan_resources(chan);
+		return -EINVAL;
+	} else if (ring_alloc_order > AL_CRYPTO_SW_RING_MAX_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d > %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_CRYPTO_SW_RING_MAX_ORDER);
+
+		al_crypto_free_chan_resources(chan);
+		return -EINVAL;
+	} else if (ring_alloc_order > rx_descs_order) {
+		dev_warn(
+			dev,
+			"%s: ring_alloc_order > rx_descs_order (%d>%d)!\n",
+			__func__,
+			ring_alloc_order,
+			rx_descs_order);
+
+	}
+
+	sw_ring = al_crypto_alloc_sw_ring(chan, ring_alloc_order, GFP_KERNEL);
+	if (!sw_ring) {
+		dev_err(
+			dev,
+			"%s: sw ring alloc failed! ring_alloc_order = %d\n",
+			__func__,
+			ring_alloc_order);
+
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	/* publish the new ring under both locks so neither the prepare nor
+	 * the cleanup path observes a half-initialized channel */
+	spin_lock_bh(&chan->cleanup_lock);
+	spin_lock_bh(&chan->prep_lock);
+	chan->sw_ring = sw_ring;
+	chan->head = 0;
+	chan->tail = 0;
+	chan->alloc_order = ring_alloc_order;
+	chan->type = al_crypto_is_crypt_auth_chan(chan) ?
+		AL_CRYPT_AUTH_Q : AL_MEM_CRC_MEMCPY_Q;
+
+	/* cache entries are split evenly between channels of each kind */
+	chan->cache_entries_num = al_crypto_is_crypt_auth_chan(chan) ?
+		(CACHED_SAD_SIZE / (device->num_channels -
+				    device->crc_channels)) :
+		(CRC_IV_CACHE_SIZE / device->crc_channels);
+
+	rc = al_ssm_dma_q_init(&device->hal_crypto, chan->idx,
+			       &tx_params, &rx_params, chan->type);
+	if (rc) {
+		dev_err(dev, "failed to initialize hal q %d. rc %d\n",
+			chan->idx, rc);
+
+		spin_unlock_bh(&chan->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		al_crypto_free_chan_resources(chan);
+		return -ENOMEM;
+	}
+
+	spin_unlock_bh(&chan->prep_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	/* should we return less ?*/
+	return 1 << chan->alloc_order;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free tx and rx descriptor rings for one channel.
+ *
+ * Drains already-completed descriptors, releases the sw ring and returns
+ * all coherent DMA memory. Safe to call on a partially-initialized
+ * channel: each buffer is freed only if it was allocated.
+ */
+static void al_crypto_free_chan_resources(struct al_crypto_chan *chan)
+{
+	struct device *dev = to_dev(chan);
+
+	dev_dbg(dev, "%s: %p\n", __func__, chan);
+
+	tasklet_disable(&chan->cleanup_task);
+
+	/* reap anything the hw already completed */
+	al_crypto_cleanup_fn(chan, 0);
+
+	spin_lock_bh(&chan->cleanup_lock);
+
+	al_crypto_free_sw_ring(chan->sw_ring, chan, 1 << chan->alloc_order);
+
+	if (chan->tx_dma_desc_virt != NULL) {
+		dma_free_coherent(
+			dev,
+			chan->tx_descs_num * sizeof(union al_udma_desc),
+			chan->tx_dma_desc_virt, chan->tx_dma_desc);
+		chan->tx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_desc_virt != NULL) {
+		dma_free_coherent(
+			dev,
+			chan->rx_descs_num * sizeof(union al_udma_desc),
+			chan->rx_dma_desc_virt,
+			chan->rx_dma_desc);
+		chan->rx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_cdesc_virt != NULL) {
+		dma_free_coherent(dev, chan->rx_descs_num *
+				  AL_CRYPTO_RX_CDESC_SIZE,
+				  chan->rx_dma_cdesc_virt, chan->rx_dma_cdesc);
+		/* fix copy-paste bug: this used to re-clear
+		 * rx_dma_desc_virt, leaving rx_dma_cdesc_virt dangling */
+		chan->rx_dma_cdesc_virt = NULL;
+	}
+
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate the sw descriptor ring: an array of 1 << order pointers, each
+ * pointing at a zeroed sw descriptor. Returns NULL on any allocation
+ * failure (entries allocated so far are rolled back).
+ */
+static struct al_crypto_sw_desc **al_crypto_alloc_sw_ring(
+	struct al_crypto_chan *chan,
+	int order,
+	gfp_t flags)
+{
+	int num_descs = 1 << order;
+	struct al_crypto_sw_desc **ring;
+	int idx;
+
+	ring = kcalloc(num_descs, sizeof(*ring), flags);
+	if (ring == NULL)
+		return NULL;
+
+	for (idx = 0; idx < num_descs; idx++) {
+		struct al_crypto_sw_desc *ent;
+
+		ent = al_crypto_alloc_ring_ent(chan, flags);
+		if (ent == NULL) {
+			/* free the idx entries allocated so far */
+			al_crypto_free_sw_ring(ring, chan, idx);
+			return NULL;
+		}
+		ring[idx] = ent;
+	}
+
+	return ring;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free a sw descriptor ring: release the first 'size' entries, then the
+ * pointer array itself.
+ */
+static void al_crypto_free_sw_ring(
+	struct al_crypto_sw_desc **ring,
+	struct al_crypto_chan *chan,
+	int size)
+{
+	int idx;
+
+	for (idx = 0; idx < size; idx++)
+		al_crypto_free_ring_ent(ring[idx], chan);
+
+	kfree(ring);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate a single zeroed sw descriptor from the device-wide kmem cache.
+ * Returns NULL on allocation failure.
+ */
+static struct al_crypto_sw_desc *al_crypto_alloc_ring_ent(
+	struct al_crypto_chan *chan,
+	gfp_t flags)
+{
+	return kmem_cache_zalloc(chan->device->cache, flags);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free a single sw descriptor back to the device-wide kmem cache.
+ */
+static void al_crypto_free_ring_ent(
+	struct al_crypto_sw_desc *desc,
+	struct al_crypto_chan *chan)
+{
+	kmem_cache_free(chan->device->cache, desc);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Check that at least 'num' sw descriptors are available on the ring.
+ * Returns 0 when there is room, -ENOMEM otherwise. Called with
+ * chan->prep_lock held (see crc_process_req).
+ */
+int al_crypto_get_sw_desc(struct al_crypto_chan *chan, int num)
+{
+	if (unlikely(al_crypto_ring_space(chan) < num))
+		return -ENOMEM;
+
+	dev_dbg(to_dev(chan), "%s: (%x:%x)\n",
+		__func__, chan->head, chan->tail);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Handler used for vector-per-channel interrupt mode: each channel owns an
+ * MSI-X vector, so just kick that channel's cleanup tasklet.
+ */
+static irqreturn_t al_crypto_do_interrupt_msix(int irq, void *data)
+{
+	struct al_crypto_chan *chan = data;
+
+	pr_debug("%s(%d, %p)\n", __func__, irq, data);
+
+	tasklet_schedule(&chan->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Handler for interrupt group d (error interrupts) - handled synchronously
+ * in hard-irq context.
+ */
+static irqreturn_t al_crypto_do_interrupt_group_d(int irq, void *data)
+{
+	struct al_crypto_device *device = data;
+
+	pr_debug("%s(%d, %p)\n", __func__, irq, device);
+
+	al_crypto_group_d_errors_handler(device);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Handler used for vector-per-group interrupt mode: one vector serves all
+ * channels, so schedule the device-wide cleanup tasklet.
+ */
+static irqreturn_t al_crypto_do_interrupt_msix_rx(int irq, void *data)
+{
+	struct al_crypto_device *device = data;
+
+	pr_debug("%s(%d, %p)\n", __func__, irq, data);
+
+	tasklet_schedule(&device->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Handler used for legacy (INTx) interrupt mode: mask the group B/D summary
+ * interrupts, then defer the actual work to the device cleanup tasklet,
+ * which re-unmasks when done.
+ */
+static irqreturn_t al_crypto_do_interrupt_legacy(int irq, void *data)
+{
+	struct al_crypto_device *device = data;
+
+	pr_debug("%s(%d, %p)\n", __func__, irq, data);
+
+	al_udma_iofic_mask(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_A,
+		AL_INT_GROUP_A_GROUP_B_SUM |
+		AL_INT_GROUP_A_GROUP_D_SUM);
+
+	tasklet_schedule(&device->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Reap completed descriptors for one channel.
+ *
+ * Called from the cleanup tasklets (from_tasklet != 0) and from channel
+ * teardown (from_tasklet == 0). Returns the number of descriptors
+ * completed in this call.
+ */
+int al_crypto_cleanup_fn(struct al_crypto_chan *chan, int from_tasklet)
+{
+	struct al_crypto_sw_desc *desc;
+	uint32_t comp_status;
+	u16 active;
+	int idx, i, rc;
+
+	spin_lock_bh(&chan->cleanup_lock);
+	idx = chan->tail;
+
+	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x\n",
+		__func__, chan->head, chan->tail);
+
+	active = al_crypto_ring_active(chan);
+
+	AL_CRYPTO_STATS_SET(chan->stats_comp.max_active_descs,
+		(active > chan->stats_comp.max_active_descs) ?
+		active :
+		chan->stats_comp.max_active_descs);
+
+	for (i = 0; i < active; i++) {
+		rc = al_crypto_dma_completion(chan->hal_crypto, chan->idx,
+					      &comp_status);
+
+		/* if no completed transaction found -> exit */
+		if (rc == 0) {
+			dev_dbg(to_dev(chan), "%s: No completion\n",
+				__func__);
+
+			break;
+		}
+
+		dev_dbg(
+			to_dev(chan),
+			"%s: completion status: %u\n",
+			__func__,
+			comp_status);
+
+		/* This will instruct the CPU to make sure the index is up to
+		   date before reading the new item */
+		smp_read_barrier_depends();
+
+		desc = al_crypto_get_ring_ent(chan, idx + i);
+
+		/* dispatch per-request-type completion handling */
+		if (desc->req_type == AL_CRYPTO_REQ_ABLKCIPHER)
+			al_crypto_cleanup_single_ablkcipher(
+				chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_AEAD)
+			al_crypto_cleanup_single_aead(
+				chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_AHASH)
+			al_crypto_cleanup_single_ahash(
+				chan, desc, comp_status);
+		else if (desc->req_type == AL_CRYPTO_REQ_CRC)
+			al_crypto_cleanup_single_crc(
+				chan, desc, comp_status);
+		else {
+			dev_err(to_dev(chan),
+				"alg type %d is not supported\n",
+				desc->req_type);
+
+			BUG();
+		}
+	}
+
+	/* This will make sure the CPU has finished reading the item
+	   before it writes the new tail pointer, which will erase the item */
+	smp_mb(); /* finish all descriptor reads before incrementing tail */
+
+	chan->tail = idx + i;
+
+	/* Keep track of redundant interrupts - interrupts that don't
+	   yield completions */
+	if (unlikely(from_tasklet && (!i)))
+		AL_CRYPTO_STATS_INC(chan->stats_comp.redundant_int_cnt, 1);
+
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	/* Currently only ablkcipher reqs can be backlogged */
+	if (i && chan->sw_queue.qlen)
+		ablkcipher_process_queue(chan);
+
+	return i;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Handle group D (error) interrupts: log the cause; if the error came from
+ * the crypto application IOFIC, read+log its cause and re-enable (unmask)
+ * the application interrupts and the group D summary.
+ */
+static void al_crypto_group_d_errors_handler(struct al_crypto_device *device)
+{
+	u32 read_cause_group_d, read_cause_crypto_reg_a;
+
+	read_cause_group_d = al_udma_iofic_read_cause(device->udma_regs_base,
+			AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			AL_INT_GROUP_D);
+
+	dev_err(&device->pdev->dev,
+		"got error - %08x from group D\n",
+		read_cause_group_d);
+
+
+	if (read_cause_group_d & AL_INT_GROUP_D_APP_EXT_INT) {
+		read_cause_crypto_reg_a =
+			al_iofic_read_cause(
+				device->crypto_regs_base +
+				AL_CRYPTO_APP_IOFIC_OFFSET,
+				AL_INT_GROUP_A);
+
+		al_iofic_unmask(
+			device->crypto_regs_base + AL_CRYPTO_APP_IOFIC_OFFSET,
+			AL_INT_GROUP_A,
+			AL_CRYPTO_APP_INT_A_ALL);
+
+		al_udma_iofic_unmask(
+			(struct unit_regs *)device->udma_regs_base,
+			AL_UDMA_IOFIC_LEVEL_PRIMARY,
+			AL_INT_GROUP_A,
+			AL_INT_GROUP_A_GROUP_D_SUM);
+
+		dev_err(&device->pdev->dev,
+			"got error - %08x from APP group A\n",
+			read_cause_crypto_reg_a);
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup tasklet for vector-per-channel (MSI-X) mode: reap completions on
+ * this channel, then re-enable (unmask) the channel's group B interrupt.
+ */
+static void al_crypto_cleanup_tasklet(unsigned long data)
+{
+	struct al_crypto_chan *chan = (struct al_crypto_chan *)data;
+	int num_completed;
+
+	num_completed = al_crypto_cleanup_fn(chan, 1);
+
+	if (unlikely(num_completed < 0))
+		dev_err(
+			to_dev(chan),
+			"al_crypto_cleanup_fn failed\n");
+
+	al_udma_iofic_unmask(
+		(struct unit_regs *)chan->device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_B,
+		1 << chan->idx);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Run cleanup for every channel whose bit is set in the given interrupt
+ * group's cause register.
+ */
+static inline void al_crypto_cleanup_q_group_fn(
+	struct al_crypto_device *device,
+	int group)
+{
+	int num_completed;
+	unsigned int status;
+	int i;
+
+	status = al_udma_iofic_read_cause(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		group);
+
+	for (i = 0; i < device->num_channels; i++) {
+		if (status & AL_BIT(i)) {
+			num_completed = al_crypto_cleanup_fn(
+				device->channels[i], 1);
+
+			if (unlikely(num_completed < 0))
+				dev_err(to_dev(device->channels[i]),
+					"al_crypto_cleanup_fn failed\n");
+		}
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup tasklet for vector-per-group (MSI-X) mode: check group D for
+ * errors, reap completions for all group B channels, then unmask the
+ * group B summary interrupt.
+ */
+static void al_crypto_cleanup_tasklet_msix_rx(unsigned long data)
+{
+	struct al_crypto_device *device = (struct al_crypto_device *)data;
+	unsigned int status;
+
+	status = al_udma_iofic_read_cause(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_D);
+
+	if (unlikely(status))
+		al_crypto_group_d_errors_handler(device);
+
+	al_crypto_cleanup_q_group_fn(device, AL_INT_GROUP_B);
+
+	al_udma_iofic_unmask(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_A,
+		AL_INT_GROUP_A_GROUP_B_SUM);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup tasklet for legacy (INTx) mode: dispatch on the group A summary
+ * cause, then unmask the group B summary masked by the irq handler.
+ *
+ * NOTE(review): the irq handler masks both GROUP_B_SUM and GROUP_D_SUM,
+ * but only GROUP_B_SUM is unmasked here; GROUP_D_SUM is re-unmasked only
+ * inside the error handler (and only for APP_EXT_INT causes) - confirm
+ * this asymmetry is intentional.
+ */
+static void al_crypto_cleanup_tasklet_legacy(unsigned long data)
+{
+	struct al_crypto_device *device = (struct al_crypto_device *)data;
+	unsigned int status;
+
+	status = al_udma_iofic_read_cause(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_A);
+
+	if (unlikely(status & AL_INT_GROUP_A_GROUP_D_SUM))
+		al_crypto_group_d_errors_handler(device);
+	else if (status & AL_INT_GROUP_A_GROUP_B_SUM)
+		al_crypto_cleanup_q_group_fn(device, AL_INT_GROUP_B);
+
+	al_udma_iofic_unmask(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_A,
+		AL_INT_GROUP_A_GROUP_B_SUM);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Update the LRU list according to the currently accessed entry: move the
+ * entry owning 'ctx' to the tail (most recently used) position.
+ * LRU list accesses are serialized by chan->prep_lock (see
+ * al_crypto_cleanup_single_crc).
+ */
+void al_crypto_cache_update_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx)
+{
+	struct list_head *ptr;
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+	uint32_t list_idx = 0;
+
+	/* skip update if cache not yet populated */
+	if (unlikely(chan->cache_lru_count <= 1))
+		return;
+
+	list_for_each(ptr, &chan->cache_lru_list) {
+		lru_entry = list_entry(ptr,
+				struct al_crypto_cache_lru_entry,
+				list);
+		if (lru_entry->ctx == ctx)
+			break;
+		list_idx++;
+	}
+
+	/* The entry has to be in the list */
+	BUG_ON(lru_entry->ctx != ctx);
+
+	/* move to tail only if needed */
+	if (list_idx != (chan->cache_lru_count - 1))
+		list_move_tail(ptr, &chan->cache_lru_list);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Translate a per-channel cache entry index into the global device cache
+ * index: [0..15] in the sa cache for crypt/auth channels, [0..7] in the
+ * crc iv cache for crc channels.
+ */
+static inline uint32_t
+al_crypto_ring_cache_idx(struct al_crypto_chan *chan, int cache_idx)
+{
+	struct al_crypto_device *device = chan->device;
+	int first_crc_chan = device->num_channels - device->crc_channels;
+	int chan_idx = chan->idx;
+
+	/* crc channels are numbered relative to the first crc channel */
+	if (chan_idx >= first_crc_chan)
+		chan_idx -= first_crc_chan;
+
+	return (chan_idx * chan->cache_entries_num) + cache_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Replace least recently used cache entry with current entry.
+ *
+ * If free cache slots remain, claims one; otherwise evicts the LRU entry
+ * and invalidates its owner's cached state. Returns the device cache
+ * index assigned to 'ctx'; *old_ctx (if non-NULL) receives the evicted
+ * owner, or NULL when nothing was evicted.
+ */
+uint32_t al_crypto_cache_replace_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx,
+		struct al_crypto_cache_state **old_ctx)
+{
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+
+	if (chan->cache_lru_count < chan->cache_entries_num) {
+		/* find a free entry */
+		int i;
+		for (i = 0; i < chan->cache_entries_num; i++) {
+			lru_entry = &chan->cache_lru_entries[i];
+			if (lru_entry->ctx && lru_entry->ctx->cached)
+				continue;
+			else
+				break;
+		}
+
+		BUG_ON(!lru_entry);
+		BUG_ON(i >= chan->cache_entries_num);
+
+		lru_entry->cache_idx =
+			al_crypto_ring_cache_idx(chan, i);
+		lru_entry->ctx = ctx;
+
+		/* newly claimed entries become most recently used */
+		list_add_tail(&lru_entry->list,
+			&chan->cache_lru_list);
+		chan->cache_lru_count++;
+		if (old_ctx)
+			*old_ctx = NULL;
+	} else {
+		AL_CRYPTO_STATS_INC(chan->stats_prep.cache_misses, 1);
+		lru_entry = list_first_entry(&chan->cache_lru_list,
+				struct al_crypto_cache_lru_entry,
+				list);
+		/* Invalidate old ctx */
+		lru_entry->ctx->cached = false;
+		/* Return old ctx if needed */
+		if (old_ctx)
+			*old_ctx = lru_entry->ctx;
+		/* Connect new ctx */
+		lru_entry->ctx = ctx;
+		/* Move current entry to end of LRU list */
+		list_rotate_left(&chan->cache_lru_list);
+	}
+
+	ctx->cached = true;
+	ctx->idx = lru_entry->cache_idx;
+	return lru_entry->cache_idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Remove the entry owning 'ctx' from the LRU list and mark the ctx as no
+ * longer cached. No-op when the list is empty; BUGs if ctx is not found.
+ */
+void al_crypto_cache_remove_lru(struct al_crypto_chan *chan,
+		struct al_crypto_cache_state *ctx)
+{
+	struct list_head *ptr;
+	struct al_crypto_cache_lru_entry *lru_entry = NULL;
+
+	/* lru list is empty */
+	if (chan->cache_lru_count == 0)
+		return;
+
+	/* removed the set-but-unused list_idx counter (compiler warning) */
+	list_for_each(ptr, &chan->cache_lru_list) {
+		lru_entry = list_entry(ptr,
+				struct al_crypto_cache_lru_entry,
+				list);
+		if (lru_entry->ctx == ctx)
+			break;
+	}
+
+	/* The entry has to be in the list */
+	BUG_ON(lru_entry->ctx != ctx);
+
+	list_del(ptr);
+	lru_entry->ctx = NULL;
+	chan->cache_lru_count--;
+	ctx->cached = false;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Move ring head past the prepared descriptors and hand them to the hw.
+ * NOTE(review): reads sw_desc_num_locked/tx_desc_produced set during
+ * preparation - presumably called with chan->prep_lock held; confirm
+ * against callers.
+ */
+void al_crypto_tx_submit(struct al_crypto_chan *chan)
+{
+	dev_dbg(
+		to_dev(chan),
+		"%s: %p\n",
+		__func__,
+		chan);
+
+	/* according to Documentation/circular-buffers.txt we should have */
+	/* smp_wmb before incrementing the head, however, the */
+	/* al_crypto_dma_action contains writel() which implies dmb on ARM */
+	/* so this smp_wmb() can be omitted on ARM platforms */
+	/*smp_wmb();*/ /* commit the item before incrementing the head */
+	chan->head += chan->sw_desc_num_locked;
+	/* in our case the consumer (interrupt handler) will be waken up by */
+	/* the hw, so we send the transaction to the hw after incrementing */
+	/* the head */
+
+	al_crypto_dma_action(
+		chan->hal_crypto,
+		chan->idx,
+		chan->tx_desc_produced);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set interrupt moderation interval, each tick ~= 1.5usecs.
+ * Applies the interval to the group B interrupt of every channel and
+ * caches the value on the device.
+ */
+void al_crypto_set_int_moderation(struct al_crypto_device *device, int interval)
+{
+	int i;
+
+	for (i = 0; i < device->num_channels; i++)
+		al_iofic_msix_moder_interval_config(
+			&((struct unit_regs *)(device->udma_regs_base))->gen.
+			interrupt_regs.main_iofic,
+			AL_INT_GROUP_B,
+			i,
+			interval);
+
+	device->int_moderation = interval;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Get the last interrupt moderation interval set via
+ * al_crypto_set_int_moderation.
+ */
+int al_crypto_get_int_moderation(struct al_crypto_device *device)
+{
+	return device->int_moderation;
+}
diff --git a/drivers/crypto/al/al_crypto_crc.c b/drivers/crypto/al/al_crypto_crc.c
new file mode 100644
index 0000000..9a41776
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_crc.c
@@ -0,0 +1,655 @@
+/*
+ * drivers/crypto/al_crypto_crc.c
+ *
+ * Annapurna Labs Crypto driver - crc/checksum algorithms
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * Algorithm registration code and chained scatter/gather lists
+ * handling based on caam driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <crypto/internal/hash.h>
+
+#include "al_crypto.h"
+#include "al_hal_ssm_crypto.h"
+#include "al_hal_ssm_crc_memcpy.h"
+
+#define AL_CRYPTO_CRA_PRIORITY 300
+
+static int crc_init(struct ahash_request *req);
+
+static int crc_update(struct ahash_request *req);
+
+static int crc_final(struct ahash_request *req);
+
+static int crc_finup(struct ahash_request *req);
+
+static int crc_digest(struct ahash_request *req);
+
+static int crc_export(struct ahash_request *req, void *out);
+
+static int crc_import(struct ahash_request *req, const void *in);
+
+static int crc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen);
+
+/* Per-request context for CRC/checksum requests (lives in the ahash
+ * request ctx area).
+ */
+struct al_crc_req_ctx {
+	/* Make sure the following field doesn't share a cache line with
+	 * other fields.
+	 * This field is DMAed */
+	uint32_t result ____cacheline_aligned;
+	/* true once the final/finup/digest sub-request has been issued */
+	bool last ____cacheline_aligned;
+	/* tracks whether the intermediate CRC lives in the device cache */
+	struct al_crypto_cache_state cache_state;
+	/* DMA address of 'result' (mapped bidirectional in crc_init) */
+	dma_addr_t crc_dma_addr;
+};
+
+/* Per-tfm context: the channel serving this tfm and the checksum type */
+struct al_crc_ctx {
+	struct al_crypto_chan *chan;
+	enum al_crc_checksum_type crcsum_type;
+	/* initial CRC seed used by crc_init; presumably set via
+	 * crc_setkey - default 0 (see al_crc_cra_init) */
+	uint32_t key;
+};
+
+/* Template describing one supported CRC algorithm */
+struct al_crc_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	struct ahash_alg template_ahash;
+	enum al_crc_checksum_type crcsum_type;
+};
+
+/* Supported algorithms: currently crc32c only */
+static struct al_crc_template driver_crc[] = {
+	{
+		.name = "crc32c",
+		.driver_name = "crc32c-al",
+		.blocksize = CHKSUM_BLOCK_SIZE,
+		.template_ahash = {
+			.init = crc_init,
+			.update = crc_update,
+			.final = crc_final,
+			.finup = crc_finup,
+			.digest = crc_digest,
+			.export = crc_export,
+			.import = crc_import,
+			.setkey = crc_setkey,
+			.halg = {
+				.digestsize = CHKSUM_DIGEST_SIZE,
+			},
+		},
+		.crcsum_type = AL_CRC_CHECKSUM_CRC32C,
+	},
+};
+
+/* Registered algorithm instance (one per driver_crc entry) */
+struct al_crc {
+	struct list_head entry;
+	struct al_crypto_device *device;
+	enum al_crc_checksum_type crcsum_type;
+	struct ahash_alg ahash_alg;
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* tfm init: bind the new tfm to a CRC-capable channel (round-robin over
+ * the trailing crc_channels channels), record the checksum type and
+ * reserve room for the per-request context.
+ */
+static int al_crc_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		container_of(halg, struct ahash_alg, halg);
+	struct al_crc *al_crc =
+		container_of(alg, struct al_crc, ahash_alg);
+	struct al_crc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct al_crypto_device *device = al_crc->device;
+	/* CRC channels occupy the tail of the channel array */
+	int chan_idx = (atomic_inc_return(&device->crc_tfm_count) %
+			device->crc_channels) +
+			(device->num_channels - device->crc_channels);
+
+	ctx->chan = device->channels[chan_idx];
+
+	ctx->crcsum_type = al_crc->crcsum_type;
+
+	/* default CRC seed */
+	ctx->key = 0;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct al_crc_req_ctx));
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.crc_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* tfm exit: statistics bookkeeping only - nothing to free */
+static void al_crc_cra_exit(struct crypto_tfm *tfm)
+{
+	struct al_crc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.crc_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* DMA unmap buffers for a crc request: the source scatterlist (if mapped)
+ * and, on the final sub-request, the result buffer - copying the CRC
+ * value back into req->result (little endian).
+ */
+static inline void al_crypto_dma_unmap_crc(
+	struct al_crypto_chan *chan,
+	struct al_crypto_sw_desc *desc)
+{
+	struct ahash_request *req =
+		(struct ahash_request *)desc->req;
+	struct al_crc_req_ctx *req_ctx =
+		ahash_request_ctx(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+
+	if (desc->src_nents)
+		dma_unmap_sg(to_dev(chan),
+			req->src,
+			desc->src_nents,
+			DMA_TO_DEVICE);
+
+	if (req_ctx->last) {
+		dma_unmap_single(to_dev(chan),
+			req_ctx->crc_dma_addr,
+			digestsize,
+			DMA_BIDIRECTIONAL);
+		/* the device wrote the final CRC into req_ctx->result */
+		put_unaligned_le32(req_ctx->result, req->result);
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup single crc request - invoked from cleanup tasklet (interrupt
+ * handler): unmap DMA buffers, drop the cache entry on the final
+ * sub-request and complete the request towards the crypto API.
+ * NOTE(review): comp_status is currently ignored - completion is always
+ * reported with status 0; confirm errors are surfaced elsewhere.
+ */
+void al_crypto_cleanup_single_crc(
+	struct al_crypto_chan *chan,
+	struct al_crypto_sw_desc *desc,
+	uint32_t comp_status)
+{
+	struct ahash_request *req =
+		(struct ahash_request *)desc->req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	al_crypto_dma_unmap_crc(chan, desc);
+
+	/* LRU list access has to be protected */
+	if (req_ctx->last) {
+		spin_lock(&ctx->chan->prep_lock);
+		if (req_ctx->cache_state.cached)
+			al_crypto_cache_remove_lru(chan, &req_ctx->cache_state);
+		spin_unlock(&ctx->chan->prep_lock);
+	}
+
+	req->base.complete(&req->base, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* ahash .init: seed the intermediate result with the tfm key and DMA-map
+ * it for the lifetime of the request (unmapped in al_crypto_dma_unmap_crc
+ * when the final sub-request completes).
+ */
+static int crc_init(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+
+	req_ctx->last = false;
+	req_ctx->cache_state.cached = false;
+
+	/* seed the intermediate result with the key (little endian) */
+	put_unaligned_le32(ctx->key, &req_ctx->result);
+
+	req_ctx->crc_dma_addr = dma_map_single(to_dev(chan),
+					       &req_ctx->result,
+					       digestsize,
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(to_dev(chan), req_ctx->crc_dma_addr)) {
+		dev_err(to_dev(chan), "dma_map_single failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Map the request's source scatterlist into the transaction's source
+ * buffer array; *src_idx returns the number of hal buffers used.
+ * (The local hal_crc_xaction pointer was set but never used - removed.)
+ */
+static inline void crc_req_prepare_xaction_buffers(struct ahash_request *req,
+	struct al_crypto_sw_desc *desc,
+	int nbytes,
+	int src_nents,
+	int *src_idx)
+{
+	*src_idx = 0;
+
+	if (src_nents)
+		sg_map_to_xaction_buffers(req->src, desc->src_bufs, nbytes,
+					  src_idx);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Bump the per-channel crc statistics: totals plus a request-size
+ * histogram (<=512, 513-2048, 2049-4096, >4096 bytes). */
+static inline void crc_update_stats(int nbytes,
+	struct al_crypto_chan *chan)
+{
+	AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs, 1);
+	AL_CRYPTO_STATS_INC(chan->stats_prep.crc_bytes, nbytes);
+
+	if (nbytes <= 512)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_le512, 1);
+	else if (nbytes <= 2048)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_512_2048, 1);
+	else if (nbytes <= 4096)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_2048_4096, 1);
+	else
+		AL_CRYPTO_STATS_INC(chan->stats_prep.crc_reqs_gt4096, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Build the hal CRC transaction for one sub-request: checksum type,
+ * in/out XOR masks, IV/cache handling, source buffers and flags.
+ */
+static inline void crc_req_prepare_xaction(struct ahash_request *req,
+	int nbytes,
+	struct al_crypto_sw_desc *desc,
+	int src_nents)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	struct al_crc_transaction *xaction;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	int src_idx;
+
+	/* prepare hal transaction */
+	xaction = &desc->hal_crc_xaction;
+	memset(xaction, 0, sizeof(struct al_crc_transaction));
+	xaction->crcsum_type = ctx->crcsum_type;
+	xaction->xor_valid = AL_TRUE;
+	xaction->in_xor = ~0;
+	xaction->res_xor = ~0;
+
+	/* if the entry is not cached, take stored iv */
+	if (!(req_ctx->cache_state.cached)) {
+		xaction->crc_iv_in.addr = req_ctx->crc_dma_addr;
+		xaction->crc_iv_in.len = digestsize;
+	}
+
+	/* both store in cache and output intermediate result */
+	/* cached result will be used unless it will be replaced */
+	xaction->crc_out.addr = req_ctx->crc_dma_addr;
+	xaction->crc_out.len = digestsize;
+
+	if (likely(!req_ctx->last)) {
+		xaction->st_crc_out = AL_TRUE;
+
+		if (!req_ctx->cache_state.cached) {
+			xaction->cached_crc_indx = al_crypto_cache_replace_lru(
+							chan,
+							&req_ctx->cache_state,
+							NULL);
+			xaction->flags = AL_SSM_BARRIER;
+		} else {
+			al_crypto_cache_update_lru(chan, &req_ctx->cache_state);
+			xaction->cached_crc_indx = req_ctx->cache_state.idx;
+		}
+	}
+
+	crc_req_prepare_xaction_buffers(req, desc, nbytes, src_nents,
+					&src_idx);
+
+	xaction->src.bufs = &desc->src_bufs[0];
+	xaction->src.num = src_idx;
+
+	dev_dbg(to_dev(chan),
+		"%s: req_ctx->cache_state.cached=%d\n",
+		__func__, req_ctx->cache_state.cached);
+
+	/* OR rather than assign: a plain assignment here discarded the
+	 * AL_SSM_BARRIER flag set above for newly cached entries
+	 * (xaction->flags was zeroed by the memset, so |= is safe) */
+	xaction->flags |= AL_SSM_INTERRUPT;
+
+	crc_update_stats(nbytes, chan);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Main CRC processing function that handles update/final/finup and digest
+ *
+ * Implementation is based on the assumption that the caller waits for
+ * completion of every operation before issuing the next operation
+ *
+ * Returns -EINPROGRESS when a transaction was queued to the engine,
+ * 0 when there was nothing to queue, or a negative error code.
+ */
+static int crc_process_req(struct ahash_request *req, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+
+	int idx;
+	int src_nents = 0;
+	struct al_crypto_sw_desc *desc;
+	int rc = 0;
+
+	dev_dbg(to_dev(chan),
+		"%s: nbytes=%d, last=%d\n",
+		__func__, nbytes, req_ctx->last);
+
+	if (nbytes) {
+		src_nents = sg_count(req->src, nbytes);
+
+		dev_dbg(to_dev(chan), "%s: src_nents=%d\n", __func__,
+			src_nents);
+
+		/* The mapping result was previously ignored; a failed
+		 * mapping would have been handed to the HW.
+		 * NOTE(review): an IOMMU may legally merge entries and
+		 * return fewer than src_nents - confirm the HAL copes
+		 * with the original nents in that case.
+		 */
+		if (!dma_map_sg(to_dev(chan), req->src, src_nents,
+				DMA_TO_DEVICE)) {
+			dev_err(to_dev(chan), "dma_map_sg failed!\n");
+			return -ENOMEM;
+		}
+
+		spin_lock_bh(&chan->prep_lock);
+		if (likely(al_crypto_get_sw_desc(chan, 1) == 0))
+			idx = chan->head;
+		else {
+			spin_unlock_bh(&chan->prep_lock);
+			dev_err(
+				to_dev(chan),
+				"%s: al_crypto_get_sw_desc failed!\n",
+				__func__);
+
+			if (src_nents)
+				dma_unmap_sg(to_dev(chan), req->src, src_nents,
+						DMA_TO_DEVICE);
+
+			return -ENOSPC;
+		}
+
+		chan->sw_desc_num_locked = 1;
+		chan->tx_desc_produced = 0;
+
+		desc = al_crypto_get_ring_ent(chan, idx);
+		desc->req = (void *)req;
+		desc->req_type = AL_CRYPTO_REQ_CRC;
+		desc->src_nents = src_nents;
+
+		crc_req_prepare_xaction(req, nbytes, desc, src_nents);
+
+		/* send crypto transaction to engine */
+		rc = al_crc_csum_prepare(chan->hal_crypto, chan->idx,
+					&desc->hal_crc_xaction);
+		if (unlikely(rc != 0)) {
+			dev_err(to_dev(chan),
+				"al_crypto_dma_prepare failed!\n");
+
+			al_crypto_dma_unmap_crc(chan, desc);
+
+			spin_unlock_bh(&chan->prep_lock);
+			return rc;
+		}
+
+		chan->tx_desc_produced += desc->hal_crc_xaction.tx_descs_count;
+
+		al_crypto_tx_submit(chan);
+
+		spin_unlock_bh(&chan->prep_lock);
+
+		return -EINPROGRESS;
+	} else if (likely(req_ctx->last)) {
+		/* final with no new data: the accumulated result is ready */
+		dma_unmap_single(to_dev(chan), req_ctx->crc_dma_addr,
+				digestsize, DMA_BIDIRECTIONAL);
+		put_unaligned_le32(req_ctx->result, req->result);
+
+		/* LRU cache access must be serialized with the prep path */
+		spin_lock_bh(&ctx->chan->prep_lock);
+		if (req_ctx->cache_state.cached)
+			al_crypto_cache_remove_lru(chan, &req_ctx->cache_state);
+		spin_unlock_bh(&ctx->chan->prep_lock);
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto API .update entry: feed more data, keeping the state open. */
+static int crc_update(struct ahash_request *req)
+{
+	struct al_crc_req_ctx *state = ahash_request_ctx(req);
+
+	state->last = false;
+	return crc_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto API .final entry: no new data, close and emit the result. */
+static int crc_final(struct ahash_request *req)
+{
+	struct al_crc_req_ctx *state = ahash_request_ctx(req);
+
+	state->last = true;
+	return crc_process_req(req, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto API .finup entry: hash the remaining data and close. */
+static int crc_finup(struct ahash_request *req)
+{
+	struct al_crc_req_ctx *state = ahash_request_ctx(req);
+
+	state->last = true;
+	return crc_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto API .digest entry: init + finup over the whole request.
+ *
+ * The init() return value was previously ignored; it can fail (e.g. on a
+ * DMA mapping error), in which case the request must not proceed.
+ */
+static int crc_digest(struct ahash_request *req)
+{
+	struct al_crc_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int rc;
+
+	rc = ahash->init(req);
+	if (unlikely(rc))
+		return rc;
+
+	req_ctx->last = true;
+
+	return crc_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Serialize the tfm context followed by the request state into @out. */
+static int crc_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_state = ahash_request_ctx(req);
+	u8 *dst = out;
+
+	memcpy(dst, ctx, sizeof(struct al_crc_ctx));
+	memcpy(dst + sizeof(struct al_crc_ctx), req_state,
+		sizeof(struct al_crc_req_ctx));
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Restore the tfm context and request state serialized by crc_export(). */
+static int crc_import(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crc_req_ctx *req_state = ahash_request_ctx(req);
+	const u8 *src = in;
+
+	memcpy(ctx, src, sizeof(struct al_crc_ctx));
+	memcpy(req_state, src + sizeof(struct al_crc_ctx),
+		sizeof(struct al_crc_req_ctx));
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set the CRC seed; only a full 32-bit key is accepted.
+ * The seed is stored bit-inverted.
+ */
+static int crc_setkey(struct crypto_ahash *ahash, const u8 *key,
+		unsigned int keylen)
+{
+	struct al_crc_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	if (keylen != sizeof(ctx->key)) {
+		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key = ~get_unaligned_le32(key);
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate a registration record and fill it from a CRC template. */
+static struct al_crc *al_crc_alloc(
+	struct al_crypto_device *device,
+	struct al_crc_template *template)
+{
+	struct al_crc *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg) {
+		dev_err(&device->pdev->dev, "failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* start from the template, then fill the instance fields */
+	t_alg->ahash_alg = template->template_ahash;
+	alg = &t_alg->ahash_alg.halg.base;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		template->driver_name);
+
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = al_crc_cra_init;
+	alg->cra_exit = al_crc_cra_exit;
+	alg->cra_priority = AL_CRYPTO_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct al_crc_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
+
+	t_alg->crcsum_type = template->crcsum_type;
+	t_alg->device = device;
+
+	return t_alg;
+}
+
+#ifdef CONFIG_BTRFS_AL_FAST_CRC_DMA
+/* Lookup tables exported to the BTRFS fast-CRC shortcut: one DMA handle
+ * and HW queue id per CRC/memcpy-capable channel, plus the number of
+ * valid entries. Arrays are sized NR_CPUS - presumably at most one
+ * queue per CPU is expected; confirm against the BTRFS consumer.
+ */
+struct al_ssm_dma *al_btrfs_crc_dma[NR_CPUS];
+int al_btrfs_crc_dma_qid[NR_CPUS];
+int al_btrfs_crc_q_count;
+
+EXPORT_SYMBOL(al_btrfs_crc_dma);
+EXPORT_SYMBOL(al_btrfs_crc_dma_qid);
+EXPORT_SYMBOL(al_btrfs_crc_q_count);
+
+/* Populate the shortcut tables instead of registering ahash algorithms. */
+int al_crypto_crc_init(struct al_crypto_device *device)
+{
+	int i;
+	/*Instead of registering AHASH crypto algs in system,
+	initialize data structures for BTRFS FAST CRC shortcut*/
+	for (i = 0; i < device->num_channels; i++) {
+		if (device->channels[i]->type == AL_MEM_CRC_MEMCPY_Q) {
+			al_btrfs_crc_dma[al_btrfs_crc_q_count] = &device->hal_crypto;
+			al_btrfs_crc_dma_qid[al_btrfs_crc_q_count] = i;
+			al_btrfs_crc_q_count++;
+		}
+	}
+	return 0;
+}
+
+/* Nothing was registered in shortcut mode, so nothing to undo here. */
+void al_crypto_crc_terminate(struct al_crypto_device *device)
+{
+
+}
+
+#else
+/******************************************************************************
+ *****************************************************************************/
+/* Register all CRC/checksum ahash algorithms the device supports.
+ *
+ * Returns 0 on full success. NOTE(review): if the last template fails to
+ * register, its error is returned even when earlier algorithms registered
+ * fine (they remain on crc_list) - confirm callers treat this as intended.
+ */
+int al_crypto_crc_init(struct al_crypto_device *device)
+{
+	int i;
+	int err = 0;
+
+	INIT_LIST_HEAD(&device->crc_list);
+
+	/* no channels reserved for CRC - nothing to register */
+	if (!device->crc_channels)
+		return 0;
+
+	/* -1 so the first tfm allocation maps to channel index 0 */
+	atomic_set(&device->crc_tfm_count, -1);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_crc); i++) {
+		struct al_crc *t_alg;
+
+		t_alg = al_crc_alloc(device, &driver_crc[i]);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(&device->pdev->dev,
+				"%s alg allocation failed\n",
+				driver_crc[i].driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			dev_warn(&device->pdev->dev,
+				"%s alg registration failed\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &device->crc_list);
+	}
+
+	if (!list_empty(&device->crc_list))
+		dev_info(&device->pdev->dev,
+			"crc/csum algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Unregister and free every algorithm added by al_crypto_crc_init(). */
+void al_crypto_crc_terminate(struct al_crypto_device *device)
+{
+	struct al_crc *t_alg, *n;
+
+	/* crc_list was never initialized - init bailed out before
+	 * INIT_LIST_HEAD, so there is nothing to tear down
+	 */
+	if (!device->crc_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &device->crc_list, entry) {
+		crypto_unregister_ahash(&t_alg->ahash_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+#endif
diff --git a/drivers/crypto/al/al_crypto_hash.c b/drivers/crypto/al/al_crypto_hash.c
new file mode 100644
index 0000000..f6c7f01
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_hash.c
@@ -0,0 +1,1169 @@
+/*
+ * drivers/crypto/al_crypto_hash.c
+ *
+ * Annapurna Labs Crypto driver - hash algorithms
+ *
+ * Copyright (C) 2012 Annapurna Labs Ltd.
+ *
+ * Algorithm registration code and chained scatter/gather lists
+ * handling based on caam driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+#ifndef DEBUG
+#define DEBUG
+#endif
+*/
+
+/* NOTE(review): the original include list was garbled (empty #include
+ * lines); reconstructed from the identifiers used below - verify.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <asm/unaligned.h>
+
+#include "al_crypto.h"
+#include "al_hal_ssm_crypto.h"
+
+#define AL_CRYPTO_CRA_PRIORITY 300
+
+#define AL_CRYPTO_HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define AL_CRYPTO_HASH_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+
+static int ahash_init(struct ahash_request *req);
+
+static int ahash_update(struct ahash_request *req);
+
+static int ahash_final(struct ahash_request *req);
+
+static int ahash_finup(struct ahash_request *req);
+
+static int ahash_digest(struct ahash_request *req);
+
+static int ahash_export(struct ahash_request *req, void *out);
+
+static int ahash_import(struct ahash_request *req, const void *in);
+
+static int ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen);
+
+/* ahash request ctx */
+struct al_crypto_hash_req_ctx {
+	/* Note 1:
+	 * buf_0 and buf_1 are used for keeping the data that
+	 * was not hashed during current update for the next update
+	 * Note 2:
+	 * buf_0, buf_1 and interm are DMAed so they shouldn't
+	 * share the same cache line
+	 * with other fields
+	 * */
+	uint8_t buf_0[AL_CRYPTO_HASH_MAX_BLOCK_SIZE] ____cacheline_aligned;
+	uint8_t buf_1[AL_CRYPTO_HASH_MAX_BLOCK_SIZE] ____cacheline_aligned;
+	/* intermediate state */
+	uint8_t interm[AL_CRYPTO_HASH_MAX_DIGEST_SIZE] ____cacheline_aligned;
+	int buflen_0 ____cacheline_aligned;	/* valid bytes in buf_0 */
+	int buflen_1;				/* valid bytes in buf_1 */
+	uint8_t current_buf; /* select active buffer for current update */
+	dma_addr_t buf_dma_addr;	/* mapping of the active buffer */
+	int buf_dma_len;		/* length of that mapping, 0 = unmapped */
+	dma_addr_t interm_dma_addr;	/* mapping of interm[] */
+	bool first;	/* no block has been hashed yet for this request */
+	bool last;	/* final/finup/digest: close the hash */
+	uint32_t hashed_len;	/* total bytes already submitted to the HW */
+};
+
+/* Per-algorithm registration template: crypto API names and parameters
+ * plus the HW SA configuration and the SW shash used for intermediate
+ * computations (e.g. HMAC key preprocessing).
+ */
+struct al_crypto_hash_template {
+	char name[CRYPTO_MAX_ALG_NAME];		/* e.g. "sha1" */
+	char driver_name[CRYPTO_MAX_ALG_NAME];	/* e.g. "sha1-al" */
+	char hmac_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	struct ahash_alg template_ahash;
+	enum al_crypto_sa_auth_type auth_type;
+	enum al_crypto_sa_sha2_mode sha2_mode;
+	enum al_crypto_sa_op sa_op;
+	char sw_hash_name[CRYPTO_MAX_ALG_NAME];
+	/* offset/size of the raw state inside the SW hash's state struct */
+	unsigned int sw_hash_interm_offset;
+	unsigned int sw_hash_interm_size;
+};
+
+/* Registration templates for the HW-backed hash algorithms.
+ *
+ * NOTE: the original table contained two byte-identical sha256 /
+ * "sha256-al" entries; the duplicate was removed, since registering the
+ * same cra_driver_name twice can only fail (and at best logs a warning).
+ */
+static struct al_crypto_hash_template driver_hash[] = {
+	{
+		.name = "sha1",
+		.driver_name = "sha1-al",
+		.hmac_name = "hmac(sha1)",
+		.hmac_driver_name = "hmac-sha1-al",
+		.blocksize = SHA1_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA1,
+		.sha2_mode = 0,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha1",
+		.sw_hash_interm_offset = offsetof(struct sha1_state, state),
+		.sw_hash_interm_size = sizeof(
+				((struct sha1_state *)0)->state),
+	},
+	{
+		.name = "sha256",
+		.driver_name = "sha256-al",
+		.hmac_name = "hmac(sha256)",
+		.hmac_driver_name = "hmac-sha256-al",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_256,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha256",
+		.sw_hash_interm_offset = offsetof(struct sha256_state, state),
+		.sw_hash_interm_size = sizeof(
+				((struct sha256_state *)0)->state),
+	},
+	{
+		.name = "sha512",
+		.driver_name = "sha512-al",
+		.hmac_name = "hmac(sha512)",
+		.hmac_driver_name = "hmac-sha512-al",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_512,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha512",
+		.sw_hash_interm_offset = offsetof(struct sha512_state, state),
+		.sw_hash_interm_size = sizeof(
+				((struct sha512_state *)0)->state),
+	},
+	{
+		.name = "sha384",
+		.driver_name = "sha384-al",
+		.hmac_name = "hmac(sha384)",
+		.hmac_driver_name = "hmac-sha384-al",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_SHA2,
+		.sha2_mode = AL_CRYPT_SHA2_384,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "sha384",
+		/* SHA-384 shares sha512_state with SHA-512 */
+		.sw_hash_interm_offset = offsetof(struct sha512_state, state),
+		.sw_hash_interm_size = sizeof(
+				((struct sha512_state *)0)->state),
+	},
+	{
+		.name = "md5",
+		.driver_name = "md5-al",
+		.hmac_name = "hmac(md5)",
+		.hmac_driver_name = "hmac-md5-al",
+		.blocksize = MD5_HMAC_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = sizeof(struct al_crypto_ctx),
+			},
+		},
+		.auth_type = AL_CRYPT_AUTH_MD5,
+		.sha2_mode = 0,
+		.sa_op = AL_CRYPT_AUTH_ONLY,
+		.sw_hash_name = "md5",
+		.sw_hash_interm_offset = 0,
+		.sw_hash_interm_size = sizeof(struct md5_state),
+	},
+};
+
+/* A registered hash algorithm instance, bound to its owning device. */
+struct al_crypto_hash {
+	struct list_head entry;		/* link in the device's alg list */
+	struct al_crypto_device *device;
+	enum al_crypto_sa_auth_type auth_type;
+	enum al_crypto_sa_sha2_mode sha2_mode;
+	enum al_crypto_sa_op sa_op;
+	struct ahash_alg ahash_alg;	/* the alg registered with the API */
+	/* SW shash used for HMAC key preprocessing ("" = none needed) */
+	char sw_hash_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int sw_hash_interm_offset;
+	unsigned int sw_hash_interm_size;
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Precomputed digests of the empty message, copied straight to
+ * req->result when a final/digest request carries no data at all
+ * (the HW engine is not involved in that case).
+ * (Also fixes "space before ','" style errors in the MD5 table.)
+ */
+static u8 zero_message_hash_md5[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e
+};
+
+static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+	0xaf, 0xd8, 0x07, 0x09
+};
+
+static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+static u8 zero_message_hash_sha384[SHA384_DIGEST_SIZE] = {
+	0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+	0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+	0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+	0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+	0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+	0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b,
+};
+
+static u8 zero_message_hash_sha512[SHA512_DIGEST_SIZE] = {
+	0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+	0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+	0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+	0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+	0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+	0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+	0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+	0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e,
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Encode a digest size (in bytes) as the SA signature_size field.
+ * The encoding appears to be (digest size in 32-bit words) - 1, e.g.
+ * SHA-1's 20 bytes -> 4; confirm against the HAL/HW documentation.
+ */
+static inline int to_signature_size(int digest_size)
+{
+	return (digest_size / 4) - 1;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* tfm init: bind the tfm to a channel (round-robin over the non-CRC
+ * channels), allocate the optional SW shash fallback and the
+ * DMA-coherent HW SA, and set the request ctx size.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int al_crypto_hash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		container_of(halg, struct ahash_alg, halg);
+	struct al_crypto_hash *al_crypto_hash =
+		container_of(alg, struct al_crypto_hash, ahash_alg);
+	struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct al_crypto_device *device = al_crypto_hash->device;
+	int chan_idx = atomic_inc_return(&device->tfm_count) %
+		(device->num_channels - device->crc_channels);
+	struct crypto_shash *sw_hash = NULL;
+
+	memset(&ctx->sa, 0, sizeof(struct al_crypto_sa));
+
+	/* Allocate SW hash for hmac long key hashing and key XOR ipad/opad
+	 * intermediate calculations
+	 */
+	if (strlen(al_crypto_hash->sw_hash_name)) {
+		/* TODO: is CRYPTO_ALG_NEED_FALLBACK needed here? */
+		sw_hash = crypto_alloc_shash(al_crypto_hash->sw_hash_name, 0,
+				CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(sw_hash)) {
+			dev_err(&device->pdev->dev,
+				"%s: Failed to allocate SW hash!\n",
+				__func__);
+			return PTR_ERR(sw_hash);
+		}
+	}
+	ctx->sw_hash = sw_hash;
+
+	ctx->chan = device->channels[chan_idx];
+
+	ctx->sa.auth_type = al_crypto_hash->auth_type;
+	ctx->sa.sha2_mode = al_crypto_hash->sha2_mode;
+	ctx->sa.sa_op = al_crypto_hash->sa_op;
+	ctx->sa.signature_size = to_signature_size(
+			crypto_ahash_digestsize(ahash));
+
+	ctx->sa.auth_hmac_en = false;
+	ctx->cache_state.cached = false;
+	ctx->hw_sa = dma_alloc_coherent(&device->pdev->dev,
+			sizeof(struct al_crypto_hw_sa),
+			&ctx->hw_sa_dma_addr,
+			GFP_KERNEL);
+	/* Previously unchecked: a failed allocation would have been
+	 * dereferenced by al_crypto_hw_sa_init() below.
+	 */
+	if (!ctx->hw_sa) {
+		if (ctx->sw_hash)
+			crypto_free_shash(ctx->sw_hash);
+		ctx->sw_hash = NULL;
+		return -ENOMEM;
+	}
+
+	al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+		sizeof(struct al_crypto_hash_req_ctx));
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_INC(ctx->chan->stats_gen.ahash_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* tfm teardown: drop the SA cache entry, release the coherent HW SA and
+ * the SW shash fallback, and update the per-channel statistics.
+ */
+static void al_crypto_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		container_of(halg, struct ahash_alg, halg);
+	struct al_crypto_hash *hash =
+		container_of(alg, struct al_crypto_hash, ahash_alg);
+	struct al_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct al_crypto_device *device = hash->device;
+
+	/* LRU list access has to be protected */
+	spin_lock_bh(&ctx->chan->prep_lock);
+	if (ctx->cache_state.cached)
+		al_crypto_cache_remove_lru(ctx->chan, &ctx->cache_state);
+	spin_unlock_bh(&ctx->chan->prep_lock);
+
+	if (ctx->hw_sa_dma_addr)
+		dma_free_coherent(&device->pdev->dev,
+				sizeof(struct al_crypto_hw_sa),
+				ctx->hw_sa,
+				ctx->hw_sa_dma_addr);
+
+	if (ctx->sw_hash)
+		crypto_free_shash(ctx->sw_hash);
+
+	AL_CRYPTO_STATS_LOCK(&ctx->chan->stats_gen_lock);
+	AL_CRYPTO_STATS_DEC(ctx->chan->stats_gen.ahash_tfms, 1);
+	AL_CRYPTO_STATS_UNLOCK(&ctx->chan->stats_gen_lock);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* DMA unmap buffers for ahash request
+ *
+ * Releases the carried-over buffer mapping (if one was made) and the
+ * source SG mapping; on the closing request also unmaps the
+ * intermediate-digest buffer and copies the digest to req->result.
+ */
+static inline void al_crypto_dma_unmap_ahash(
+	struct al_crypto_chan *chan,
+	struct al_crypto_sw_desc *desc,
+	unsigned int digestsize)
+{
+	struct ahash_request *req =
+		(struct ahash_request *)desc->req;
+	struct al_crypto_hash_req_ctx *req_ctx =
+		ahash_request_ctx(req);
+
+	if (req_ctx->buf_dma_len)
+		dma_unmap_single(to_dev(chan),
+				req_ctx->buf_dma_addr,
+				req_ctx->buf_dma_len,
+				DMA_TO_DEVICE);
+	if (desc->src_nents)
+		dma_unmap_sg(to_dev(chan),
+				req->src,
+				desc->src_nents,
+				DMA_TO_DEVICE);
+
+	if (req_ctx->last) {
+		/* interm[] was mapped in ahash_init() for the whole request */
+		dma_unmap_single(to_dev(chan),
+				req_ctx->interm_dma_addr,
+				AL_CRYPTO_HASH_MAX_DIGEST_SIZE,
+				DMA_BIDIRECTIONAL);
+		memcpy(req->result, req_ctx->interm, digestsize);
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Copy the precomputed empty-message digest for the tfm's algorithm to
+ * req->result.
+ *
+ * Fix: an AL_CRYPT_AUTH_SHA2 tfm with an unrecognized sha2_mode used to
+ * fall through to memcpy() from a NULL pointer; it now errors out like
+ * the unsupported auth_type case.
+ */
+static inline void zero_message_result_copy(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	struct al_crypto_chan *chan = ctx->chan;
+	u8 *zero_message = NULL;
+
+	switch (ctx->sa.auth_type) {
+	case (AL_CRYPT_AUTH_MD5):
+		zero_message = zero_message_hash_md5;
+		break;
+	case (AL_CRYPT_AUTH_SHA1):
+		zero_message = zero_message_hash_sha1;
+		break;
+	case (AL_CRYPT_AUTH_SHA2):
+		if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_256)
+			zero_message = zero_message_hash_sha256;
+		if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_384)
+			zero_message = zero_message_hash_sha384;
+		if (ctx->sa.sha2_mode == AL_CRYPT_SHA2_512)
+			zero_message = zero_message_hash_sha512;
+		break;
+	default:
+		break;
+	}
+
+	if (!zero_message) {
+		dev_err(to_dev(chan),"ERROR, unsupported zero message\n");
+		return;
+	}
+
+	memcpy(req->result, zero_message, digestsize);
+}
+/******************************************************************************
+ *****************************************************************************/
+/* Cleanup single ahash request - invoked from cleanup tasklet (interrupt
+ * handler)
+ *
+ * NOTE(review): comp_status is currently ignored and the request always
+ * completes with status 0 - confirm whether HW completion errors should
+ * be propagated to req->base.complete().
+ */
+void al_crypto_cleanup_single_ahash(
+	struct al_crypto_chan *chan,
+	struct al_crypto_sw_desc *desc,
+	uint32_t comp_status)
+{
+	struct ahash_request *req =
+		(struct ahash_request *)desc->req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+
+	al_crypto_dma_unmap_ahash(chan, desc, digestsize);
+
+	req->base.complete(&req->base, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto API .init entry: reset the request state and map the
+ * intermediate-digest buffer. The mapping lives for the whole request
+ * chain and is released when the closing request completes (see
+ * al_crypto_dma_unmap_ahash()).
+ */
+static int ahash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+
+	req_ctx->first = true;
+	req_ctx->last = false;
+
+	req_ctx->current_buf = 0;
+	req_ctx->buflen_0 = 0;
+	req_ctx->buflen_1 = 0;
+	req_ctx->buf_dma_addr = 0;
+	req_ctx->interm_dma_addr = dma_map_single(to_dev(chan),
+			req_ctx->interm,
+			AL_CRYPTO_HASH_MAX_DIGEST_SIZE,
+			DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(to_dev(chan), req_ctx->interm_dma_addr)) {
+		dev_err(to_dev(chan),
+			"dma_map_single failed!\n");
+		return -ENOMEM;
+	}
+	req_ctx->hashed_len = 0;
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Fill desc->src_bufs for the transaction: the carried-over partial
+ * block from the previous update (if any) goes first, followed by data
+ * from the request's scatterlist. On return *src_idx holds the number
+ * of source buffers used.
+ */
+static inline void ahash_req_prepare_xaction_buffers(struct ahash_request *req,
+	struct al_crypto_sw_desc *desc,
+	int to_hash,
+	int src_nents,
+	int *buflen,
+	int *src_idx)
+{
+	struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_transaction *xaction;
+
+	xaction = &desc->hal_xaction;
+	*src_idx = 0;
+	if (*buflen) {
+		/* leftover bytes were mapped by the caller (buf_dma_addr) */
+		desc->src_bufs[*src_idx].addr = req_ctx->buf_dma_addr;
+		desc->src_bufs[*src_idx].len = *buflen;
+		xaction->auth_in_len += desc->src_bufs[*src_idx].len;
+		(*src_idx)++;
+	}
+
+	if (src_nents)
+		sg_map_to_xaction_buffers(req->src, desc->src_bufs, to_hash,
+				src_idx);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Account one hash transaction in the per-channel prep statistics,
+ * bucketed by authenticated input length.
+ */
+static inline void ahash_update_stats(struct al_crypto_transaction *xaction,
+		struct al_crypto_chan *chan)
+{
+	AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs, 1);
+	AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_bytes, xaction->auth_in_len);
+
+	/* the chain is ordered, so each test only needs an upper bound */
+	if (xaction->auth_in_len <= 512)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_le512, 1);
+	else if (xaction->auth_in_len <= 2048)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_512_2048, 1);
+	else if (xaction->auth_in_len <= 4096)
+		AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_2048_4096, 1);
+	else
+		AL_CRYPTO_STATS_INC(chan->stats_prep.ahash_reqs_gt4096, 1);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Build the HAL authentication transaction for one hash request:
+ * chaining (first/last flags, intermediate IV in/out), source buffers,
+ * and the SA cache slot.
+ */
+static inline void ahash_req_prepare_xaction(struct ahash_request *req,
+	struct al_crypto_sw_desc *desc,
+	int to_hash,
+	int src_nents,
+	uint8_t *buf,
+	int *buflen)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct al_crypto_chan *chan = ctx->chan;
+	struct al_crypto_transaction *xaction;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int ivsize;
+	int src_idx;
+
+	/* In SHA384 the ivsize is 64 bytes and not 48 bytes. */
+	ivsize = (digestsize == SHA384_DIGEST_SIZE) ?
+		SHA512_DIGEST_SIZE : digestsize;
+
+	/* prepare hal transaction */
+	xaction = &desc->hal_xaction;
+	memset(xaction, 0, sizeof(struct al_crypto_transaction));
+	xaction->auth_sign_in.len = 0;
+	xaction->auth_fl_valid = AL_TRUE;
+	xaction->auth_in_off = 0;
+	/* if first, there's no input intermediate */
+	if (unlikely(req_ctx->first)) {
+		req_ctx->first = false;
+		xaction->auth_first = AL_TRUE;
+		xaction->auth_iv_in.len = 0;
+		xaction->auth_iv_in.addr = (al_phys_addr_t)(uintptr_t)NULL;
+	} else {
+		/* continue from the intermediate state kept in interm[] */
+		xaction->auth_first = AL_FALSE;
+		xaction->auth_iv_in.addr = xaction->auth_iv_out.addr =
+			req_ctx->interm_dma_addr;
+		xaction->auth_iv_in.len = xaction->auth_iv_out.len =
+			ivsize;
+	}
+
+	if (unlikely(req_ctx->last)) {
+		/* closing transaction: emit the final signature instead of
+		 * an intermediate IV
+		 */
+		xaction->auth_last = AL_TRUE;
+		xaction->auth_sign_out.addr = req_ctx->interm_dma_addr;
+		xaction->auth_sign_out.len = digestsize;
+		xaction->auth_iv_out.len = 0;
+		xaction->auth_iv_out.addr = (al_phys_addr_t)(uintptr_t)NULL;
+		xaction->auth_bcnt = req_ctx->hashed_len;
+
+		/* count the first hmac key^ipad block */
+		if (ctx->sa.auth_hmac_en)
+			xaction->auth_bcnt +=
+				crypto_tfm_alg_blocksize(
+					crypto_ahash_tfm(ahash));
+	} else {
+		xaction->auth_last = AL_FALSE;
+		xaction->auth_iv_out.addr =
+			req_ctx->interm_dma_addr;
+		xaction->auth_iv_out.len =
+			ivsize;
+		xaction->auth_sign_out.len = 0;
+		xaction->auth_bcnt = 0;
+	}
+
+	xaction->dir = AL_CRYPT_ENCRYPT;
+	xaction->auth_in_len = 0;
+
+	ahash_req_prepare_xaction_buffers(req, desc, to_hash, src_nents, buflen,
+		&src_idx);
+
+	BUG_ON(src_idx > AL_SSM_MAX_SRC_DESCS);
+
+	xaction->src_size = xaction->auth_in_len;
+	xaction->src.bufs = &desc->src_bufs[0];
+	xaction->src.num = src_idx;
+
+	dev_dbg(to_dev(chan),
+		"%s: ctx->cache_state.cached=%d\n",
+		__func__, ctx->cache_state.cached);
+
+	/* make sure the SA is present in the HW cache, replacing the LRU
+	 * entry (and DMAing the SA in) when it is not
+	 */
+	if (!ctx->cache_state.cached) {
+		xaction->sa_indx = al_crypto_cache_replace_lru(chan,
+				&ctx->cache_state, NULL);
+		xaction->sa_in.addr = ctx->hw_sa_dma_addr;
+		xaction->sa_in.len = sizeof(struct al_crypto_hw_sa);
+	} else {
+		al_crypto_cache_update_lru(chan, &ctx->cache_state);
+		xaction->sa_indx = ctx->cache_state.idx;
+		xaction->sa_in.len = 0;
+	}
+
+	dev_dbg(to_dev(chan),
+		"sa_op=%d, auth_type=%d, sha2_mode=%d\n",
+		ctx->sa.sa_op,
+		ctx->sa.auth_type,
+		ctx->sa.sha2_mode);
+
+	xaction->flags = AL_SSM_INTERRUPT;
+
+	ahash_update_stats(xaction, chan);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Main hash processing function that handles update/final/finup and digest
+ *
+ * Implementation is based on the assumption that the caller waits for
+ * completion of every operation before issuing the next operation
+ */
+/* Core processing for update/final/finup/digest.
+ *
+ * Data is double-buffered in req_ctx->buf_0/buf_1: non-final requests
+ * keep at least one block's worth of tail bytes buffered so the HW is
+ * always fed whole blocks; the tail is carried into the next call.
+ * Returns -EINPROGRESS when a HW transaction was queued, 0 when data
+ * was only buffered (or the request was empty), negative errno on error.
+ */
+static int ahash_process_req(struct ahash_request *req, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	struct al_crypto_chan *chan = ctx->chan;
+	/* current_buf selects which of the two bounce buffers is active */
+	uint8_t *buf = req_ctx->current_buf ?
+			req_ctx->buf_1 : req_ctx->buf_0;
+	int *buflen = req_ctx->current_buf ?
+			&req_ctx->buflen_1 : &req_ctx->buflen_0;
+	uint8_t *next_buf = req_ctx->current_buf ?
+			req_ctx->buf_0 : req_ctx->buf_1;
+	int *next_buflen = req_ctx->current_buf ?
+			&req_ctx->buflen_0 : &req_ctx->buflen_1;
+	int in_len = *buflen + nbytes;
+	int to_hash, idx;
+	int src_nents = 0;
+	struct al_crypto_sw_desc *desc;
+	int src_sg_nents = sg_nents(req->src);
+	int rc = 0;
+
+	dev_dbg(to_dev(chan),
+		"%s: nbytes=%d,first=%d,last=%d,inlen=%d,buflen=%d\n",
+		__func__, nbytes, req_ctx->first, req_ctx->last,
+		in_len, *buflen);
+
+	if (!req_ctx->last) {
+		/* if aligned, do not hash last block */
+		/* (GNU "?:": keep the unaligned remainder if nonzero,
+		 * otherwise hold back one full block so a later final
+		 * always has data to hash) */
+		*next_buflen =
+		(in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1)) ?
+			: crypto_tfm_alg_blocksize(&ahash->base);
+		to_hash = in_len - *next_buflen;
+
+		/* Ignore not last empty update requests */
+		if (unlikely(in_len == 0))
+			return rc;
+	} else {
+		if (unlikely(in_len == 0)) {
+			/* final with no data at all: canned digest of "" */
+			zero_message_result_copy(req);
+			return rc;
+		}
+
+		*next_buflen = 0;
+		to_hash = in_len;
+	}
+
+	if (to_hash) {
+		/* map the previously buffered tail, if any, for the HW */
+		if (*buflen) {
+			req_ctx->buf_dma_addr = dma_map_single(to_dev(chan),
+					buf,
+					*buflen,
+					DMA_TO_DEVICE);
+			if (dma_mapping_error(to_dev(chan),
+					req_ctx->buf_dma_addr)) {
+				dev_err(to_dev(chan),
+					"dma_map_single failed!\n");
+				return -ENOMEM;
+			}
+			req_ctx->buf_dma_len = *buflen;
+		} else
+			req_ctx->buf_dma_len = 0;
+
+		/* prep_lock serializes ring allocation and submission */
+		spin_lock_bh(&chan->prep_lock);
+		if (likely(al_crypto_get_sw_desc(chan, 1) == 0))
+			idx = chan->head;
+		else {
+			spin_unlock_bh(&chan->prep_lock);
+			dev_err(
+				to_dev(chan),
+				"%s: al_crypto_get_sw_desc failed!\n",
+				__func__);
+
+			if (req_ctx->buf_dma_len)
+				dma_unmap_single(to_dev(chan),
+						req_ctx->buf_dma_addr,
+						req_ctx->buf_dma_len,
+						DMA_TO_DEVICE);
+			return -ENOSPC;
+		}
+
+		/* stash the tail of req->src for the next call and flip
+		 * the double buffer */
+		if (*next_buflen) {
+			sg_pcopy_to_buffer(req->src, src_sg_nents, next_buf,
+					*next_buflen, nbytes - *next_buflen);
+			req_ctx->current_buf = !req_ctx->current_buf;
+		}
+
+		if (nbytes) {
+			src_nents = sg_count(req->src, nbytes - (*next_buflen));
+
+			dev_dbg(to_dev(chan),
+				"%s: src_nents=%d\n",
+				__func__, src_nents);
+
+			dma_map_sg(to_dev(chan), req->src, src_nents,
+				DMA_TO_DEVICE);
+		}
+
+		chan->sw_desc_num_locked = 1;
+		chan->tx_desc_produced = 0;
+
+		desc = al_crypto_get_ring_ent(chan, idx);
+		desc->req = (void *)req;
+		desc->req_type = AL_CRYPTO_REQ_AHASH;
+		desc->src_nents = src_nents;
+
+		ahash_req_prepare_xaction(req, desc, to_hash, src_nents,
+				buf, buflen);
+
+		/* send crypto transaction to engine */
+		rc = al_crypto_dma_prepare(chan->hal_crypto, chan->idx,
+				&desc->hal_xaction);
+		if (unlikely(rc != 0)) {
+			dev_err(to_dev(chan),
+				"al_crypto_dma_prepare failed!\n");
+
+			al_crypto_dma_unmap_ahash(chan, desc, digestsize);
+
+			spin_unlock_bh(&chan->prep_lock);
+			return rc;
+		}
+
+		chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+		al_crypto_tx_submit(chan);
+
+		req_ctx->hashed_len += to_hash;
+
+		spin_unlock_bh(&chan->prep_lock);
+
+		return -EINPROGRESS;
+	} else {
+		/* everything fits in the bounce buffer - no HW work needed */
+		sg_copy_to_buffer(req->src, src_sg_nents,
+				buf + *buflen, nbytes);
+		*buflen = *next_buflen;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .update entry point: feed req->nbytes more bytes. */
+static int ahash_update(struct ahash_request *req)
+{
+	struct al_crypto_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+	/* not the final chunk - the tail may stay buffered for later */
+	rctx->last = false;
+	return ahash_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .final entry point: no new data, flush and finish. */
+static int ahash_final(struct ahash_request *req)
+{
+	struct al_crypto_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+	/* final pass over whatever is still buffered */
+	rctx->last = true;
+	return ahash_process_req(req, 0);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .finup entry point: consume req->nbytes and finish. */
+static int ahash_finup(struct ahash_request *req)
+{
+	struct al_crypto_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+	/* last chunk - nothing is carried over after this call */
+	rctx->last = true;
+	return ahash_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .digest entry point: init + finup in one call.
+ *
+ * Fix: the return value of ->init() was previously ignored; a failed
+ * init would let processing run on an uninitialized request context.
+ */
+static int ahash_digest(struct ahash_request *req)
+{
+	struct al_crypto_hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int rc;
+
+	rc = ahash->init(req);
+	if (unlikely(rc))
+		return rc;
+
+	req_ctx->last = true;
+
+	return ahash_process_req(req, req->nbytes);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .export: serialize the tfm ctx followed by the request
+ * state into @out, in the same layout ahash_import() expects.
+ */
+static int ahash_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct al_crypto_hash_req_ctx *state = ahash_request_ctx(req);
+	uint8_t *p = out;
+
+	memcpy(p, ctx, sizeof(*ctx));
+	memcpy(p + sizeof(*ctx), state, sizeof(*state));
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .import: restore the tfm ctx and request state from the
+ * blob produced by ahash_export().
+ */
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct al_crypto_hash_req_ctx *state = ahash_request_ctx(req);
+	const uint8_t *p = in;
+
+	memcpy(ctx, p, sizeof(*ctx));
+	memcpy(state, p + sizeof(*ctx), sizeof(*state));
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Generate intermediate hash of hmac^opad and hmac^ipad using sw hash engine
+ * and place the results in ctx->sa.
+ */
+/* Generate the intermediate hashes of key^ipad and key^opad with the
+ * SW shash and store them in ctx->sa so the HW engine can resume from
+ * them (standard HMAC key precomputation).
+ *
+ * Fix: ipad[]/opad[] were sized by the shash descsize alone, while
+ * blocksize bytes are written into them when padding/XORing the key;
+ * size them by the larger of blocksize and descsize so neither the key
+ * padding nor crypto_shash_export() (which writes descsize bytes) can
+ * overrun the buffers.
+ */
+int hmac_setkey(struct al_crypto_ctx *ctx, const u8 *key,
+		unsigned int keylen, unsigned int sw_hash_interm_offset,
+		unsigned int sw_hash_interm_size)
+{
+	unsigned int blocksize, digestsize, descsize, bufsize;
+	int rc;
+
+	/* Based on code from the hmac module */
+	blocksize = crypto_shash_blocksize(ctx->sw_hash);
+	digestsize = crypto_shash_digestsize(ctx->sw_hash);
+	descsize = crypto_shash_descsize(ctx->sw_hash);
+	/* pads must hold a padded key block AND an exported hash state */
+	bufsize = (blocksize > descsize) ? blocksize : descsize;
+
+	{
+		uint8_t ipad[bufsize];
+		uint8_t opad[bufsize];
+		struct {
+			struct shash_desc shash;
+			char ctx[crypto_shash_descsize(ctx->sw_hash)];
+		} desc;
+		unsigned int i;
+
+		desc.shash.tfm = ctx->sw_hash;
+		desc.shash.flags = crypto_shash_get_flags(ctx->sw_hash) &
+				CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		/* hash the key if longer than blocksize */
+		if (keylen > blocksize) {
+			int err;
+
+			err = crypto_shash_digest(&desc.shash, key, keylen,
+						ipad);
+			if (err)
+				return err;
+
+			keylen = digestsize;
+		} else
+			memcpy(ipad, key, keylen);
+
+		/* zero-pad the key to one full block and duplicate it */
+		memset(ipad + keylen, 0, blocksize - keylen);
+		memcpy(opad, ipad, blocksize);
+
+		/* Generate XORs with ipad and opad */
+		for (i = 0; i < blocksize; i++) {
+			ipad[i] ^= AL_CRYPTO_HASH_HMAC_IPAD;
+			opad[i] ^= AL_CRYPTO_HASH_HMAC_OPAD;
+		}
+
+		/* Hash one block of each pad and export the partial state
+		 * back into the pad buffer (reused as scratch). */
+		rc = crypto_shash_init(&desc.shash) ? :
+			crypto_shash_update(&desc.shash, ipad, blocksize) ? :
+			crypto_shash_export(&desc.shash, ipad) ? :
+			crypto_shash_init(&desc.shash) ? :
+			crypto_shash_update(&desc.shash, opad, blocksize) ? :
+			crypto_shash_export(&desc.shash, opad);
+
+		if (rc == 0) {
+			unsigned int offset = sw_hash_interm_offset;
+			unsigned int size = sw_hash_interm_size;
+
+			/* Copy intermediate results to SA */
+			memcpy(ctx->sa.hmac_iv_in, ipad + offset, size);
+			memcpy(ctx->sa.hmac_iv_out, opad + offset, size);
+
+			dev_dbg(to_dev(ctx->chan), "hmac_in:\n");
+			hexdump(ctx->sa.hmac_iv_in, size);
+			dev_dbg(to_dev(ctx->chan), "hmac_out:\n");
+			hexdump(ctx->sa.hmac_iv_out, size);
+
+			ctx->sa.auth_hmac_en = true;
+		}
+	}
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* crypto_ahash .setkey for the hmac variants.
+ * Precomputes the HMAC intermediate digests with the SW fallback hash,
+ * rebuilds the HW SA and invalidates any cached copy of it. Unkeyed
+ * variants have no sw_hash and accept any key as a no-op.
+ */
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+	unsigned int keylen)
+{
+	/* walk from the generic alg back to our al_crypto_hash wrapper */
+	struct crypto_alg *base = crypto_ahash_tfm(ahash)->__crt_alg;
+	struct hash_alg_common *halg =
+		container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		container_of(halg, struct ahash_alg, halg);
+	struct al_crypto_hash *al_crypto_hash =
+		container_of(alg, struct al_crypto_hash, ahash_alg);
+	struct al_crypto_ctx *ctx = crypto_ahash_ctx(ahash);
+	int rc;
+
+	/* unkeyed algorithm - nothing to precompute */
+	if (!ctx->sw_hash)
+		return 0;
+
+	rc = hmac_setkey(ctx, key, keylen,
+			al_crypto_hash->sw_hash_interm_offset,
+			al_crypto_hash->sw_hash_interm_size);
+
+	if (rc == 0) {
+		al_crypto_hw_sa_init(&ctx->sa, ctx->hw_sa);
+
+		/* mark the sa as not cached, will update in next xaction */
+		spin_lock_bh(&ctx->chan->prep_lock);
+		if (ctx->cache_state.cached)
+			al_crypto_cache_remove_lru(ctx->chan,
+					&ctx->cache_state);
+		spin_unlock_bh(&ctx->chan->prep_lock);
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate and populate one al_crypto_hash instance from a template.
+ * @keyed selects the hmac(...) variant (which carries the name of a SW
+ * fallback hash for key preprocessing) over the plain digest variant.
+ * Returns the new object or ERR_PTR(-ENOMEM).
+ */
+static struct al_crypto_hash *al_crypto_hash_alloc(
+	struct al_crypto_device *device,
+	struct al_crypto_hash_template *template,
+	bool keyed)
+{
+	struct al_crypto_hash *t_alg;
+	struct ahash_alg *halg;
+	struct crypto_alg *alg;
+	const char *cra_name, *drv_name, *sw_name;
+
+	t_alg = kzalloc(sizeof(struct al_crypto_hash), GFP_KERNEL);
+	if (!t_alg) {
+		dev_err(&device->pdev->dev, "failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* start from the template's ahash_alg and fill in the names */
+	t_alg->ahash_alg = template->template_ahash;
+	halg = &t_alg->ahash_alg;
+	alg = &halg->halg.base;
+
+	if (keyed) {
+		cra_name = template->hmac_name;
+		drv_name = template->hmac_driver_name;
+		sw_name = template->sw_hash_name;
+	} else {
+		cra_name = template->name;
+		drv_name = template->driver_name;
+		sw_name = "";
+	}
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", cra_name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", drv_name);
+	snprintf(t_alg->sw_hash_name, CRYPTO_MAX_ALG_NAME, "%s", sw_name);
+
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = al_crypto_hash_cra_init;
+	alg->cra_exit = al_crypto_hash_cra_exit;
+	alg->cra_priority = AL_CRYPTO_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct al_crypto_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
+		CRYPTO_ALG_NEED_FALLBACK;
+	alg->cra_type = &crypto_ahash_type;
+
+	/* HW engine parameters for this algorithm */
+	t_alg->auth_type = template->auth_type;
+	t_alg->sha2_mode = template->sha2_mode;
+	t_alg->sa_op = template->sa_op;
+	t_alg->device = device;
+	t_alg->sw_hash_interm_offset = template->sw_hash_interm_offset;
+	t_alg->sw_hash_interm_size = template->sw_hash_interm_size;
+
+	return t_alg;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Register the HMAC and plain variants of every template in
+ * driver_hash[] with the crypto API. Algorithms that fail to allocate
+ * or register are skipped with a warning rather than aborting the whole
+ * init; the status of the last attempt is returned.
+ */
+int al_crypto_hash_init(struct al_crypto_device *device)
+{
+	int i;
+	int err = 0;
+
+	INIT_LIST_HEAD(&device->hash_list);
+
+	/* tfm count is initialized in alg, move to core?? */
+	/* atomic_set(&device->tfm_count, -1); */
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+		struct al_crypto_hash *t_alg;
+
+		/* register hmac version */
+		t_alg = al_crypto_hash_alloc(device,
+				&driver_hash[i], true);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(&device->pdev->dev,
+				"%s alg allocation failed\n",
+				driver_hash[i].driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			dev_warn(&device->pdev->dev,
+				"%s alg registration failed\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &device->hash_list);
+
+		/* register unkeyed version */
+		t_alg = al_crypto_hash_alloc(device, &driver_hash[i], false);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(&device->pdev->dev,
+				"%s alg allocation failed\n",
+				driver_hash[i].driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			dev_warn(&device->pdev->dev,
+				"%s alg registration failed\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &device->hash_list);
+	}
+
+	if (!list_empty(&device->hash_list))
+		dev_info(&device->pdev->dev,
+			"hash algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Unregister and free every hash algorithm registered by
+ * al_crypto_hash_init(). Safe to call when init never ran (the list
+ * head is still zeroed in that case).
+ */
+void al_crypto_hash_terminate(struct al_crypto_device *device)
+{
+	struct al_crypto_hash *hash, *tmp;
+
+	/* hash_list was never initialized - nothing to tear down */
+	if (!device->hash_list.next)
+		return;
+
+	list_for_each_entry_safe(hash, tmp, &device->hash_list, entry) {
+		crypto_unregister_ahash(&hash->ahash_alg);
+		list_del(&hash->entry);
+		kfree(hash);
+	}
+}
diff --git a/drivers/crypto/al/al_crypto_main.c b/drivers/crypto/al/al_crypto_main.c
new file mode 100644
index 0000000..d46b6ff
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_main.c
@@ -0,0 +1,311 @@
+/*
+ * drivers/crypto/al_crypto_main.c
+ *
+ * Annapurna Labs Crypto driver - pci enumeration and init invocation
+ *
+ * Copyright (C) 2012 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "al_crypto.h"
+#include "al_crypto_module_params.h"
+#include "al_hal_unit_adapter_regs.h"
+
+MODULE_VERSION(AL_CRYPTO_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Annapurna Labs");
+
+#define DRV_NAME "al_crypto"
+#define MAX_HW_DESCS_PER_SW_DECS 4
+
+enum {
+ /* BAR's are enumerated in terms of pci_resource_start() terms */
+ AL_CRYPTO_UDMA_BAR = 0,
+ AL_CRYPTO_APP_BAR = 4
+};
+
+static int al_crypto_pci_probe(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id);
+
+static void al_crypto_pci_remove(
+ struct pci_dev *pdev);
+
+static void al_crypto_flr(struct pci_dev *pdev);
+
+static DEFINE_PCI_DEVICE_TABLE(al_crypto_pci_tbl) = {
+ { PCI_VDEVICE(ANNAPURNA_LABS, PCI_DEVICE_ID_AL_CRYPTO), },
+ { PCI_VDEVICE(ANNAPURNA_LABS, PCI_DEVICE_ID_AL_CRYPTO_VF), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, al_crypto_pci_tbl);
+
+static struct pci_driver al_crypto_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = al_crypto_pci_tbl,
+ .probe = al_crypto_pci_probe,
+ .remove = al_crypto_pci_remove,
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* PCI probe: map BARs, set DMA masks, optionally enable SR-IOV, then
+ * bring up the crypto core and register the alg/hash/crc services.
+ *
+ * Channel split: when SR-IOV is in use the PF is dedicated to crc and
+ * the VF to crypto (see comment at the channel assignment below).
+ *
+ * Fixes: failures after a successful pci_enable_sriov() previously left
+ * SR-IOV enabled (the error path now disables it again); two error
+ * messages named the wrong failing function.
+ */
+static int al_crypto_pci_probe(
+	struct pci_dev *pdev,
+	const struct pci_device_id *id)
+{
+	int status = 0;
+	int sriov_crc_channels = al_crypto_get_crc_channels();
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct al_crypto_device *device;
+	bool sriov_enabled = false;
+	int bar_reg;
+
+	dev_dbg(dev, "%s(%p, %p)\n", __func__, pdev, id);
+
+	/* Sanity: the HW queues must be deep enough to absorb a full SW
+	 * ring, otherwise a queue overflow can misbehave. */
+	if (min(al_crypto_get_rx_descs_order(), al_crypto_get_tx_descs_order()) <
+	    (MAX_HW_DESCS_PER_SW_DECS + al_crypto_get_ring_alloc_order())) {
+		dev_err(dev, "%s: Too small HW Q can lead to unexpected behavior "
+			"upon queue overflow\n", __func__);
+	}
+
+	/* reset the function to a clean state before touching it */
+	al_crypto_flr(pdev);
+
+	status = pcim_enable_device(pdev);
+	if (status) {
+		pr_err("%s: pcim_enable_device failed!\n", __func__);
+		goto done;
+	}
+
+	/* the APP BAR exists only on the physical function */
+	bar_reg = pdev->is_physfn ?
+		(1 << AL_CRYPTO_UDMA_BAR) | (1 << AL_CRYPTO_APP_BAR) :
+		(1 << AL_CRYPTO_UDMA_BAR);
+
+	status = pcim_iomap_regions(
+		pdev,
+		bar_reg,
+		DRV_NAME);
+	if (status) {
+		pr_err("%s: pcim_iomap_regions failed!\n", __func__);
+		goto done;
+	}
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (status)
+		goto done;
+
+	status = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (status)
+		goto done;
+
+	device = devm_kzalloc(dev, sizeof(struct al_crypto_device), GFP_KERNEL);
+	if (!device) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	device->pdev = pdev;
+
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+	dev_set_drvdata(dev, device);
+
+#ifdef CONFIG_BTRFS_AL_FAST_CRC_DMA
+	BUG_ON(!al_crypto_get_use_virtual_function());
+#endif
+	/*
+	 * When VF is used the PF is dedicated to crc and the VF is dedicated
+	 * to crypto
+	 */
+	if (al_crypto_get_use_virtual_function()) {
+		if (pdev->is_physfn && (pci_sriov_get_totalvfs(pdev) > 0))
+			sriov_crc_channels = 0;
+		else if (pdev->is_virtfn) {
+			sriov_crc_channels = al_crypto_get_max_channels();
+#ifdef CONFIG_BTRFS_AL_FAST_CRC_DMA
+			BUG_ON(sriov_crc_channels < NR_CPUS);
+#endif
+		}
+	}
+
+	device->max_channels = al_crypto_get_max_channels();
+	device->crc_channels = sriov_crc_channels;
+
+	if (al_crypto_get_use_virtual_function() && pdev->is_physfn &&
+	    (pci_sriov_get_totalvfs(pdev) > 0)) {
+		status = pci_enable_sriov(pdev, 1);
+		if (status) {
+			dev_err(dev, "%s: pci_enable_sriov failed, status %d\n",
+				__func__, status);
+			goto done;
+		}
+		sriov_enabled = true;
+	}
+
+	status = al_crypto_core_init(
+		device,
+		iomap[AL_CRYPTO_UDMA_BAR],
+		pdev->is_physfn ? iomap[AL_CRYPTO_APP_BAR] : NULL);
+	if (status) {
+		dev_err(dev, "%s: al_crypto_core_init failed\n", __func__);
+		goto err_core_init;
+	}
+
+	status = al_crypto_sysfs_init(device);
+	if (status) {
+		dev_err(dev, "%s: al_crypto_sysfs_init failed\n", __func__);
+		goto err_sysfs_init;
+	}
+
+	if (device->crc_channels < device->max_channels) {
+		status = al_crypto_alg_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_alg_init failed\n",
+				__func__);
+			goto err_alg_init;
+		}
+
+		status = al_crypto_hash_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_hash_init failed\n",
+				__func__);
+			goto err_hash_init;
+		}
+	} else
+		dev_info(dev, "%s: Skipping alg/hash initialization, " \
+			"no allocated channels\n", __func__);
+
+	if (device->crc_channels > 0) {
+		status = al_crypto_crc_init(device);
+		if (status) {
+			dev_err(dev, "%s: al_crypto_crc_init failed\n",
+				__func__);
+			goto err_crc_init;
+		}
+	} else
+		dev_info(dev, "%s: Skipping crc initialization, " \
+			"no allocated channels\n", __func__);
+
+	goto done;
+
+	/* unwind in reverse order of initialization */
+err_crc_init:
+	al_crypto_hash_terminate(device);
+err_hash_init:
+	al_crypto_alg_terminate(device);
+err_alg_init:
+	al_crypto_sysfs_terminate(device);
+err_sysfs_init:
+	al_crypto_core_terminate(device);
+err_core_init:
+	if (sriov_enabled)
+		pci_disable_sriov(pdev);
+done:
+	return status;
+}
+
+/* HAL callback: read a PCIe config dword.
+ * Fix: propagate the return value of pci_read_config_dword() instead of
+ * unconditionally reporting success (0 == PCIBIOS_SUCCESSFUL, matching
+ * the previous success value).
+ */
+int al_crypto_read_pcie_config(void *handle, int where, uint32_t *val)
+{
+	/* handle is a pointer to the pci_dev */
+	return pci_read_config_dword((struct pci_dev *)handle, where, val);
+}
+
+/* HAL callback: write a PCIe config dword.
+ * Fix: propagate the return value of pci_write_config_dword() instead
+ * of unconditionally reporting success (0 == PCIBIOS_SUCCESSFUL).
+ */
+int al_crypto_write_pcie_config(void *handle, int where, uint32_t val)
+{
+	/* handle is a pointer to the pci_dev */
+	return pci_write_config_dword((struct pci_dev *)handle, where, val);
+}
+
+/* HAL callback: issue a Function Level Reset and give the function time
+ * to begin the reset before config space is touched again.
+ * NOTE(review): udelay(1000) busy-spins for 1ms; if every caller can
+ * sleep, mdelay()/msleep() would be preferable - confirm with the HAL.
+ */
+int al_crypto_write_pcie_flr(void *handle)
+{
+	/* handle is a pointer to the pci_dev */
+	__pci_reset_function_locked((struct pci_dev *)handle);
+	udelay(1000);
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Perform an FLR on @pdev through the HAL, using the config-space
+ * accessors above as callbacks.
+ * Fix: canonical storage-class/qualifier order ("static inline void",
+ * not "static void inline").
+ */
+static inline void al_crypto_flr(struct pci_dev *pdev)
+{
+	al_pcie_perform_flr(al_crypto_read_pcie_config,
+			al_crypto_write_pcie_config,
+			al_crypto_write_pcie_flr,
+			pdev);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* PCI remove: tear the services down in reverse order of probe.
+ *
+ * Fix: do not call pci_disable_device() here - the device was enabled
+ * with pcim_enable_device(), so the managed (devres) release path
+ * already disables it; disabling it twice unbalances the enable count.
+ */
+static void al_crypto_pci_remove(struct pci_dev *pdev)
+{
+	struct al_crypto_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
+
+	dev_dbg(&pdev->dev, "Removing dma\n");
+
+	if (device->pdev->is_physfn)
+		pci_disable_sriov(device->pdev);
+
+	al_crypto_crc_terminate(device);
+
+	al_crypto_hash_terminate(device);
+
+	al_crypto_sysfs_terminate(device);
+
+	al_crypto_alg_terminate(device);
+
+	al_crypto_core_terminate(device);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Module entry point: announce the driver and register with the PCI
+ * core; probing happens per matching device from there.
+ */
+static int __init al_crypto_init_module(void)
+{
+	pr_info(
+		"%s: Annapurna Labs Crypto Driver %s\n",
+		DRV_NAME,
+		AL_CRYPTO_VERSION);
+
+	return pci_register_driver(&al_crypto_pci_driver);
+}
+module_init(al_crypto_init_module);
+
+/******************************************************************************
+ *****************************************************************************/
+/* Module exit point: unregister the PCI driver (invokes ->remove for
+ * every bound device).
+ */
+static void __exit al_crypto_exit_module(void)
+{
+	pci_unregister_driver(&al_crypto_pci_driver);
+}
+module_exit(al_crypto_exit_module);
diff --git a/drivers/crypto/al/al_crypto_module_params.c b/drivers/crypto/al/al_crypto_module_params.c
new file mode 100644
index 0000000..046e757
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_module_params.c
@@ -0,0 +1,105 @@
+/*
+ * drivers/crypto/al/al_crypto_module_params.c
+ *
+ * Annapurna Labs Crypto driver - module params
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+
+#include "al_crypto.h"
+#include "al_crypto_module_params.h"
+
+/* Module parameters (runtime-writable via
+ * /sys/module/al_crypto/parameters/).
+ * Fix: the two use_virtual_function description strings concatenated
+ * without a separator ("...(default: true)If we use..."); add one.
+ */
+static bool use_virtual_function = true;
+module_param(use_virtual_function, bool, 0644);
+MODULE_PARM_DESC(
+	use_virtual_function,
+	"use the SR-IOV capability of the crypto engine (default: true). "
+	"If we use the VF we will have 4 crc channels and 4 crypto channels");
+
+static int crc_channels = 1;
+module_param(crc_channels, int, 0644);
+MODULE_PARM_DESC(
+	crc_channels,
+	"number of crc channels (queues) to enable (default: 1)");
+
+static int max_channels = AL_CRYPTO_DMA_MAX_CHANNELS;
+module_param(max_channels, int, 0644);
+MODULE_PARM_DESC(
+	max_channels,
+	"maximum number of channels (queues) to enable (default: 4)");
+
+static int ring_alloc_order = 10;
+module_param(ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(
+	ring_alloc_order,
+	"allocate 2^n descriptors per channel"
+	" (default: 10 max: 16)");
+
+static int tx_descs_order = 14;
+module_param(tx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	tx_descs_order,
+	"allocate 2^n of descriptors in Tx queue (default: 14)");
+
+static int rx_descs_order = 14;
+module_param(rx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+	rx_descs_order,
+	"allocate 2^n of descriptors in Rx queue (default: 14)");
+
+static bool use_single_msix;
+module_param(use_single_msix, bool, 0644);
+MODULE_PARM_DESC(
+	use_single_msix,
+	"Use single msix (one msi-x per group and not one per queue)");
+
+/* Accessors exposing the module parameters to the rest of the driver
+ * (keeps the parameter variables file-local).
+ */
+
+/* whether the SR-IOV PF/VF crc/crypto split is in use */
+bool al_crypto_get_use_virtual_function(void)
+{
+	return use_virtual_function;
+}
+
+/* one MSI-X per group instead of one per queue */
+bool al_crypto_get_use_single_msix(void)
+{
+	return use_single_msix;
+}
+
+/* number of channels dedicated to crc */
+int al_crypto_get_crc_channels(void)
+{
+	return crc_channels;
+}
+
+/* total number of channels (queues) to enable */
+int al_crypto_get_max_channels(void)
+{
+	return max_channels;
+}
+
+/* SW descriptor ring holds 2^n entries per channel */
+int al_crypto_get_ring_alloc_order(void)
+{
+	return ring_alloc_order;
+}
+
+/* HW Tx queue holds 2^n descriptors */
+int al_crypto_get_tx_descs_order(void)
+{
+	return tx_descs_order;
+}
+
+/* HW Rx queue holds 2^n descriptors */
+int al_crypto_get_rx_descs_order(void)
+{
+	return rx_descs_order;
+}
diff --git a/drivers/crypto/al/al_crypto_module_params.h b/drivers/crypto/al/al_crypto_module_params.h
new file mode 100644
index 0000000..5879b17
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_module_params.h
@@ -0,0 +1,40 @@
+/*
+ * drivers/crypto/al/al_crypto_module_params.h
+ *
+ * Annapurna Labs Crypto driver - module params
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AL_CRYPTO_MODULE_PARAMS_H__
+#define __AL_CRYPTO_MODULE_PARAMS_H__
+
+bool al_crypto_get_use_virtual_function(void);
+
+int al_crypto_get_crc_channels(void);
+
+int al_crypto_get_max_channels(void);
+
+int al_crypto_get_ring_alloc_order(void);
+
+int al_crypto_get_tx_descs_order(void);
+
+int al_crypto_get_rx_descs_order(void);
+
+bool al_crypto_get_use_single_msix(void);
+
+#endif /* __AL_CRYPTO_MODULE_PARAMS_H__ */
diff --git a/drivers/crypto/al/al_crypto_sysfs.c b/drivers/crypto/al/al_crypto_sysfs.c
new file mode 100644
index 0000000..7d577b7
--- /dev/null
+++ b/drivers/crypto/al/al_crypto_sysfs.c
@@ -0,0 +1,534 @@
+/*
+ * Annapurna Labs Crypto Linux driver - sysfs support
+ * Copyright(c) 2013 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/* NOTE(review): the two bracketed header names below were lost in
+ * extraction ("#include" with no argument) -- restore them from the
+ * original patch (likely kernel headers such as sysfs/kobject).
+ */
+#include
+#include
+
+#include "al_crypto.h"
+#include "al_hal_udma_debug.h"
+
+/* Recover the enclosing dev_ext_attribute from its device_attribute. */
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+/* kobject release callback: frees the channel when the sysfs refcount
+ * of its embedded kobject drops to zero (stats build only).
+ */
+static void al_crypto_release_channel(struct kobject *kobj)
+{
+ struct al_crypto_chan *chan =
+ container_of(kobj, struct al_crypto_chan, kobj);
+
+ kfree(chan);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Per-channel sysfs attribute. 'offset' is the byte offset of a 64-bit
+ * counter inside one of the channel statistics structs; the show/store
+ * callbacks receive that offset so one handler can serve every counter.
+ */
+struct al_crypto_chan_attr {
+ struct attribute attr;
+ size_t offset;
+ ssize_t (*show) (struct al_crypto_chan *chan, size_t offset, char *buf);
+ ssize_t (*store) (struct al_crypto_chan *chan, size_t offset,
+ const char *buf, size_t size);
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* sysfs show dispatcher: forwards to the attribute's show callback,
+ * passing the counter offset stored in the attribute.
+ */
+static ssize_t al_crypto_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct al_crypto_chan *chan =
+ container_of(kobj, struct al_crypto_chan, kobj);
+ struct al_crypto_chan_attr *chan_attr =
+ container_of(attr, struct al_crypto_chan_attr, attr);
+ ssize_t ret = 0;
+
+ if (chan_attr->show)
+ ret = chan_attr->show(chan, chan_attr->offset, buf);
+
+ return ret;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* sysfs store dispatcher: forwards to the attribute's store callback.
+ * All attributes below are created read-only (S_IRUGO) with a NULL
+ * store, so this currently returns 0.
+ */
+static ssize_t al_crypto_chan_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t size)
+{
+ struct al_crypto_chan *chan =
+ container_of(kobj, struct al_crypto_chan, kobj);
+ struct al_crypto_chan_attr *chan_attr =
+ container_of(attr, struct al_crypto_chan_attr, attr);
+ ssize_t ret = 0;
+
+ if (chan_attr->store)
+ ret = chan_attr->store(chan, chan_attr->offset, buf, size);
+
+ return ret;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* sysfs_ops shared by both channel ktypes below. */
+static const struct sysfs_ops al_crypto_chan_sysfs_ops = {
+ .show = al_crypto_chan_attr_show,
+ .store = al_crypto_chan_attr_store,
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Read one 64-bit counter from chan->stats_prep at the given byte
+ * offset, under the prep lock, and format it in decimal.
+ */
+static ssize_t al_crypto_chan_rd_stats_prep(
+ struct al_crypto_chan *chan,
+ size_t offset,
+ char *buf)
+{
+ uint64_t val;
+ ssize_t size;
+
+ spin_lock_bh(&chan->prep_lock);
+
+ val = *(uint64_t *)(((uint8_t *)&chan->stats_prep) + offset);
+
+ spin_unlock_bh(&chan->prep_lock);
+
+ size = sprintf(buf, "%llu\n", val);
+
+ return size;
+}
+
+/* Same as above for chan->stats_comp, under the cleanup lock. */
+static ssize_t al_crypto_chan_rd_stats_comp(
+ struct al_crypto_chan *chan,
+ size_t offset,
+ char *buf)
+{
+ uint64_t val;
+ ssize_t size;
+
+ spin_lock_bh(&chan->cleanup_lock);
+
+ val = *(uint64_t *)(((uint8_t *)&chan->stats_comp) + offset);
+
+ spin_unlock_bh(&chan->cleanup_lock);
+
+ size = sprintf(buf, "%llu\n", val);
+
+ return size;
+}
+
+/* Same as above for chan->stats_gen. Read without any lock -- a torn
+ * read of a counter is apparently tolerated here.
+ */
+static ssize_t al_crypto_chan_rd_stats_gen(
+ struct al_crypto_chan *chan,
+ size_t offset,
+ char *buf)
+{
+ uint64_t val;
+ ssize_t size;
+
+ val = *(uint64_t *)(((uint8_t *)&chan->stats_gen) + offset);
+
+ size = sprintf(buf, "%llu\n", val);
+
+ return size;
+}
+
+
+/* Define a read-only channel attribute named _name whose value is the
+ * field _name inside struct al_crypto_chan_stats_<_group>; the matching
+ * al_crypto_chan_rd_stats_<_group>() reader is wired in as show().
+ */
+#define al_crypto_chan_init_attr(_name, _group) \
+static struct al_crypto_chan_attr al_crypto_chan_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .offset = offsetof(struct al_crypto_chan_stats_##_group, _name), \
+ .show = al_crypto_chan_rd_stats_##_group, \
+ .store = NULL, \
+}
+
+/* Channel attrs */
+al_crypto_chan_init_attr(ablkcipher_encrypt_reqs, prep);
+al_crypto_chan_init_attr(ablkcipher_encrypt_bytes, prep);
+al_crypto_chan_init_attr(ablkcipher_decrypt_reqs, prep);
+al_crypto_chan_init_attr(ablkcipher_decrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_encrypt_hash_reqs, prep);
+al_crypto_chan_init_attr(aead_encrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_hash_bytes, prep);
+al_crypto_chan_init_attr(aead_decrypt_validate_reqs, prep);
+al_crypto_chan_init_attr(aead_decrypt_bytes, prep);
+al_crypto_chan_init_attr(aead_validate_bytes, prep);
+al_crypto_chan_init_attr(ahash_reqs, prep);
+al_crypto_chan_init_attr(ahash_bytes, prep);
+al_crypto_chan_init_attr(crc_reqs, prep);
+al_crypto_chan_init_attr(crc_bytes, prep);
+al_crypto_chan_init_attr(cache_misses, prep);
+al_crypto_chan_init_attr(ablkcipher_reqs_le512, prep);
+al_crypto_chan_init_attr(ablkcipher_reqs_512_2048, prep);
+al_crypto_chan_init_attr(ablkcipher_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(ablkcipher_reqs_gt4096, prep);
+al_crypto_chan_init_attr(aead_reqs_le512, prep);
+al_crypto_chan_init_attr(aead_reqs_512_2048, prep);
+al_crypto_chan_init_attr(aead_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(aead_reqs_gt4096, prep);
+al_crypto_chan_init_attr(ahash_reqs_le512, prep);
+al_crypto_chan_init_attr(ahash_reqs_512_2048, prep);
+al_crypto_chan_init_attr(ahash_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(ahash_reqs_gt4096, prep);
+al_crypto_chan_init_attr(crc_reqs_le512, prep);
+al_crypto_chan_init_attr(crc_reqs_512_2048, prep);
+al_crypto_chan_init_attr(crc_reqs_2048_4096, prep);
+al_crypto_chan_init_attr(crc_reqs_gt4096, prep);
+al_crypto_chan_init_attr(redundant_int_cnt, comp);
+al_crypto_chan_init_attr(max_active_descs, comp);
+al_crypto_chan_init_attr(ablkcipher_tfms, gen);
+al_crypto_chan_init_attr(aead_tfms, gen);
+al_crypto_chan_init_attr(ahash_tfms, gen);
+al_crypto_chan_init_attr(crc_tfms, gen);
+
+/* Attribute set for crypto/auth queues (the crc_* counters live in the
+ * separate list below for CRC queues).
+ */
+static struct attribute *al_crypto_chan_default_attrs[] = {
+ &al_crypto_chan_ablkcipher_encrypt_reqs.attr,
+ &al_crypto_chan_ablkcipher_encrypt_bytes.attr,
+ &al_crypto_chan_ablkcipher_decrypt_reqs.attr,
+ &al_crypto_chan_ablkcipher_decrypt_bytes.attr,
+ &al_crypto_chan_aead_encrypt_hash_reqs.attr,
+ &al_crypto_chan_aead_encrypt_bytes.attr,
+ &al_crypto_chan_aead_hash_bytes.attr,
+ &al_crypto_chan_aead_decrypt_validate_reqs.attr,
+ &al_crypto_chan_aead_decrypt_bytes.attr,
+ &al_crypto_chan_aead_validate_bytes.attr,
+ &al_crypto_chan_ahash_reqs.attr,
+ &al_crypto_chan_ahash_bytes.attr,
+ &al_crypto_chan_cache_misses.attr,
+ &al_crypto_chan_ablkcipher_reqs_le512.attr,
+ &al_crypto_chan_ablkcipher_reqs_512_2048.attr,
+ &al_crypto_chan_ablkcipher_reqs_2048_4096.attr,
+ &al_crypto_chan_ablkcipher_reqs_gt4096.attr,
+ &al_crypto_chan_aead_reqs_le512.attr,
+ &al_crypto_chan_aead_reqs_512_2048.attr,
+ &al_crypto_chan_aead_reqs_2048_4096.attr,
+ &al_crypto_chan_aead_reqs_gt4096.attr,
+ &al_crypto_chan_ahash_reqs_le512.attr,
+ &al_crypto_chan_ahash_reqs_512_2048.attr,
+ &al_crypto_chan_ahash_reqs_2048_4096.attr,
+ &al_crypto_chan_ahash_reqs_gt4096.attr,
+
+ &al_crypto_chan_redundant_int_cnt.attr,
+ &al_crypto_chan_max_active_descs.attr,
+
+ &al_crypto_chan_ablkcipher_tfms.attr,
+ &al_crypto_chan_aead_tfms.attr,
+ &al_crypto_chan_ahash_tfms.attr,
+ NULL
+};
+
+/* Attribute set for CRC/checksum queues. */
+static struct attribute *al_crypto_crc_chan_default_attrs[] = {
+ &al_crypto_chan_crc_reqs.attr,
+ &al_crypto_chan_crc_bytes.attr,
+ &al_crypto_chan_cache_misses.attr,
+ &al_crypto_chan_crc_reqs_le512.attr,
+ &al_crypto_chan_crc_reqs_512_2048.attr,
+ &al_crypto_chan_crc_reqs_2048_4096.attr,
+ &al_crypto_chan_crc_reqs_gt4096.attr,
+
+ &al_crypto_chan_redundant_int_cnt.attr,
+ &al_crypto_chan_max_active_descs.attr,
+
+ &al_crypto_chan_crc_tfms.attr,
+ NULL
+};
+
+/* ktype for crypto/auth channel kobjects; release() frees the channel. */
+static struct kobj_type chan_ktype = {
+ .sysfs_ops = &al_crypto_chan_sysfs_ops,
+ .release = al_crypto_release_channel,
+ .default_attrs = al_crypto_chan_default_attrs,
+};
+
+/* ktype for CRC channel kobjects. */
+static struct kobj_type crc_chan_ktype = {
+ .sysfs_ops = &al_crypto_chan_sysfs_ops,
+ .release = al_crypto_release_channel,
+ .default_attrs = al_crypto_crc_chan_default_attrs,
+};
+
+/* Kind of dump requested through the udma_dump_* device attributes;
+ * stored in dev_ext_attribute.var.
+ */
+enum udma_dump_type {
+ UDMA_DUMP_M2S_REGS,
+ UDMA_DUMP_M2S_Q_STRUCT,
+ UDMA_DUMP_M2S_Q_POINTERS,
+ UDMA_DUMP_S2M_REGS,
+ UDMA_DUMP_S2M_Q_STRUCT,
+ UDMA_DUMP_S2M_Q_POINTERS
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Show handler for the udma_dump_* device attributes.
+ *
+ * Reading these files only prints usage help; the actual register/queue
+ * dump is triggered by writing to them (see wr_udma_dump()).
+ * Fix: "correspoding" typo in the user-visible help text.
+ */
+static ssize_t rd_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+ ssize_t rc = 0;
+
+ switch (dump_type) {
+ case UDMA_DUMP_M2S_REGS:
+ case UDMA_DUMP_S2M_REGS:
+ rc = sprintf(
+ buf,
+ "Write mask to dump corresponding udma regs\n");
+ break;
+ case UDMA_DUMP_M2S_Q_STRUCT:
+ case UDMA_DUMP_S2M_Q_STRUCT:
+ rc = sprintf(
+ buf,
+ "Write q num to dump corresponding q struct\n");
+ break;
+ case UDMA_DUMP_M2S_Q_POINTERS:
+ case UDMA_DUMP_S2M_Q_POINTERS:
+ rc = sprintf(
+ buf,
+ "Write q num (in hex) and add 1 for submission ring,"
+ " for ex:\n"
+ "0 for completion ring of q 0\n"
+ "10 for submission ring of q 0\n");
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Store handler for the udma_dump_* device attributes.
+ *
+ * The written value is parsed as hex. Depending on the attribute it is
+ * interpreted as a register-dump mask, a queue number, or (for the
+ * q_pointers files) queue number in the low nibble with bit 4 selecting
+ * the submission ring. Output goes to the kernel log via the HAL print
+ * helpers. Returns 'count' on success or the kstrtoul() error.
+ */
+static ssize_t wr_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int err;
+ int q_id;
+ unsigned long val;
+ struct al_udma* dma;
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ enum udma_dump_type dump_type = (enum udma_dump_type)ea->var;
+ enum al_udma_ring_type ring_type = AL_RING_COMPLETION;
+ struct al_crypto_device *device = dev_get_drvdata(dev);
+
+ err = kstrtoul(buf, 16, &val);
+ if (err < 0)
+ return err;
+
+ switch (dump_type) {
+ case UDMA_DUMP_M2S_REGS:
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+ al_udma_regs_print(dma, val);
+ break;
+ case UDMA_DUMP_S2M_REGS:
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+ al_udma_regs_print(dma, val);
+ break;
+ case UDMA_DUMP_M2S_Q_STRUCT:
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+ al_udma_q_struct_print(dma, val);
+ break;
+ case UDMA_DUMP_S2M_Q_STRUCT:
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+ al_udma_q_struct_print(dma, val);
+ break;
+ case UDMA_DUMP_M2S_Q_POINTERS:
+ /* bit 4 selects the submission ring, low nibble is the q */
+ if (val & 0x10)
+ ring_type = AL_RING_SUBMISSION;
+ q_id = val & 0xf;
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_TX, &dma);
+ al_udma_ring_print(dma, q_id, ring_type);
+ break;
+ case UDMA_DUMP_S2M_Q_POINTERS:
+ if (val & 0x10)
+ ring_type = AL_RING_SUBMISSION;
+ q_id = val & 0xf;
+ al_ssm_dma_handle_get(&device->hal_crypto, UDMA_RX, &dma);
+ al_udma_ring_print(dma, q_id, ring_type);
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+
+/* Build one udma_dump_* dev_ext_attribute; _type (an enum
+ * udma_dump_type) is stashed in .var for the shared handlers.
+ *
+ * Fix: use S_IWUSR instead of S_IWUGO -- exporting world-writable sysfs
+ * files that trigger register dumps is a security problem (flagged as
+ * an error by checkpatch); root-only write access suffices for debug.
+ */
+#define UDMA_DUMP_PREP_ATTR(_name, _type) {\
+ __ATTR(udma_dump_##_name, S_IRUGO|S_IWUSR, rd_udma_dump, wr_udma_dump),\
+ (void *)_type }
+
+/* Device attrs - udma debug */
+static struct dev_ext_attribute dev_attr_udma_debug[] = {
+ UDMA_DUMP_PREP_ATTR(m2s_regs, UDMA_DUMP_M2S_REGS),
+ UDMA_DUMP_PREP_ATTR(m2s_q_struct, UDMA_DUMP_M2S_Q_STRUCT),
+ UDMA_DUMP_PREP_ATTR(m2s_q_pointers, UDMA_DUMP_M2S_Q_POINTERS),
+ UDMA_DUMP_PREP_ATTR(s2m_regs, UDMA_DUMP_S2M_REGS),
+ UDMA_DUMP_PREP_ATTR(s2m_q_struct, UDMA_DUMP_S2M_Q_STRUCT),
+ UDMA_DUMP_PREP_ATTR(s2m_q_pointers, UDMA_DUMP_S2M_Q_POINTERS)
+};
+#endif /* CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS */
+
+/* Which tunable a config device attribute refers to (stored in .var). */
+enum al_crypto_config_type {
+ CONFIG_INT_MODERATION
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Show handler for the config device attributes; prints the current
+ * value of the tunable identified by the attribute's .var field.
+ */
+static ssize_t al_crypto_rd_config(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ enum al_crypto_config_type config_type =
+ (enum al_crypto_config_type)ea->var;
+ struct al_crypto_device *device = dev_get_drvdata(dev);
+ ssize_t rc = 0;
+
+ switch (config_type) {
+ case CONFIG_INT_MODERATION:
+ rc = sprintf(buf, "%d\n", al_crypto_get_int_moderation(device));
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Store handler for the config device attributes; parses a decimal
+ * value and applies it to the tunable identified by .var. Returns
+ * 'count' on success or the kstrtoul() error.
+ */
+static ssize_t al_crypto_wr_config(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int err;
+ unsigned long val;
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ enum al_crypto_config_type config_type =
+ (enum al_crypto_config_type)ea->var;
+ struct al_crypto_device *device = dev_get_drvdata(dev);
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ switch (config_type) {
+ case CONFIG_INT_MODERATION:
+ al_crypto_set_int_moderation(device, val);
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+
+/* Build one config dev_ext_attribute with _type stashed in .var.
+ *
+ * Fix: use S_IWUSR instead of S_IWUGO -- world-writable sysfs tunables
+ * are a security problem (flagged as an error by checkpatch); only
+ * root should change interrupt moderation.
+ */
+#define CONFIG_PREP_ATTR(_name, _type) {\
+ __ATTR(_name, S_IRUGO|S_IWUSR,\
+ al_crypto_rd_config, al_crypto_wr_config),\
+ (void *)_type }
+
+/* Device attrs - config */
+static struct dev_ext_attribute dev_attr_config[] = {
+ CONFIG_PREP_ATTR(int_moderation, CONFIG_INT_MODERATION),
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/* Free a channel. When the stats build is enabled the channel is owned
+ * by its kobject and is freed by al_crypto_release_channel() via
+ * kobject_put(), so this is a no-op in that configuration.
+ */
+void al_crypto_free_channel(struct al_crypto_chan *chan)
+{
+#ifndef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+ kfree(chan);
+#endif
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Create the device's sysfs hierarchy: per-channel stats kobjects and
+ * udma debug attributes (stats build only) plus the config attributes.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes: propagate the real kobject_init_and_add() error instead of a
+ * hard-coded -ENOMEM, and stop on the first sysfs_create_file()
+ * failure instead of letting later iterations overwrite rc (which
+ * could silently hide an earlier failure).
+ */
+int al_crypto_sysfs_init(
+ struct al_crypto_device *device)
+{
+ int rc = 0;
+ struct device *dev = &device->pdev->dev;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+
+ device->channels_kset =
+ kset_create_and_add("channels", NULL,
+ &device->pdev->dev.kobj);
+ if (!device->channels_kset)
+ return -ENOMEM;
+
+ for (i = 0; i < device->num_channels; i++) {
+ struct al_crypto_chan *chan = device->channels[i];
+ chan->kobj.kset = device->channels_kset;
+ if (chan->type == AL_CRYPT_AUTH_Q)
+ rc = kobject_init_and_add(&chan->kobj, &chan_ktype,
+ NULL, "chan%d", i);
+ else
+ rc = kobject_init_and_add(&chan->kobj, &crc_chan_ktype,
+ NULL, "chan%d", i);
+ if (rc) {
+ int j;
+ /* kobject_put() is required even after a failed
+ * kobject_init_and_add(); it also frees channels
+ * 0..i through the ktype release hook.
+ */
+ for (j = 0; j <= i; j++)
+ kobject_put(&device->channels[j]->kobj);
+ kset_unregister(device->channels_kset);
+ /* channels never kobject-initialized must be
+ * freed manually
+ */
+ for (j = i+1; j < device->num_channels; j++)
+ kfree(device->channels[j]);
+ return rc;
+ }
+
+ kobject_uevent(&chan->kobj, KOBJ_ADD);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++) {
+ rc = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_udma_debug[i].attr.attr);
+ if (rc)
+ return rc;
+ }
+#endif
+ for (i = 0; i < ARRAY_SIZE(dev_attr_config); i++) {
+ rc = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_config[i].attr.attr);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Tear down everything created by al_crypto_sysfs_init(): config and
+ * udma debug attributes, then the per-channel kobjects (dropping the
+ * last reference frees each channel via the ktype release hook) and
+ * the channels kset.
+ */
+void al_crypto_sysfs_terminate(
+ struct al_crypto_device *device)
+{
+ int i;
+ struct device* dev = &device->pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_config); i++)
+ sysfs_remove_file(
+ &dev->kobj,
+ &dev_attr_config[i].attr.attr);
+
+#ifdef CONFIG_CRYPTO_DEV_AL_CRYPTO_STATS
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++)
+ sysfs_remove_file(
+ &dev->kobj,
+ &dev_attr_udma_debug[i].attr.attr);
+
+ for (i = 0; i < device->num_channels; i++)
+ kobject_put(&device->channels[i]->kobj);
+ kset_unregister(device->channels_kset);
+#endif
+}
diff --git a/drivers/crypto/al/al_hal_ssm_crc_memcpy.c b/drivers/crypto/al/al_hal_ssm_crc_memcpy.c
new file mode 100644
index 0000000..85f7450
--- /dev/null
+++ b/drivers/crypto/al/al_hal_ssm_crc_memcpy.c
@@ -0,0 +1,665 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "al_hal_ssm.h"
+#include "al_hal_ssm_crc_memcpy.h"
+
+/** How many descriptors to save between head and tail in case of
+ * wrap around.
+ */
+#define AL_CRC_MEMCPY_DESC_RES 0
+
+/* NOTE(review): CRC_MEMCPY_DEBUG is defined unconditionally, so the
+ * verbose al_debug()/al_print_* paths are always compiled in.
+ * Presumably this should be disabled (or made Kconfig-driven) for
+ * production builds -- confirm intent.
+ */
+#define CRC_MEMCPY_DEBUG
+
+#ifdef CRC_MEMCPY_DEBUG
+#define al_debug al_dbg
+#else
+#define al_debug(...)
+#endif
+
+#ifdef CRC_MEMCPY_DEBUG
+/* Dump the four 32-bit words of a descriptor to the debug log.
+ * NOTE(review): unlike al_print_crc_xaction() this is not static --
+ * presumably an oversight; confirm no other file references it.
+ */
+void al_print_desc(union al_udma_desc *desc)
+{
+ al_dbg("crc_memcpy: Desc: %08x %08x %08x %08x\n",
+ desc->tx_meta.len_ctrl, desc->tx_meta.meta_ctrl,
+ desc->tx_meta.meta1, desc->tx_meta.meta2);
+}
+
+/* Dump every field of a CRC transaction to the debug log. */
+static
+void al_print_crc_xaction(struct al_crc_transaction *xaction) {
+ unsigned int i;
+
+ al_dbg("crc_memcpy: CRC Transaction debug\n");
+ al_dbg(" CRC TYPE: ");
+ switch (xaction->crcsum_type) {
+ case(AL_CRC_CHECKSUM_NULL):
+ al_dbg(" NULL\n");
+ break;
+ case(AL_CRC_CHECKSUM_CRC32):
+ al_dbg(" CRC32\n");
+ break;
+ case(AL_CRC_CHECKSUM_CRC32C):
+ al_dbg(" CRC32C\n");
+ break;
+ case(AL_CRC_CHECKSUM_CKSM16):
+ al_dbg(" CKSM16\n");
+ break;
+ }
+ al_dbg(" Flags %d\n", xaction->flags);
+
+ al_dbg("-SRC num of buffers %d\n",
+ xaction->src.num);
+ for (i = 0 ; i < xaction->src.num; i++)
+ al_dbg(" addr 0x%016llx len %d\n",
+ (unsigned long long)xaction->src.bufs[i].addr,
+ xaction->src.bufs[i].len);
+
+ al_dbg("-DST num of buffers %d\n",
+ xaction->dst.num);
+ for (i = 0 ; i < xaction->dst.num; i++)
+ al_dbg(" addr 0x%016llx len %d\n",
+ (unsigned long long)xaction->dst.bufs[i].addr,
+ xaction->dst.bufs[i].len);
+
+ al_dbg(" CRC IV IN size: %d, addr 0x%016llx\n",
+ xaction->crc_iv_in.len,
+ (unsigned long long)xaction->crc_iv_in.addr);
+ al_dbg(" Cached CRC index %d\n", xaction->cached_crc_indx);
+ al_dbg(" Save CRC IV in cache: %d\n", xaction->save_crc_iv);
+ al_dbg(" Store CRC Out in cache: %d\n", xaction->st_crc_out);
+ al_dbg(" CRC Expected size: %d, addr 0x%016llx\n",
+ xaction->crc_expected.len,
+ (unsigned long long)xaction->crc_expected.addr);
+ al_dbg(" CRC OUT size: %d, addr 0x%016llx\n",
+ xaction->crc_out.len,
+ (unsigned long long)xaction->crc_out.addr);
+ al_dbg(" SWAP flags %x\n", xaction->swap_flags);
+ al_dbg(" XOR Valid: %d XOR in: %x XOR out: %x\n",
+ xaction->xor_valid, xaction->in_xor, xaction->res_xor);
+
+
+}
+#else
+/* Compiled out when CRC_MEMCPY_DEBUG is not defined. */
+#define al_print_desc(x)
+#define al_print_crc_xaction(x)
+#endif
+
+/**
+ * Fill one rx submission descriptor
+ *
+ * @param rx_udma_q rx udma handle
+ * @param flags flags for the descriptor
+ * @param buf destination buffer
+ * @param vmid virtual machine ID
+ */
+static INLINE
+void al_crc_memcpy_prep_one_rx_desc(struct al_udma_q *rx_udma_q,
+ uint32_t flags, struct al_buf *buf, uint16_t vmid)
+{
+ /* vmid rides in the high bits of the buffer pointer word */
+ uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+ uint32_t flags_len = flags;
+ union al_udma_desc *rx_desc;
+ uint32_t ring_id;
+
+ rx_desc = al_udma_desc_get(rx_udma_q);
+ /* get ring id */
+ ring_id = al_udma_ring_id_get(rx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+
+ flags_len |= ring_id;
+
+ /* buffer length occupies the low bits of the control word */
+ flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+ rx_desc->rx.len_ctrl = swap32_to_le(flags_len);
+ rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid_shifted);
+ al_print_desc(rx_desc);
+}
+
+/**
+ * Fill one tx submission descriptor
+ *
+ * @param tx_udma_q tx udma handle
+ * @param flags flags for the descriptor
+ * @param meta metadata word1
+ * @param buf source buffer
+ * @param vmid virtual machine ID
+ */
+static INLINE void al_crc_memcpy_prep_one_tx_desc(struct al_udma_q *tx_udma_q,
+ uint32_t flags, uint32_t meta, struct al_buf *buf,
+ uint16_t vmid)
+{
+ /* vmid rides in the high bits of the buffer pointer word */
+ uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+ uint32_t flags_len = flags;
+ union al_udma_desc *tx_desc;
+ uint32_t ring_id;
+
+ tx_desc = al_udma_desc_get(tx_udma_q);
+ /* get ring id */
+ ring_id = al_udma_ring_id_get(tx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+
+ flags_len |= ring_id;
+
+ /* buffer length occupies the low bits of the control word */
+ flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+ tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+ tx_desc->tx.meta_ctrl = swap32_to_le(meta);
+ tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid_shifted);
+ al_print_desc(tx_desc);
+}
+
+
+/**
+ * Get number of rx submission descriptors needed for crc transaction
+ *
+ * we need rx descriptor for each destination buffer.
+ * if the transaction doesn't have destination buffers, then one
+ * descriptor is needed
+ *
+ * @param xaction transaction context
+ *
+ * @return number of rx submission descriptors
+ */
+static INLINE
+uint32_t _al_crcsum_xaction_rx_descs_count(struct al_crc_transaction *xaction)
+{
+ /* one per destination buffer, plus one if the CRC result is
+ * written out
+ */
+ uint32_t count = xaction->dst.num + (xaction->crc_out.len ? 1 : 0);
+
+ /* valid crc rx descs count */
+ al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+ return count;
+}
+
+/**
+ * Get number of tx submission descriptors needed for crc transaction
+ *
+ * we need tx descriptor for each source buffer.
+ *
+ * @param xaction transaction context
+ *
+ * @return number of tx submission descriptors
+ */
+static INLINE
+uint32_t _al_crcsum_xaction_tx_descs_count(struct al_crc_transaction *xaction)
+{
+ /* one per source buffer, plus one each for the CRC IV input and
+ * the expected-CRC input when present
+ */
+ uint32_t count = xaction->src.num + (xaction->crc_iv_in.len ? 1 : 0) +
+ (xaction->crc_expected.len ? 1 : 0);
+
+ /* valid crc tx descs count */
+ al_assert(count);
+ /* Need one for metadata if offsets are valid */
+ count += (xaction->xor_valid) ? 1 : 0;
+ /* valid crc tx descs count */
+ al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+ return count;
+}
+
+/**
+ * Fill the memcpy rx submission descriptors
+ *
+ * this function writes the contents of the rx submission descriptors
+ *
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ * @param rx_desc_cnt number of total rx descriptors
+ */
+static INLINE
+void al_crc_memcpy_set_memcpy_rx_descs(struct al_udma_q *rx_udma_q,
+ struct al_memcpy_transaction *xaction, uint32_t rx_desc_cnt)
+{
+ uint32_t flags = 0;
+ union al_udma_desc *rx_desc;
+ uint32_t buf_idx;
+
+ /* Set descriptor flags */
+ flags |= (xaction->flags & AL_SSM_INTERRUPT) ? AL_M2S_DESC_INT_EN : 0;
+ flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ?
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* if the xaction doesn't have destination buffers,
+ * allocate single Meta descriptor
+ */
+ if (unlikely(!rx_desc_cnt)) {
+ al_debug("crc_memcpy: Preparing Memcpy Meta Rx desc\n");
+ rx_desc = al_udma_desc_get(rx_udma_q);
+ flags |= al_udma_ring_id_get(rx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+ flags |= RX_DESC_META;
+ /* write back flags */
+ rx_desc->rx.len_ctrl = swap32_to_le(flags);
+ al_print_desc(rx_desc);
+ return;
+ }
+
+ /* dst exist -> will copy the buf to the destination */
+ if (xaction->dst.num) {
+ struct al_buf *buf = xaction->dst.bufs;
+ al_debug("crc_memcpy: Preparing %d Memcpy DST Rx desc\n",
+ xaction->dst.num);
+ for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+ al_crc_memcpy_prep_one_rx_desc(
+ rx_udma_q, flags, buf, xaction->dst.vmid);
+ buf++;
+ }
+ }
+
+}
+
+
+/**
+ * Fill the crc/checksum rx submission descriptors
+ *
+ * this function writes the contents of the rx submission descriptors
+ *
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ * @param rx_desc_cnt number of total rx descriptors
+ */
+static INLINE
+void al_crc_memcpy_set_crc_rx_descs(struct al_udma_q *rx_udma_q,
+ struct al_crc_transaction *xaction, uint32_t rx_desc_cnt)
+{
+ uint32_t flags = 0;
+ union al_udma_desc *rx_desc;
+ uint32_t buf_idx;
+
+ /* Set descriptor flags */
+ flags = (xaction->flags & AL_SSM_INTERRUPT) ? AL_M2S_DESC_INT_EN : 0;
+ flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ?
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* if the xaction doesn't have destination buffers,
+ * allocate single Meta descriptor,
+ */
+ if (unlikely(!rx_desc_cnt)) {
+ al_debug("crc_memcpy: Preparing CRC Meta Rx desc\n");
+ rx_desc = al_udma_desc_get(rx_udma_q);
+ flags |= al_udma_ring_id_get(rx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+ flags |= RX_DESC_META;
+ /* write back flags */
+ rx_desc->rx.len_ctrl = swap32_to_le(flags);
+ al_print_desc(rx_desc);
+ return;
+ }
+
+ /* dst exist -> will copy the buf to the destination */
+ if (xaction->dst.num) {
+ struct al_buf *buf = xaction->dst.bufs;
+ al_debug("crc_memcpy: Preparing %d CRC DST Rx desc\n",
+ xaction->dst.num);
+ for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+ al_crc_memcpy_prep_one_rx_desc(
+ rx_udma_q, flags, buf, xaction->dst.vmid);
+ buf++;
+ }
+ }
+
+ /* crc/checksum output: one extra descriptor for the result buffer */
+ if (xaction->crc_out.len) {
+ al_debug("crc_memcpy: Preparing CRC out Rx desc\n");
+ al_crc_memcpy_prep_one_rx_desc(rx_udma_q, flags,
+ &xaction->crc_out, xaction->misc_vmid);
+ }
+
+}
+
+/**
+ * Fill the memcpy tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ */
+static INLINE
+void al_crc_memcpy_set_memcpy_tx_descs(struct al_udma_q *tx_udma_q,
+ struct al_memcpy_transaction *xaction)
+{
+ struct al_buf *buf = xaction->src.bufs;
+ uint32_t flags = 0;
+ uint32_t buf_idx;
+ uint32_t word1_meta = 0;
+
+ /* Set flags */
+ flags |= AL_M2S_DESC_FIRST;
+ flags |= xaction->flags & AL_SSM_SRC_NO_SNOOP ?
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* Set first desc word1 metadata: a memcpy is issued as a NULL
+ * CRC operation that just forwards the original data
+ */
+ word1_meta |= AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT;
+ word1_meta |= AL_CRC_CHECKSUM_NULL << TX_DESC_META_CRC_OP_TYPE_SHIFT;
+ word1_meta |= TX_DESC_META_CRC_SEND_ORIG;
+ word1_meta |= RX_DESC_META_CRC_FIRST_BUF;
+ word1_meta |= RX_DESC_META_CRC_LAST_BUF;
+
+ flags |= xaction->flags & AL_SSM_BARRIER ? AL_M2S_DESC_DMB : 0;
+
+ al_debug("crc_memcpy: Preparing %d Memcpy SRC Tx desc\n",
+ xaction->src.num);
+
+ for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) {
+ /* check for last */
+ if (buf_idx == (xaction->src.num - 1))
+ flags |= AL_M2S_DESC_LAST;
+
+ al_crc_memcpy_prep_one_tx_desc(
+ tx_udma_q, flags, word1_meta, buf, xaction->src.vmid);
+ word1_meta = 0;
+ /* clear first and DMB flags, keep no snoop hint flag */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+ flags |= AL_M2S_DESC_CONCAT;
+ buf++;
+ }
+
+}
+
+/**
+ * Fill the crc/checksum tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ */
+static INLINE
+void al_crc_memcpy_set_crc_tx_descs(struct al_udma_q *tx_udma_q,
+ struct al_crc_transaction *xaction)
+{
+ struct al_buf *buf = xaction->src.bufs;
+ uint32_t flags = 0;
+ uint32_t buf_idx;
+ uint32_t word1_meta;
+
+ /* Set flags */
+ flags = AL_M2S_DESC_FIRST;
+ flags |= xaction->flags & AL_SSM_SRC_NO_SNOOP ?
+ AL_M2S_DESC_NO_SNOOP_H : 0;
+
+ /* Set first desc word1 metadata */
+ word1_meta = AL_CRC_CHECKSUM << TX_DESC_META_OP_SHIFT;
+ word1_meta |= xaction->crcsum_type << TX_DESC_META_CRC_OP_TYPE_SHIFT;
+ word1_meta |= xaction->dst.num ? TX_DESC_META_CRC_SEND_ORIG : 0;
+ /* NOTE(review): save_crc_iv maps to ..._ST_CRC_IV and st_crc_out
+ * maps to ..._SAVE_IV -- the cross-mapping may be intentional but
+ * looks suspicious; confirm against the SSM hardware spec.
+ */
+ word1_meta |= xaction->save_crc_iv ? TX_DESC_META_CRC_ST_CRC_IV : 0;
+ word1_meta |= xaction->st_crc_out ? TX_DESC_META_CRC_SAVE_IV : 0;
+ word1_meta |= xaction->crc_out.len ? TX_DESC_META_CRC_SEND_CRC : 0;
+ word1_meta |= xaction->crc_iv_in.len ? 0 : TX_DESC_META_CRC_USE_ST_IV;
+ word1_meta |= xaction->crc_expected.len ? TX_DESC_META_CRC_VALID : 0;
+ word1_meta |= (xaction->swap_flags << TX_DESC_META_CRC_SWAP_SHIFT)
+ & TX_DESC_META_CRC_SWAP_MASK;
+ word1_meta |= (xaction->cached_crc_indx << TX_DESC_META_CRC_IDX_SHIFT)
+ & TX_DESC_META_CRC_IDX_MASK;
+
+ /* if xor fields are valid first desc is metadata */
+ if (unlikely(xaction->xor_valid)) {
+ uint32_t flags_len = flags;
+ union al_udma_desc *tx_desc;
+ uint32_t ring_id;
+
+ al_debug("crc_memcpy: preparing CRC metadata desc\n");
+ tx_desc = al_udma_desc_get(tx_udma_q);
+ /* UDMA fields */
+ ring_id = al_udma_ring_id_get(tx_udma_q)
+ << AL_M2S_DESC_RING_ID_SHIFT;
+ flags_len |= ring_id;
+ flags_len |= AL_M2S_DESC_META_DATA;
+ tx_desc->tx_meta.len_ctrl = swap32_to_le(flags_len);
+ /* Word1 metadata */
+ tx_desc->tx_meta.meta_ctrl = 0;
+ /* Word 2 metadata */
+ tx_desc->tx_meta.meta1 = swap32_to_le(xaction->in_xor);
+ /* Word 3 metadata */
+ tx_desc->tx_meta.meta2 = swap32_to_le(xaction->res_xor);
+ al_print_desc(tx_desc);
+ /* clear first flag, keep no snoop hint flag */
+ /* Indicate Last Block */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+
+ }
+
+ flags |= xaction->flags & AL_SSM_BARRIER ? AL_M2S_DESC_DMB : 0;
+
+ word1_meta |= RX_DESC_META_CRC_FIRST_BUF;
+
+ /* CRC IV in */
+ if (xaction->crc_iv_in.len) {
+ al_debug("CRC_memcpy: Preparing CRC IV in Tx desc\n");
+ /* check for last */
+ flags |= xaction->src.num ? 0 : AL_M2S_DESC_LAST;
+
+ word1_meta |= xaction->src.num ? 0 : RX_DESC_META_CRC_LAST_BUF;
+
+ al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+ &xaction->crc_iv_in, xaction->misc_vmid);
+ word1_meta = 0;
+ /* clear first and DMB flags, keep no snoop hint flag */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+
+ }
+
+ /* CRC IV expected */
+ if (xaction->crc_expected.len) {
+ al_debug("CRC_memcpy: Preparing CRC expected Tx desc\n");
+
+ al_crc_memcpy_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+ &xaction->crc_expected, xaction->misc_vmid);
+ word1_meta = 0;
+ /* clear first and DMB flags, keep no snoop hint flag */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+
+ }
+
+ al_debug("CRC_memcpy: Preparing %d CRC SRC Tx desc\n",
+ xaction->src.num);
+
+ /* Indicate Last Block */
+ word1_meta |= RX_DESC_META_CRC_LAST_BUF;
+ for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) {
+ /* check for last */
+ if (buf_idx == (xaction->src.num - 1))
+ flags |= AL_M2S_DESC_LAST;
+
+ al_crc_memcpy_prep_one_tx_desc(
+ tx_udma_q, flags, word1_meta, buf, xaction->src.vmid);
+ word1_meta = 0;
+ /* clear first and DMB flags, keep no snoop hint flag */
+ flags &= AL_M2S_DESC_NO_SNOOP_H;
+ flags |= AL_M2S_DESC_CONCAT;
+ buf++;
+ }
+
+}
+
+/**
+ * Prepare rx/tx submission descriptors for a plain memcpy transaction
+ *
+ * @param dma     SSM DMA handle
+ * @param qid     queue index (must be configured as AL_MEM_CRC_MEMCPY_Q)
+ * @param xaction memcpy transaction context
+ *
+ * @return 0 on success, -ENOSPC if either udma queue lacks free descriptors
+ *
+ * Note: only the rx descriptors are issued here; the tx descriptors are
+ * written but not issued until al_crc_memcpy_dma_action() is called with
+ * xaction->tx_descs_count.
+ */
+int al_memcpy_prepare(struct al_ssm_dma *dma, uint32_t qid,
+	struct al_memcpy_transaction *xaction)
+{
+	uint32_t rx_descs = xaction->dst.num;
+	uint32_t tx_descs = xaction->src.num;
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	int rc;
+
+	al_debug("%s\n", __func__);
+
+	/* Queue is for memcpy transactions */
+	al_assert(dma->q_types[qid] == AL_MEM_CRC_MEMCPY_Q);
+
+	/* ensure enough rx/tx udma descs */
+	rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	al_assert(!rc);
+	/* at least one rx desc (meta desc) is consumed even with no dst bufs */
+	if (unlikely(al_udma_available_get(rx_udma_q) <
+		     (rx_descs ? rx_descs : 1))) {
+		al_dbg("memcpy[%s]:rx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(!rc);
+	/* AL_CRC_MEMCPY_DESC_RES descriptors are kept in reserve on the tx q */
+	if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs
+		     + AL_CRC_MEMCPY_DESC_RES)) {
+		al_dbg("memcpy[%s]:tx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* prepare memcpy rx/tx descs */
+	al_crc_memcpy_set_memcpy_rx_descs(rx_udma_q, xaction, rx_descs);
+	al_crc_memcpy_set_memcpy_tx_descs(tx_udma_q, xaction);
+	/* add rx descriptors */
+	al_udma_desc_action_add(rx_udma_q, rx_descs ? rx_descs : 1);
+
+	/* set number of tx descriptors */
+	xaction->tx_descs_count = tx_descs;
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Prepare rx/tx submission descriptors for a CRC/checksum transaction
+ *
+ * @param dma     SSM DMA handle
+ * @param qid     queue index (must be configured as AL_MEM_CRC_MEMCPY_Q)
+ * @param xaction CRC/checksum transaction context
+ *
+ * @return 0 on success, -ENOSPC if either udma queue lacks free descriptors
+ *
+ * The rx descriptors are issued here; the tx descriptors are written but
+ * only issued later by al_crc_memcpy_dma_action() using
+ * xaction->tx_descs_count.
+ */
+int al_crc_csum_prepare(struct al_ssm_dma *dma, uint32_t qid,
+	struct al_crc_transaction *xaction)
+{
+	uint32_t rx_descs;
+	uint32_t tx_descs;
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	int rc;
+
+	al_debug("al_crc_csum\n");
+	al_print_crc_xaction(xaction);
+
+	/* Queue is for crc/csum transactions */
+	al_assert(dma->q_types[qid] == AL_MEM_CRC_MEMCPY_Q);
+
+	/* Saving and storing the CRC IV are mutually exclusive */
+	al_assert(((xaction->save_crc_iv == 0) || (xaction->st_crc_out == 0)));
+
+	/* calc tx (M2S) descriptors */
+	tx_descs = _al_crcsum_xaction_tx_descs_count(xaction);
+	rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	/* valid CRC/CSUM tx q handle */
+	al_assert(!rc);
+	/* AL_CRC_MEMCPY_DESC_RES descriptors are kept in reserve on the tx q */
+	if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs
+		     + AL_CRC_MEMCPY_DESC_RES)) {
+		al_dbg("crc_csum[%s]:tx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* calc rx (S2M) descriptors, at least one desc is required */
+	rx_descs = _al_crcsum_xaction_rx_descs_count(xaction);
+	rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	/* valid CRC/CSUM rx q handle */
+	al_assert(!rc);
+	if (unlikely(al_udma_available_get(rx_udma_q) <
+		     (rx_descs ? rx_descs : 1))) {
+		al_dbg("crc_csum[%s]:rx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* prepare crc/checksum rx descs */
+	al_crc_memcpy_set_crc_rx_descs(rx_udma_q, xaction, rx_descs);
+	/* prepare crc/checksum tx descs */
+	al_crc_memcpy_set_crc_tx_descs(tx_udma_q, xaction);
+	/* add rx descriptors */
+	al_udma_desc_action_add(rx_udma_q, rx_descs ? rx_descs : 1);
+
+	/* set number of tx descriptors */
+	xaction->tx_descs_count = tx_descs;
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Issue previously prepared CRC/memcpy tx descriptors to the engine
+ *
+ * @param dma      SSM DMA handle
+ * @param qid      queue index
+ * @param tx_descs number of tx descriptors to issue
+ *
+ * @return always 0
+ */
+int al_crc_memcpy_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+	int tx_descs)
+{
+	struct al_udma_q *udma_q;
+	int err;
+
+	err = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &udma_q);
+	/* the CRC/CSUM tx q handle must be valid */
+	al_assert(!err);
+
+	/* kick off the pending tx descriptors */
+	al_udma_desc_action_add(udma_q, tx_descs);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Check for a completed CRC/memcpy transaction on the given queue
+ *
+ * @param dma         SSM DMA handle
+ * @param qid         queue index
+ * @param comp_status [out] error bits from the completion descriptor
+ *                    (masked by RX_COMP_STATUS_MASK); 0 means success
+ *
+ * @return 1 if a transaction completed (comp_status is valid), 0 otherwise
+ */
+int al_crc_memcpy_dma_completion(struct al_ssm_dma *dma, uint32_t qid,
+	uint32_t *comp_status)
+{
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	volatile union al_udma_cdesc *cdesc;
+	int rc;
+	uint32_t cdesc_count;
+
+	rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	/* valid comp rx q handle */
+	al_assert(!rc);
+
+	cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc);
+	if (!cdesc_count)
+		return 0;
+
+	/* if we have multiple completion descriptors,
+	   then last one will have the valid status */
+	if (unlikely(cdesc_count > 1))
+		cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1);
+
+	*comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) &
+		RX_COMP_STATUS_MASK;
+
+	/* return the rx completion descriptors to the queue */
+	al_udma_cdesc_ack(rx_udma_q, cdesc_count);
+
+	al_debug("crc_memcpy packet completed. count %d status desc %p meta %x\n",
+		cdesc_count, cdesc, cdesc->al_desc_comp_rx.ctrl_meta);
+
+	/* cleanup tx completion queue */
+	rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	/* valid comp tx q handle */
+	al_assert(!rc);
+
+	cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL);
+	if (cdesc_count)
+		al_udma_cdesc_ack(tx_udma_q, cdesc_count);
+
+	return 1;
+}
diff --git a/drivers/crypto/al/al_hal_ssm_crypto.c b/drivers/crypto/al/al_hal_ssm_crypto.c
new file mode 100644
index 0000000..551e8881
--- /dev/null
+++ b/drivers/crypto/al/al_hal_ssm_crypto.c
@@ -0,0 +1,947 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "al_hal_ssm.h"
+#include "al_hal_ssm_crypto.h"
+
+/*
+ * Rx (S2M) Descriptors
+ */
+#define RX_DESC_META (1<<30) /* Meta data */
+
+/* Tx (M2S) word1 common Descriptors */
+#define TX_DESC_META_OP_MASK (0x3<<23)
+#define TX_DESC_META_OP_SHIFT (23)
+
+/*
+ * Crypto
+ */
+#define TX_DESC_META_CRYPT_DIR_SHIFT (22) /* Direction */
+#define TX_DESC_META_CRYPT_S_SA (1<<21) /* Evict SA */
+#define TX_DESC_META_CRYPT_S_ENCIV (1<<20) /* Send IV */
+#define TX_DESC_META_CRYPT_SEND_ORIG (1<<19) /* Send original packet */
+#define TX_DESC_META_CRYPT_SEND_AUTHIV (1<<18) /* Send Authentication IV */
+#define TX_DESC_META_CRYPT_S_SIGN (1<<17) /* Send Sign */
+
+#define TX_DESC_META_AUTH_FIRST (1<<16) /* Auth only first bit */
+#define TX_DESC_META_AUTH_LAST (1<<15) /* Auth only last bit */
+
+#define TX_DESC_META_AUTH_VALID (1<<14) /* Validate Signature */
+
+#define TX_DESC_META_SA_IDX_MASK (0xff<<5) /* SA index mask */
+#define TX_DESC_META_SA_IDX_SHIFT (5)
+
+#define TX_DESC_META_BUF_TYPE_MASK (0x7)/* Buffer type mask */
+#define TX_DESC_META_BUF_TYPE_SHIFT (0)
+
+/* Tx (M2S) word2 Descriptors */
+#define TX_DESC_META_ENC_OFF_MASK (0xffff<<16)
+#define TX_DESC_META_ENC_OFF_SHIFT (16)
+#define TX_DESC_META_ENC_OFF_EOP_MASK (0xffff)
+#define TX_DESC_META_ENC_OFF_EOP_SHIFT (0)
+
+/* Tx (M2S) word3 Descriptors */
+#define TX_DESC_META_AUTH_OFF_MASK (0xffff<<16)
+#define TX_DESC_META_AUTH_OFF_SHIFT (16)
+#define TX_DESC_META_AUTH_OFF_EOP_MASK (0xffff)
+#define TX_DESC_META_AUTH_OFF_EOP_SHIFT (0)
+
+#define RX_COMP_STATUS_MASK (AL_CRYPT_AUTH_ERROR | \
+ AL_CRYPT_SA_IV_EVICT_FIFO_ERROR | \
+ AL_CRYPT_DES_ILLEGAL_KEY_ERROR | \
+ AL_CRYPT_M2S_ERROR | \
+ AL_CRYPT_SRAM_PARITY_ERROR | \
+ AL_CRYPT_INTERNAL_FLOW_VIOLATION_ERROR)
+
+/*
+ * Crypto DMA operation (Enc, Auth or Enc + Auth)
+ */
+#define AL_CRYPT_OP 3
+
+/** Crypto DMA buffer types */
+enum al_crypto_buf_type {
+ AL_CRYPT_BUF_SA_UPDATE = 0,
+ AL_CRYPT_BUF_ENC_IV = 1,
+ AL_CRYPT_BUF_AUTH_IV = 2,
+ AL_CRYPT_BUF_SRC = 3,
+ AL_CRYPT_BUF_AUTH_SIGN = 4
+};
+
+
+/*
+ * SA
+ */
+/* Word 0 */
+#define CRYPT_SAD_OP_MASK (0x3<<30)/* Crypto Operation */
+#define CRYPT_SAD_OP_SHIFT (30)
+#define CRYPT_SAD_ENC_TYPE_MASK (0xf<<25)/* Crypto Type */
+#define CRYPT_SAD_ENC_TYPE_SHIFT (25)
+#define CRYPT_SAD_TRIPDES_MODE_MASK (0x1<<22)/* 3DES mode */
+#define CRYPT_SAD_TRIPDES_MODE_SHIFT (22)
+#define CRYPT_SAD_AES_KEY_SIZE_MASK (0x3<<20)/* AES key size */
+#define CRYPT_SAD_AES_KEY_SIZE_SHIFT (20)
+#define CRYPT_SAD_AUTH_TYPE_MASK (0xf<<12)/* Auth type */
+#define CRYPT_SAD_AUTH_TYPE_SHIFT (12)
+#define CRYPT_SAD_SIGN_SIZE_MASK (0xf<<8) /* Signature size */
+#define CRYPT_SAD_SIGN_SIZE_SHIFT (8)
+#define CRYPT_SAD_SHA2_KEY_SIZE_MASK (0x3<<6) /* Sha2 key size */
+#define CRYPT_SAD_SHA2_KEY_SIZE_SHIFT (6)
+#define CRYPT_SAD_HMAC_EN (1<<5) /* Hmac enable */
+#define CRYPT_SAD_SIGN_AFTER_ENC (1<<4) /* Sign after encryption */
+#define CRYPT_SAD_AUTH_AFTER_DEC (1<<3) /* Auth after decryption */
+#define CRYPT_SAD_AUTH_MSB_BITS (1<<2) /* Auth use the more significant
+ bits of the signature */
+#define CRYPT_SAD_CNTR_SIZE_MASK (0x3) /* Counter size */
+#define CRYPT_SAD_CNTR_SIZE_SHIFT (0)
+
+
+/* Word 1 */
+#define CRYPT_SAD_CCM_CBC_IV_ADD_SWORD (1)
+#define CRYPT_SAD_CCM_CBC_IV_ADD_SIZE (1)
+
+/* Word 2 */
+#define CRYPT_SAD_ENC_OFF_MASK (0xffff<<16)/*Enc off- start of pkt*/
+#define CRYPT_SAD_ENC_OFF_SHIFT (16)
+#define CRYPT_SAD_ENC_OFF_EOP_MASK (0xffff)/*Enc off- end of pkt*/
+#define CRYPT_SAD_ENC_OFF_EOP_SHIFT (0)
+
+/* Word 3 */
+#define CRYPT_SAD_AUTH_OFF_MASK (0xffff<<16) /*Auth off- start of pkt*/
+#define CRYPT_SAD_AUTH_OFF_SHIFT (16)
+#define CRYPT_SAD_AUTH_OFF_EOP_MASK (0xffff) /*Auth off- end of pkt*/
+#define CRYPT_SAD_AUTH_OFF_EOP_SHIFT (0)
+
+/* Other words */
+#define CRYPT_SAD_ENC_KEY_SWORD (4) /* Encryption Key */
+#define CRYPT_SAD_ENC_KEY_SIZE (8)
+#define CRYPT_SAD_ENC_IV_SWORD (12) /* Encryption IV */
+#define CRYPT_SAD_ENC_IV_SIZE (4) /* Engine update this field */
+#define CRYPT_SAD_GCM_AUTH_IV_SWORD (16) /* GCM Auth IV */
+#define CRYPT_SAD_GCM_AUTH_IV_SIZE (4)
+#define CRYPT_SAD_AUTH_IV_SWORD (12) /* Auth Only IV */
+#define CRYPT_SAD_AUTH_IV_SIZE (16) /* Engine update this field */
+#define CRYPT_SAD_HMAC_IV_IN_SWORD (28) /* HMAC_IV_in H(k xor ipad) */
+#define CRYPT_SAD_HMAC_IV_IN_SIZE (16)
+#define CRYPT_SAD_HMAC_IV_OUT_SWORD (44) /* HMAC_IV_out H(k xor opad) */
+#define CRYPT_SAD_HMAC_IV_OUT_SIZE (16)
+
+
+/**
+ * Set a field inside an SA word
+ *
+ * Validates (debug builds) that the shifted value fits within the mask and
+ * ORs it into dest. Arguments are evaluated more than once - do not pass
+ * expressions with side effects.
+ *
+ * Note: no trailing semicolon after while (0) - the caller supplies it, so
+ * the macro behaves as one statement and is safe in brace-less if/else.
+ */
+#define sa_init_field(dest, val, mask, shift, str)\
+	do {\
+		al_assert(!((val << shift) & ~(mask)));\
+		al_debug(" SA %s - %x\n", str, val); \
+		dest |= (val << shift) & mask;\
+	} while (0)
+/**
+ * DEBUG
+ */
+#ifdef CRYPTO_DEBUG
+/* Dump the four 32-bit words of a crypto submission descriptor (debug only) */
+void al_print_crypto_desc(union al_udma_desc *desc)
+{
+	al_dbg(" Crypto: Desc: %08x %08x %08x %08x\n",
+			desc->tx_meta.len_ctrl, desc->tx_meta.meta_ctrl,
+			desc->tx_meta.meta1, desc->tx_meta.meta2);
+}
+
+/* Dump all fields of a crypto transaction (debug builds only) */
+static
+void al_print_crypto_xaction(struct al_crypto_transaction *xaction)
+{
+	unsigned int i;
+
+	al_dbg("Crypto: Transaction debug\n");
+	al_dbg(" Direction %s\n",
+		(xaction->dir == AL_CRYPT_ENCRYPT) ? "Encrypt" : "Decrypt");
+	al_dbg(" Flags %d\n", xaction->flags);
+
+	al_dbg("-SRC buf size %d num of buffers %d\n",
+		xaction->src_size, xaction->src.num);
+	for (i = 0 ; i < xaction->src.num; i++)
+		al_dbg(" addr 0x%016llx len %d\n",
+			(unsigned long long)xaction->src.bufs[i].addr,
+			xaction->src.bufs[i].len);
+
+	al_dbg("-DST num of buffers %d\n",
+		xaction->dst.num);
+	for (i = 0 ; i < xaction->dst.num; i++)
+		al_dbg(" addr 0x%016llx len %d\n",
+			(unsigned long long)xaction->dst.bufs[i].addr,
+			xaction->dst.bufs[i].len);
+
+	al_dbg("-SA index %d address 0x%016llx len %d\n",
+		xaction->sa_indx, (unsigned long long)xaction->sa_in.addr,
+		xaction->sa_in.len);
+	al_dbg(" SA OUT size: %d , addr 0x%016llx\n",
+		xaction->sa_out.len,
+		(unsigned long long)xaction->sa_out.addr);
+
+	al_dbg("-Enc IV IN size: %d, addr 0x%016llx\n",
+		xaction->enc_iv_in.len,
+		(unsigned long long)xaction->enc_iv_in.addr);
+	al_dbg(" Enc IV OUT size: %d, addr 0x%016llx\n",
+		xaction->enc_iv_out.len,
+		(unsigned long long)xaction->enc_iv_out.addr);
+	al_dbg(" Enc Next IV OUT size: %d, addr 0x%016llx\n",
+		xaction->enc_next_iv_out.len,
+		(unsigned long long)xaction->enc_next_iv_out.addr);
+	al_dbg(" Enc Offset %d Len %d\n",
+		xaction->enc_in_off, xaction->enc_in_len);
+
+	al_dbg("-Auth fl_valid %d, first %d last %d\n",
+		xaction->auth_fl_valid, xaction->auth_first,
+		xaction->auth_last);
+	al_dbg(" Auth IV IN size: %d, addr 0x%016llx\n",
+		xaction->auth_iv_in.len,
+		(unsigned long long)xaction->auth_iv_in.addr);
+	al_dbg(" Auth IV OUT size: %d, addr 0x%016llx\n",
+		xaction->auth_iv_out.len,
+		(unsigned long long)xaction->auth_iv_out.addr);
+	al_dbg(" Auth SIGN IN size: %d, addr 0x%016llx\n",
+		xaction->auth_sign_in.len,
+		(unsigned long long)xaction->auth_sign_in.addr);
+	al_dbg(" Auth SIGN OUT size: %d, addr 0x%016llx\n",
+		xaction->auth_sign_out.len,
+		(unsigned long long)xaction->auth_sign_out.addr);
+	al_dbg(" Auth Offset %d Len %d\n",
+		xaction->auth_in_off, xaction->auth_in_len);
+	al_dbg(" Auth Byte Count %d\n",
+		xaction->auth_bcnt);
+
+}
+
+#else
+#define al_print_crypto_desc(x)
+#define al_print_crypto_xaction(x)
+#endif
+
+/**
+ * Copy data into a HW SA image, byte by byte
+ *
+ * @param dst destination buffer (SA words)
+ * @param src source buffer
+ * @param size size in 32-bit words
+ */
+static
+void al_crypto_sa_copy(uint32_t *dst, uint8_t *src, uint32_t size)
+{
+	uint8_t *d = (uint8_t *)dst;
+	uint8_t *end = d + (size * 4);
+
+	/* plain byte copy - no alignment assumptions on src */
+	while (d < end)
+		*d++ = *src++;
+}
+
+/**
+ * Get number of rx submission descriptors needed for crypto transaction
+ *
+ * One descriptor per destination buffer, plus one per optional output:
+ * SA evict, enc IV, next-enc-IV/auth-IV (shared), auth signature.
+ * A transaction with no outputs at all still consumes a single meta
+ * descriptor, but that is accounted for by the caller.
+ *
+ * @param xaction transaction context
+ *
+ * @return number of rx submission descriptors
+ */
+static INLINE
+uint32_t al_crypto_xaction_rx_descs_count(struct al_crypto_transaction *xaction)
+{
+	uint32_t count = xaction->dst.num;
+
+	if (xaction->sa_out.len)
+		count++;
+	if (xaction->enc_iv_out.len)
+		count++;
+	/* a single descriptor serves either the next enc IV or the auth IV */
+	if (xaction->enc_next_iv_out.len || xaction->auth_iv_out.len)
+		count++;
+	if (xaction->auth_sign_out.len)
+		count++;
+
+	/* valid rx descs count */
+	al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+	return count;
+}
+
+/**
+ * Get number of tx submission descriptors needed for crypto transaction
+ *
+ * One descriptor per source buffer, plus one per optional input (SA, enc IV,
+ * auth IV, auth signature), plus one metadata descriptor when the
+ * encryption/authentication offsets are in use.
+ *
+ * @param xaction transaction context
+ *
+ * @return number of tx submission descriptors
+ */
+static INLINE
+uint32_t al_crypto_xaction_tx_descs_count(struct al_crypto_transaction *xaction)
+{
+	uint32_t count = xaction->src.num + (xaction->sa_in.len ? 1 : 0) +
+		(xaction->enc_iv_in.len ? 1 : 0) +
+		(xaction->auth_iv_in.len ? 1 : 0) +
+		(xaction->auth_sign_in.len ? 1 : 0);
+
+	/* a transaction with no tx payload at all is invalid */
+	al_assert(count);
+	/* Need one for metadata if offsets are valid */
+	count += (xaction->enc_in_len || xaction->auth_in_len) ? 1 : 0;
+	/*valid tx descs count*/
+	al_assert(count <= AL_SSM_MAX_SRC_DESCS);
+
+	return count;
+}
+
+/**
+ * Fill one rx submission descriptor
+ *
+ * @param rx_udma_q rx udma handle
+ * @param flags flags for the descriptor
+ * @param buf destination buffer
+ * @param vmid virtual machine ID of the buffer
+ */
+static INLINE
+void al_crypto_prep_one_rx_desc(struct al_udma_q *rx_udma_q,
+	uint32_t flags, struct al_buf *buf, uint16_t vmid)
+{
+	/* vmid travels in the high bits of the buffer address */
+	uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+	uint32_t flags_len = flags;
+	union al_udma_desc *rx_desc;
+	uint32_t ring_id;
+
+	rx_desc = al_udma_desc_get(rx_udma_q);
+	/* get ring id */
+	ring_id = al_udma_ring_id_get(rx_udma_q)
+		<< AL_M2S_DESC_RING_ID_SHIFT;
+
+	flags_len |= ring_id;
+
+	/* NOTE(review): AL_M2S_DESC_* layout macros are reused for this S2M
+	 * descriptor - presumably the field layout is shared; verify vs spec */
+	flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+	rx_desc->rx.len_ctrl = swap32_to_le(flags_len);
+	rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid_shifted);
+	al_print_crypto_desc(rx_desc);
+}
+
+/**
+ * Fill the crypto rx submission descriptors
+ *
+ * this function writes the contents of the rx submission descriptors;
+ * descriptor order must match the order the engine emits its outputs:
+ * SA out, enc IV out, destination buffers, next-IV/auth-IV out, signature
+ *
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ * @param rx_desc_cnt number of total rx descriptors (may be 0)
+ */
+static
+void al_crypto_set_rx_descs(struct al_udma_q *rx_udma_q,
+	struct al_crypto_transaction *xaction, uint32_t rx_desc_cnt)
+{
+	uint32_t flags;
+	union al_udma_desc *rx_desc;
+	uint32_t buf_idx;
+
+	/* Set descriptor flags */
+	flags = (xaction->flags & AL_SSM_INTERRUPT) ?
+		AL_M2S_DESC_INT_EN : 0;
+	flags |= (xaction->flags & AL_SSM_DEST_NO_SNOOP) ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* if the xaction doesn't have destination buffers,
+	 * allocate single Meta descriptor,
+	 */
+	if (unlikely(!rx_desc_cnt)) {
+		al_debug("Crypto: Preparing Meta Rx dec\n");
+		rx_desc = al_udma_desc_get(rx_udma_q);
+		flags |= al_udma_ring_id_get(rx_udma_q)
+			<< AL_M2S_DESC_RING_ID_SHIFT;
+		flags |= RX_DESC_META;
+		/* write back flags */
+		rx_desc->rx.len_ctrl = swap32_to_le(flags);
+		al_print_crypto_desc(rx_desc);
+		return;
+	}
+
+	/* prepare descriptors for the required fields */
+	if (unlikely(xaction->sa_out.len)) {
+		al_debug("Crypto: Preparing SA out Rx desc\n");
+		al_crypto_prep_one_rx_desc(
+			rx_udma_q, flags, &xaction->sa_out, xaction->misc_vmid);
+	}
+
+	if (unlikely(xaction->enc_iv_out.len)) {
+		al_debug("Crypto: Preparing ENC IV out Rx desc\n");
+		al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+			&xaction->enc_iv_out, xaction->misc_vmid);
+	}
+
+	if (xaction->dst.num) {
+		struct al_buf *buf = xaction->dst.bufs;
+		al_debug("Crypto: Preparing %d Crypto DST Rx desc\n",
+			xaction->dst.num);
+		for (buf_idx = 0; buf_idx < xaction->dst.num; buf_idx++) {
+			al_crypto_prep_one_rx_desc(
+				rx_udma_q, flags, buf, xaction->dst.vmid);
+			buf++;
+		}
+	}
+
+	/*
+	 * IV output: encryption IV next to use, or, in case of an auth-only
+	 * SA when auth_last isn't set, the intermediate auth output.
+	 */
+	if (xaction->enc_next_iv_out.len) {
+		al_debug("Crypto: Preparing ENC Next IV OUT Rx desc\n");
+		al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+			&xaction->enc_next_iv_out, xaction->misc_vmid);
+	} else {
+		if (xaction->auth_iv_out.len) {
+			al_debug("Crypto: Preparing AUTH IV OUT Rx desc\n");
+			al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+				&xaction->auth_iv_out, xaction->misc_vmid);
+		}
+	}
+
+	if (xaction->auth_sign_out.len) {
+		al_debug("Crypto: Preparing SIGN out Rx desc\n");
+		al_crypto_prep_one_rx_desc(rx_udma_q, flags,
+			&xaction->auth_sign_out, xaction->misc_vmid);
+	}
+
+}
+
+
+/**
+ * Fill one tx submission descriptor
+ *
+ * @param tx_udma_q tx udma handle
+ * @param flags flags for the descriptor
+ * @param meta metadata word1 (buffer type, SA index, op, etc.)
+ * @param buf source buffer
+ * @param vmid virtual machine ID of the buffer
+ */
+static INLINE void al_crypto_prep_one_tx_desc(struct al_udma_q *tx_udma_q,
+	uint32_t flags, uint32_t meta, struct al_buf *buf,
+	uint16_t vmid)
+{
+	/* vmid travels in the high bits of the buffer address */
+	uint64_t vmid_shifted = ((uint64_t)vmid) << AL_UDMA_DESC_VMID_SHIFT;
+	uint32_t flags_len = flags;
+	union al_udma_desc *tx_desc;
+	uint32_t ring_id;
+
+	tx_desc = al_udma_desc_get(tx_udma_q);
+	/* get ring id */
+	ring_id = al_udma_ring_id_get(tx_udma_q)
+		<< AL_M2S_DESC_RING_ID_SHIFT;
+
+	flags_len |= ring_id;
+
+	flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+	tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+	tx_desc->tx.meta_ctrl = swap32_to_le(meta);
+	tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid_shifted);
+	al_print_crypto_desc(tx_desc);
+}
+
+/**
+ * Fill the crypto tx submission descriptors
+ *
+ * this function writes the contents of the tx submission descriptors;
+ * descriptor order must match the order the engine consumes its inputs:
+ * optional metadata, SA in, enc IV in, auth IV in, source buffers, signature
+ *
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ * @param tx_desc_cnt number of total tx descriptors
+ */
+static
+void al_crypto_set_tx_descs(struct al_udma_q *tx_udma_q,
+	struct al_crypto_transaction *xaction, uint32_t tx_desc_cnt)
+{
+	uint32_t flags;
+	uint32_t buf_idx;
+	uint32_t word1_meta;
+	/* desc_cnt counts down; when it reaches 1 the next desc is LAST */
+	uint32_t desc_cnt = tx_desc_cnt;
+
+	/* Set flags */
+	flags = AL_M2S_DESC_FIRST;
+	flags |= unlikely(xaction->flags & AL_SSM_SRC_NO_SNOOP) ?
+		AL_M2S_DESC_NO_SNOOP_H : 0;
+
+	/* Set first desc word1 metadata */
+	word1_meta = AL_CRYPT_OP << TX_DESC_META_OP_SHIFT;
+	word1_meta |= xaction->dir << TX_DESC_META_CRYPT_DIR_SHIFT;
+	word1_meta |= unlikely(xaction->sa_out.len) ?
+		TX_DESC_META_CRYPT_S_SA : 0;
+	word1_meta |= unlikely(xaction->enc_iv_out.len) ?
+		TX_DESC_META_CRYPT_S_ENCIV : 0;
+
+	word1_meta |= unlikely(xaction->dst.num) ?
+		TX_DESC_META_CRYPT_SEND_ORIG : 0;
+
+	word1_meta |=
+		unlikely(xaction->enc_next_iv_out.len ||
+				xaction->auth_iv_out.len) ?
+		TX_DESC_META_CRYPT_SEND_AUTHIV : 0;
+
+	word1_meta |= likely(xaction->auth_sign_out.len) ?
+		TX_DESC_META_CRYPT_S_SIGN : 0;
+
+	/* with no explicit first/last, treat the packet as both */
+	if (unlikely(xaction->auth_fl_valid)) {
+		word1_meta |= xaction->auth_first ? TX_DESC_META_AUTH_FIRST : 0;
+		word1_meta |= xaction->auth_last ? TX_DESC_META_AUTH_LAST : 0;
+	} else {
+		word1_meta |= TX_DESC_META_AUTH_FIRST | TX_DESC_META_AUTH_LAST;
+	}
+
+	word1_meta |= unlikely(xaction->auth_sign_in.len) ?
+		TX_DESC_META_AUTH_VALID : 0;
+
+	word1_meta |= (xaction->sa_indx << TX_DESC_META_SA_IDX_SHIFT)
+		& TX_DESC_META_SA_IDX_MASK;
+
+	/* First Meta data desc */
+	if ((xaction->enc_in_len) || (xaction->auth_in_len)) {
+		uint32_t flags_len = flags;
+		union al_udma_desc *tx_desc;
+		uint32_t ring_id;
+		uint32_t enc_meta;
+		uint32_t auth_meta;
+
+		al_debug("Crypto: preparing metadata desc: enc_in_len %d "
+				"auth_in_len %d\n",
+				xaction->enc_in_len, xaction->auth_in_len);
+		al_debug("                 metadata desc: enc_in_off %d "
+				"auth_in_off %d\n",
+				xaction->enc_in_off, xaction->auth_in_off);
+		/* having only a metadata desc isn't valid */
+		desc_cnt--;
+		/* Valid desc count */
+		al_assert(desc_cnt);
+
+		tx_desc = al_udma_desc_get(tx_udma_q);
+		/* UDMA fields */
+		ring_id = al_udma_ring_id_get(tx_udma_q)
+			<< AL_M2S_DESC_RING_ID_SHIFT;
+		flags_len |= ring_id;
+		flags_len |= AL_M2S_DESC_META_DATA;
+		tx_desc->tx_meta.len_ctrl = swap32_to_le(flags_len);
+		/* Word1 metadata */
+		tx_desc->tx_meta.meta_ctrl = 0;
+		if (xaction->auth_bcnt) {
+			/* Auth only, prev auth byte count */
+			tx_desc->tx_meta.meta1 =
+				swap32_to_le(xaction->auth_bcnt);
+		} else {
+			/* Encryption offsets: from start and from end of pkt */
+			enc_meta = (xaction->src_size -
+				(xaction->enc_in_len + xaction->enc_in_off))
+				& TX_DESC_META_ENC_OFF_EOP_MASK;
+			enc_meta |= (xaction->enc_in_off
+					<< TX_DESC_META_ENC_OFF_SHIFT)
+				& TX_DESC_META_ENC_OFF_MASK;
+
+			tx_desc->tx_meta.meta1 = swap32_to_le(enc_meta);
+		}
+		/* Authentication offsets: from start and from end of pkt */
+		auth_meta = (xaction->src_size -
+			(xaction->auth_in_len + xaction->auth_in_off))
+			& TX_DESC_META_AUTH_OFF_EOP_MASK;
+		auth_meta |= (xaction->auth_in_off
+				<< TX_DESC_META_AUTH_OFF_SHIFT)
+			& TX_DESC_META_AUTH_OFF_MASK;
+		tx_desc->tx_meta.meta2 = swap32_to_le(auth_meta);
+		al_print_crypto_desc(tx_desc);
+		/* clear first flag, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	flags |= unlikely(xaction->flags & AL_SSM_BARRIER) ?
+		AL_M2S_DESC_DMB : 0;
+
+	/* prepare descriptors for the SA_in if found */
+	if (xaction->sa_in.len) {
+		al_debug("Crypto: Preparing SA Tx desc sa_index %d\n",
+			xaction->sa_indx);
+		/* check for last */
+		flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0;
+		desc_cnt--;
+		/* update buffer type in metadata */
+		word1_meta |= AL_CRYPT_BUF_SA_UPDATE
+			<< TX_DESC_META_BUF_TYPE_SHIFT;
+
+		al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->sa_in, xaction->misc_vmid);
+		/* word1_meta is carried only on the first descriptor */
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	/* prepare descriptors for the enc_IV_in if found */
+	if (likely(xaction->enc_iv_in.len)) {
+		al_debug("Crypto: Preparing IV in Tx desc\n");
+		/* check for last */
+		flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0;
+		desc_cnt--;
+		/* update buffer type in metadata */
+		word1_meta |= AL_CRYPT_BUF_ENC_IV
+			<< TX_DESC_META_BUF_TYPE_SHIFT;
+
+		al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->enc_iv_in, xaction->misc_vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	/* prepare descriptors for the auth_IV_in if found */
+	if (unlikely(xaction->auth_iv_in.len)) {
+		al_debug("Crypto: Preparing Auth IV in Tx desc\n");
+		/* check for last */
+		flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0;
+		desc_cnt--;
+		/* update buffer type in metadata */
+		word1_meta |= AL_CRYPT_BUF_AUTH_IV
+			<< TX_DESC_META_BUF_TYPE_SHIFT;
+
+		al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->auth_iv_in, xaction->misc_vmid);
+		word1_meta = 0;
+		/* clear first and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	/* prepare descriptors for the source buffer if found */
+	if (likely(xaction->src.num)) {
+		struct al_buf *buf = xaction->src.bufs;
+		al_debug("Crypto: Preparing SRC %d Tx desc\n",
+			xaction->src.num);
+		/* update buffer type in metadata */
+		word1_meta |= AL_CRYPT_BUF_SRC << TX_DESC_META_BUF_TYPE_SHIFT;
+
+		for (buf_idx = 0; buf_idx < xaction->src.num; buf_idx++) {
+			/* check for last */
+			flags |= (desc_cnt == 1) ? AL_M2S_DESC_LAST : 0;
+			desc_cnt--;
+
+			al_crypto_prep_one_tx_desc(tx_udma_q, flags,
+				word1_meta, buf, xaction->src.vmid);
+			word1_meta = 0;
+			/* clear first and DMB flags, keep no snoop hint flag */
+			flags &= AL_M2S_DESC_NO_SNOOP_H;
+			flags |= AL_M2S_DESC_CONCAT;
+			buf++;
+		}
+
+		/* clear first, concat and DMB flags, keep no snoop hint flag */
+		flags &= AL_M2S_DESC_NO_SNOOP_H;
+	}
+
+	/* prepare descriptors for the auth signature if found */
+	if (unlikely(xaction->auth_sign_in.len)) {
+		al_debug("Crypto: Preparing Signature in Tx desc\n");
+		/* if we are here then this is last */
+		flags |= AL_M2S_DESC_LAST;
+		/* update buffer type in metadata */
+		word1_meta |= AL_CRYPT_BUF_AUTH_SIGN
+			<< TX_DESC_META_BUF_TYPE_SHIFT;
+
+		al_crypto_prep_one_tx_desc(tx_udma_q, flags, word1_meta,
+			&xaction->auth_sign_in, xaction->misc_vmid);
+	}
+
+}
+
+/****************************** API functions *********************************/
+/**
+ * Build the hardware SA image from a software SA definition
+ *
+ * @param sa    software SA (operation, algorithms, keys, IVs, offsets)
+ * @param hw_sa [out] hardware SA image to fill
+ *
+ * @return always 0
+ *
+ * NOTE(review): only the words relevant to the selected operation are
+ * written here (e.g. word 1 only for AES-CCM, words 2/3 only when enc/auth
+ * is enabled); *hw_sa is presumably pre-zeroed by the caller - TODO confirm.
+ */
+int al_crypto_hw_sa_init(struct al_crypto_sa *sa,
+			struct al_crypto_hw_sa *hw_sa)
+{
+	uint32_t tword;
+
+	/* Word 0 - operation, algorithms and key sizes */
+	tword = 0;
+	/* Valid SA operation */
+	al_assert(sa->sa_op != AL_CRYPT_RES);
+	sa_init_field(tword, sa->sa_op, CRYPT_SAD_OP_MASK,
+			CRYPT_SAD_OP_SHIFT, "valid sa_op");
+	/* Encryption (also needed for GCM/CCM authentication) */
+	if ((sa->sa_op != AL_CRYPT_AUTH_ONLY) ||
+			(sa->auth_type == AL_CRYPT_AUTH_AES_GCM) ||
+			(sa->auth_type == AL_CRYPT_AUTH_AES_CCM)) {
+		sa_init_field(tword, sa->enc_type, CRYPT_SAD_ENC_TYPE_MASK,
+				CRYPT_SAD_ENC_TYPE_SHIFT, "valid enc type");
+		if ((sa->enc_type == AL_CRYPT_TRIPDES_ECB) ||
+				(sa->enc_type == AL_CRYPT_TRIPDES_CBC)) {
+			sa_init_field(tword,
+				sa->tripdes_m,
+				CRYPT_SAD_TRIPDES_MODE_MASK,
+				CRYPT_SAD_TRIPDES_MODE_SHIFT,
+				"valid 3des mode");
+		}
+		if (sa->enc_type > AL_CRYPT_TRIPDES_CBC) {
+			sa_init_field(tword,
+				sa->aes_ksize,
+				CRYPT_SAD_AES_KEY_SIZE_MASK,
+				CRYPT_SAD_AES_KEY_SIZE_SHIFT,
+				"valid aes key size");
+		}
+		sa_init_field(tword,
+			sa->cntr_size,
+			CRYPT_SAD_CNTR_SIZE_MASK,
+			CRYPT_SAD_CNTR_SIZE_SHIFT,
+			"valid counter loop");
+	}
+
+	/* Authentication */
+	if (sa->sa_op != AL_CRYPT_ENC_ONLY) {
+		sa_init_field(tword,
+			sa->auth_type,
+			CRYPT_SAD_AUTH_TYPE_MASK,
+			CRYPT_SAD_AUTH_TYPE_SHIFT,
+			"valid auth type");
+		sa_init_field(tword,
+			sa->signature_size,
+			CRYPT_SAD_SIGN_SIZE_MASK,
+			CRYPT_SAD_SIGN_SIZE_SHIFT,
+			"valid sign size");
+		if (sa->auth_type == AL_CRYPT_AUTH_SHA2)
+			sa_init_field(tword,
+				sa->sha2_mode,
+				CRYPT_SAD_SHA2_KEY_SIZE_MASK,
+				CRYPT_SAD_SHA2_KEY_SIZE_SHIFT,
+				"valid sha2 key size");
+		tword |= sa->auth_signature_msb ? CRYPT_SAD_AUTH_MSB_BITS : 0;
+		tword |= sa->auth_hmac_en ? CRYPT_SAD_HMAC_EN : 0;
+	}
+
+	/* Encryption + Authentication ordering */
+	if (sa->sa_op == AL_CRYPT_ENC_AUTH) {
+		tword |= sa->sign_after_enc ? CRYPT_SAD_SIGN_AFTER_ENC : 0;
+		tword |= sa->auth_after_dec ? CRYPT_SAD_AUTH_AFTER_DEC : 0;
+	}
+
+	hw_sa->sa_word[0] = swap32_to_le(tword);
+
+	/* Word 2 - Encryption offsets */
+	tword = 0;
+	if (sa->sa_op != AL_CRYPT_AUTH_ONLY) {
+		sa_init_field(tword,
+			sa->enc_offset,
+			CRYPT_SAD_ENC_OFF_MASK,
+			CRYPT_SAD_ENC_OFF_SHIFT,
+			"valid enc off");
+		sa_init_field(tword,
+			sa->enc_offset_eop,
+			CRYPT_SAD_ENC_OFF_EOP_MASK,
+			CRYPT_SAD_ENC_OFF_EOP_SHIFT,
+			"valid enc off eop");
+		hw_sa->sa_word[2] = swap32_to_le(tword);
+	}
+
+	/* Word 3 - Authentication offsets */
+	tword = 0;
+	if (sa->sa_op != AL_CRYPT_ENC_ONLY) {
+		sa_init_field(tword,
+			sa->auth_offset,
+			CRYPT_SAD_AUTH_OFF_MASK,
+			CRYPT_SAD_AUTH_OFF_SHIFT,
+			"valid auth off");
+		sa_init_field(tword,
+			sa->auth_offset_eop,
+			CRYPT_SAD_AUTH_OFF_EOP_MASK,
+			CRYPT_SAD_AUTH_OFF_EOP_SHIFT,
+			"valid auth off eop");
+		hw_sa->sa_word[3] = swap32_to_le(tword);
+	}
+
+	/* Other Words */
+	/* CCM CBC IV */
+	if (sa->enc_type == AL_CRYPT_AES_CCM) {
+		al_crypto_sa_copy(
+			&hw_sa->sa_word[CRYPT_SAD_CCM_CBC_IV_ADD_SWORD],
+			sa->enc_ccm_cbc_iv_add, CRYPT_SAD_CCM_CBC_IV_ADD_SIZE);
+	}
+
+	/* Encryption Key and IV, also relevant for GCM Auth */
+	if ((sa->sa_op != AL_CRYPT_AUTH_ONLY) ||
+			(sa->auth_type == AL_CRYPT_AUTH_AES_GCM)) {
+		al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_ENC_KEY_SWORD],
+			sa->enc_key, CRYPT_SAD_ENC_KEY_SIZE);
+		al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_ENC_IV_SWORD],
+			sa->enc_iv, CRYPT_SAD_ENC_IV_SIZE);
+	}
+
+	/* AES GCM IV */
+	if (sa->enc_type == AL_CRYPT_AES_GCM) {
+		al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_GCM_AUTH_IV_SWORD],
+			sa->aes_gcm_auth_iv, CRYPT_SAD_GCM_AUTH_IV_SIZE);
+	}
+
+	/* Authentication */
+	if (sa->sa_op != AL_CRYPT_ENC_ONLY) {
+		if (sa->auth_hmac_en) {
+			al_crypto_sa_copy(
+				&hw_sa->sa_word[CRYPT_SAD_HMAC_IV_IN_SWORD],
+				sa->hmac_iv_in, CRYPT_SAD_HMAC_IV_IN_SIZE);
+			al_crypto_sa_copy(
+				&hw_sa->sa_word[CRYPT_SAD_HMAC_IV_OUT_SWORD],
+				sa->hmac_iv_out, CRYPT_SAD_HMAC_IV_OUT_SIZE);
+		}
+
+	}
+	/* IV for broken Auth, overlaps GCM fields
+	   which don't support broken Auth */
+	if ((sa->sa_op == AL_CRYPT_AUTH_ONLY) &&
+			(sa->auth_type != AL_CRYPT_AUTH_AES_GCM)) {
+		al_crypto_sa_copy(&hw_sa->sa_word[CRYPT_SAD_AUTH_IV_SWORD],
+			sa->auth_iv_in, CRYPT_SAD_AUTH_IV_SIZE);
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Prepare rx/tx submission descriptors for a crypto transaction
+ *
+ * @param dma     SSM DMA handle
+ * @param qid     queue index (must be configured as AL_CRYPT_AUTH_Q)
+ * @param xaction crypto transaction context
+ *
+ * @return 0 on success, -ENOSPC if either udma queue lacks free descriptors
+ *
+ * The rx descriptors are issued here; the tx descriptors are written but
+ * only issued later by al_crypto_dma_action() using xaction->tx_descs_count.
+ */
+int al_crypto_dma_prepare(struct al_ssm_dma *dma, uint32_t qid,
+	struct al_crypto_transaction *xaction)
+{
+	uint32_t rx_descs;
+	uint32_t tx_descs;
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	int rc;
+
+	al_debug("al_crypto_dma_prepare\n");
+	al_print_crypto_xaction(xaction);
+
+	/* Check some parameters */
+	/* SA out -> SA in */
+	al_assert(!xaction->sa_out.len ||
+		(xaction->sa_out.len && xaction->sa_in.len));
+	/*
+	 * Valid SA index: the index shifted into its descriptor field must
+	 * not overflow the field. (Fixed: was 'sa_indx >> SHIFT', which
+	 * rejected valid indexes >= 32 and missed out-of-range ones.)
+	 */
+	al_assert(!((xaction->sa_indx << TX_DESC_META_SA_IDX_SHIFT)
+		& ~TX_DESC_META_SA_IDX_MASK));
+	/* Auth first has no iv_in */
+	al_assert(!(xaction->auth_fl_valid &&
+		xaction->auth_first && xaction->auth_iv_in.len));
+	/* No last -> No sign_in */
+	al_assert(!(xaction->auth_fl_valid &&
+		!xaction->auth_last && xaction->auth_sign_in.len));
+	/* Queue is for crypt/auth transactions */
+	al_assert(dma->q_types[qid] == AL_CRYPT_AUTH_Q);
+
+	/* calc tx (M2S) descriptors */
+	tx_descs = al_crypto_xaction_tx_descs_count(xaction);
+	rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	/* valid crypto tx q handle */
+	al_assert(!rc);
+	/* AL_CRYPT_DESC_RES descriptors are kept in reserve on the tx q */
+	if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs
+		     + AL_CRYPT_DESC_RES)) {
+		al_dbg("crypt[%s]:tx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* calc rx (S2M) descriptors, at least one desc is required */
+	rx_descs = al_crypto_xaction_rx_descs_count(xaction);
+	rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	/* valid crypto rx q handle */
+	al_assert(!rc);
+	if (unlikely(al_udma_available_get(rx_udma_q)
+		     < (rx_descs ? rx_descs : 1))) {
+		al_dbg("crypto [%s]: rx q has no enough free desc",
+		       dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* prepare rx descs */
+	al_crypto_set_rx_descs(rx_udma_q, xaction, rx_descs);
+	/* add rx descriptors */
+	al_udma_desc_action_add(rx_udma_q, rx_descs ? rx_descs : 1);
+
+	/* prepare tx descriptors */
+	al_crypto_set_tx_descs(tx_udma_q, xaction, tx_descs);
+
+	/* set number of tx descriptors */
+	xaction->tx_descs_count = tx_descs;
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Issue previously prepared crypto tx descriptors to the engine
+ *
+ * @param dma      SSM DMA handle
+ * @param qid      queue index
+ * @param tx_descs number of tx descriptors to issue
+ *
+ * @return always 0
+ */
+int al_crypto_dma_action(struct al_ssm_dma *dma, uint32_t qid,
+	int tx_descs)
+{
+	struct al_udma_q *udma_q;
+	int err;
+
+	err = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &udma_q);
+	/* the crypto tx q handle must be valid */
+	al_assert(!err);
+
+	/* kick off the pending tx descriptors */
+	al_udma_desc_action_add(udma_q, tx_descs);
+
+	return 0;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * Check for a completed crypto transaction on the given queue
+ *
+ * @param dma         SSM DMA handle
+ * @param qid         queue index
+ * @param comp_status [out] error bits from the completion descriptor
+ *                    (masked by RX_COMP_STATUS_MASK); 0 means success
+ *
+ * @return 1 if a transaction completed (comp_status is valid), 0 otherwise
+ */
+int al_crypto_dma_completion(struct al_ssm_dma *dma, uint32_t qid,
+	uint32_t *comp_status)
+{
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	volatile union al_udma_cdesc *cdesc;
+	int rc;
+	uint32_t cdesc_count;
+
+	rc = al_udma_q_handle_get(&dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	/* valid comp rx q handle */
+	al_assert(!rc);
+
+	cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc);
+	if (!cdesc_count)
+		return 0;
+
+	/* if we have multiple completion descriptors,
+	   then last one will have the valid status */
+	if (unlikely(cdesc_count > 1))
+		cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1);
+
+	*comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) &
+		RX_COMP_STATUS_MASK;
+
+	/* return the rx completion descriptors to the queue */
+	al_udma_cdesc_ack(rx_udma_q, cdesc_count);
+
+	al_debug("crypto packet completed. count %d status desc %p meta %x\n",
+		cdesc_count, cdesc, cdesc->al_desc_comp_rx.ctrl_meta);
+
+	/* cleanup tx completion queue */
+	rc = al_udma_q_handle_get(&dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	/* valid comp tx q handle */
+	al_assert(!rc);
+
+	cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL);
+	if (cdesc_count)
+		al_udma_cdesc_ack(tx_udma_q, cdesc_count);
+
+	return 1;
+}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e992489..63d5d25 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -312,6 +312,27 @@ config MMP_PDMA
help
Support the MMP PDMA engine for PXA and MMP platfrom.
+config AL_DMA
+ tristate "Annapurna Labs DMA support"
+ depends on ARCH_ALPINE
+ select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Enable support for the Annapurna Labs DMA and RAID acceleration
+ engine.
+
+config AL_DMA_STATS
+ bool "Annapurna Labs DMA statistics enabled"
+ depends on AL_DMA
+ help
+ Enable Annapurna Labs DMA and RAID acceleration engine statistics.
+
+config AL_DMA_PCI_IOV
+ bool "Annapurna Labs DMA Virtual Function enabled"
+ depends on AL_DMA
+ help
+ Enable Annapurna Labs DMA Virtual Function.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df5..1a0c1f1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -37,4 +37,5 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_AL_DMA) += al/
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/al/al_dma.h b/drivers/dma/al/al_dma.h
new file mode 100644
index 0000000..5d90863
--- /dev/null
+++ b/drivers/dma/al/al_dma.h
@@ -0,0 +1,407 @@
+/*
+ * Annapurna Labs DMA Linux driver
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef AL_DMA_H
+#define AL_DMA_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "al_hal_ssm_raid.h"
+
+#define AL_DMA_VERSION "0.01"
+
+#define AL_DMA_IRQNAME_SIZE 40
+
+#define AL_DMA_MAX_SIZE_SHIFT_MEMCPY 16 /* 64KB */
+#define AL_DMA_MAX_SIZE_SHIFT_MEMSET 16 /* 64KB */
+#define AL_DMA_MAX_SIZE_SHIFT_XOR 14 /* 16KB */
+#define AL_DMA_MAX_SIZE_SHIFT_XOR_VAL 14 /* 16KB */
+#define AL_DMA_MAX_SIZE_SHIFT_PQ 13 /* 8KB */
+#define AL_DMA_MAX_SIZE_SHIFT_PQ_VAL 13 /* 8KB */
+
+#define AL_DMA_ALIGN_SHIFT 0 /* No alignment requirements */
+
+#ifndef CONFIG_ALPINE_VP_WA
+#define AL_DMA_RAID_TX_CDESC_SIZE 8
+#define AL_DMA_RAID_RX_CDESC_SIZE 8
+#else
+/* Currently in VP it is always 16 bytes */
+#define AL_DMA_RAID_TX_CDESC_SIZE 16
+#define AL_DMA_RAID_RX_CDESC_SIZE 16
+#endif
+
+#define AL_DMA_MAX_SIZE_MEMCPY (1 << AL_DMA_MAX_SIZE_SHIFT_MEMCPY)
+#define AL_DMA_MAX_SIZE_MEMSET (1 << AL_DMA_MAX_SIZE_SHIFT_MEMSET)
+#define AL_DMA_MAX_SIZE_XOR (1 << AL_DMA_MAX_SIZE_SHIFT_XOR)
+#define AL_DMA_MAX_SIZE_XOR_VAL (1 << AL_DMA_MAX_SIZE_SHIFT_XOR_VAL)
+#define AL_DMA_MAX_SIZE_PQ (1 << AL_DMA_MAX_SIZE_SHIFT_PQ)
+#define AL_DMA_MAX_SIZE_PQ_VAL (1 << AL_DMA_MAX_SIZE_SHIFT_PQ_VAL)
+
+#define AL_DMA_MAX_XOR AL_SSM_MAX_SRC_DESCS
+
+#define AL_DMA_OP_MAX_BLOCKS (AL_DMA_MAX_XOR * 2)
+
+#define AL_DMA_MAX_CHANNELS 4
+
+#define AL_DMA_SW_RING_MIN_ORDER 4
+#define AL_DMA_SW_RING_MAX_ORDER 16
+
+/**
+ * Issue pending transaction upon submit:
+ * 0 - no, issue when issue_pending is called
+ * 1 - yes, and do nothing when issue_pending is called
+ */
+#define AL_DMA_ISSUE_PNDNG_UPON_SUBMIT 1
+
+/*#define AL_DMA_MEMCPY_VALIDATION*/
+/*#define AL_DMA_XOR_VALIDATION*/
+
+#ifdef CONFIG_AL_DMA_STATS
+/* Add 'incval' to a statistics counter (no locking; caller serializes) */
+#define AL_DMA_STATS_INC(var, incval) { (var) += (incval); }
+
+/*
+ * Update preparation statistics: bump op counter 'num' by 'cnt', the size
+ * accumulator 'size' by 'size_inc', and record whether the preparation ran
+ * on the CPU matching the channel index.
+ * NOTE(review): 'cnt' is evaluated multiple times - do not pass expressions
+ * with side effects.
+ */
+#define AL_DMA_STATS_UPDATE(chan, num, cnt, size, size_inc)		\
+{									\
+	AL_DMA_STATS_INC((num), (cnt));					\
+									\
+	if (size_inc)							\
+		AL_DMA_STATS_INC((size), (size_inc));			\
+									\
+	AL_DMA_STATS_INC(						\
+		(chan)->stats_prep.matching_cpu,			\
+		(cnt) * (((chan)->idx == smp_processor_id())));		\
+									\
+	AL_DMA_STATS_INC(						\
+		(chan)->stats_prep.mismatching_cpu,			\
+		(cnt) * (!((chan)->idx == smp_processor_id())));	\
+}
+#else
+/* Statistics disabled: both macros compile away to nothing */
+#define AL_DMA_STATS_INC(var, incval)
+#define AL_DMA_STATS_UPDATE(chan, num, cnt, size, size_inc)
+#endif
+
+/* How a DMA address was mapped, selecting the matching unmap call */
+enum al_unmap_type {
+	AL_UNMAP_SINGLE,	/* released with pci_unmap_single() */
+	AL_UNMAP_PAGE,		/* released with pci_unmap_page() */
+};
+
+/* One recorded DMA mapping, unmapped when its descriptor completes */
+struct al_dma_unmap_info_ent {
+	dma_addr_t handle;	/* bus address to unmap */
+	size_t size;		/* mapping size in bytes */
+	int dir;		/* DMA data direction */
+	enum al_unmap_type type;	/* which unmap call to use */
+};
+
+/**
+ * struct al_dma_sw_desc - software descriptor
+ *
+ * Tracks one submitted HAL RAID transaction together with the dmaengine
+ * bookkeeping needed to complete it (cookie/callback via txd, and the
+ * list of DMA mappings to release on completion).
+ */
+struct al_dma_sw_desc {
+	struct al_raid_transaction hal_xaction;	/* HAL transaction descriptor */
+	struct al_block blocks[AL_DMA_OP_MAX_BLOCKS];
+	struct al_buf bufs[AL_DMA_OP_MAX_BLOCKS];
+
+	size_t len;				/* operation length in bytes */
+	struct dma_async_tx_descriptor txd;	/* dmaengine descriptor */
+	#ifdef DEBUG
+	int id;					/* debug-only descriptor id */
+	#endif
+
+	/* Last submitted op was a P/Q validate; HW result folded into
+	 * *pq_val_res on completion */
+	int last_is_pq_val;
+	enum sum_check_flags *pq_val_res;
+
+	/* Last submitted op was an XOR validate; result in *xor_val_res */
+	int last_is_xor_val;
+	enum sum_check_flags *xor_val_res;
+
+	int last_is_memcpy;	/* last submitted op was a memcpy */
+
+#ifdef AL_DMA_MEMCPY_VALIDATION
+	/* Copies of memcpy arguments for SW verification on completion */
+	void *memcpy_dest;
+	void *memcpy_src;
+	size_t memcpy_len;
+#endif
+
+	int last_is_xor;	/* last submitted op was an XOR */
+
+#ifdef AL_DMA_XOR_VALIDATION
+	/* Copies of XOR arguments for SW verification on completion */
+	void *xor_dest;
+	int xor_src_cnt;
+	void *xor_src[AL_DMA_OP_MAX_BLOCKS];
+	size_t xor_len;
+#endif
+
+	/* DMA mappings to release when this descriptor completes */
+	struct al_dma_unmap_info_ent unmap_info[AL_DMA_OP_MAX_BLOCKS];
+	int umap_ent_cnt;	/* number of valid unmap_info[] entries */
+};
+/* Container-of helpers: dmaengine objects -> driver objects / struct device */
+#define to_al_dma_device(dev) container_of(dev, struct al_dma_device, common)
+#define to_dev(al_dma_chan) (&(al_dma_chan)->device->pdev->dev)
+
+#ifdef CONFIG_AL_DMA_STATS
+/**
+ * struct al_dma_chan_stats_prep - DMA channel statistics - preparation
+ * @int_num - Total number of interrupt requests
+ * @memcpy_num - Total number of memcpy operations
+ * @memcpy_size - Total size of memcpy operations
+ * @sg_memcpy_num - Total number of scatter-gather memcpy operations
+ * @sg_memcpy_size - Total size of scatter-gather memcpy operations
+ * @memset_num - Total number of memset operations
+ * @memset_size - Total size of memset operations
+ * @xor_num - Total number of xor operations
+ * @xor_size - Total size of xor operations
+ * @pq_num - Total number of pq operations
+ * @pq_size - Total size of pq operations
+ * @pq_val_num - Total number of pq validation operations
+ * @pq_val_size - Total size of pq validation operations
+ * @xor_val_num - Total number of xor validation operations
+ * @xor_val_size - Total size of xor validation operations
+ * @matching_cpu - Number of preparations with matching queue and cpu
+ * @mismatching_cpu - Number of preparations with mismatching queue and cpu
+ */
+struct al_dma_chan_stats_prep {
+	uint64_t int_num;
+	uint64_t memcpy_num;
+	uint64_t memcpy_size;
+	uint64_t sg_memcpy_num;
+	uint64_t sg_memcpy_size;
+	uint64_t memset_num;
+	uint64_t memset_size;
+	uint64_t xor_num;
+	uint64_t xor_size;
+	uint64_t pq_num;
+	uint64_t pq_size;
+	uint64_t pq_val_num;
+	uint64_t pq_val_size;
+	uint64_t xor_val_num;
+	uint64_t xor_val_size;
+	uint64_t matching_cpu;
+	uint64_t mismatching_cpu;
+};
+
+/**
+ * struct al_dma_chan_stats_comp - DMA channel statistics - completion
+ * @redundant_int_cnt - Total number of redundant interrupts (interrupts for
+ *	which there were no completions)
+ * @matching_cpu - Number of completions with matching queue and cpu
+ * @mismatching_cpu - Number of completions with mismatching queue and cpu
+ */
+struct al_dma_chan_stats_comp {
+	uint64_t redundant_int_cnt;
+	uint64_t matching_cpu;
+	uint64_t mismatching_cpu;
+};
+#endif
+
+/* Internal structure for an AL DMA channel IRQ.
+ * Holds the per-vector name buffer; presumably the string passed when the
+ * MSI-X vector is requested - verify against al_dma_setup_interrupts().
+ */
+struct al_dma_irq {
+	char name[AL_DMA_IRQNAME_SIZE];
+};
+
+/**
+ * struct al_dma_device - internal representation of a DMA device
+ *
+ * One instance per PCI function; owns the HAL SSM DMA context, the
+ * dmaengine device, the per-channel objects and their MSI-X entries.
+ */
+struct al_dma_device {
+	struct pci_dev *pdev;		/* underlying PCI device */
+	u16 dev_id;			/* PCI device id (passed to the HAL) */
+	u8 rev_id;			/* PCI revision id (passed to the HAL) */
+
+	struct al_ssm_dma_params ssm_dma_params;	/* HAL init params */
+	void __iomem *udma_regs_base;	/* mapped UDMA register space */
+	void __iomem *app_regs_base;	/* mapped application register space */
+	struct al_ssm_dma hal_raid;	/* HAL SSM DMA context */
+
+	struct dma_device common;	/* dmaengine device */
+
+	struct msix_entry msix_entries[AL_DMA_MAX_CHANNELS];
+	struct al_dma_irq irq_tbl[AL_DMA_MAX_CHANNELS];	/* IRQ name buffers */
+	struct al_dma_chan *channels[AL_DMA_MAX_CHANNELS];
+	int max_channels;		/* number of channels actually in use */
+
+	/* Slab cache for struct al_dma_sw_desc allocations */
+	struct kmem_cache *cache;
+};
+
+/**
+ * struct al_dma_chan - internal representation of a DMA channel
+ *
+ * Preparation (head side) and completion (tail side) state live on
+ * separate cache lines, each guarded by its own lock.
+ */
+struct al_dma_chan {
+	/* Misc */
+	struct dma_chan common ____cacheline_aligned;	/* dmaengine channel */
+	struct al_ssm_dma *hal_raid;	/* HAL context (shared with device) */
+	int idx;			/* channel/queue index */
+	struct al_dma_device *device;	/* owning device */
+	cpumask_t affinity_mask;	/* CPU affinity for this channel's IRQ */
+
+	/* SW descriptors ring */
+	struct al_dma_sw_desc **sw_ring;
+
+	/* Tx UDMA hw ring */
+	int tx_descs_num; /* number of descriptors in Tx queue */
+	void *tx_dma_desc_virt; /* Tx descriptors ring */
+	dma_addr_t tx_dma_desc;
+
+	/* Rx UDMA hw ring */
+	int rx_descs_num; /* number of descriptors in Rx queue */
+	void *rx_dma_desc_virt; /* Rx descriptors ring */
+	dma_addr_t rx_dma_desc;
+	void *rx_dma_cdesc_virt; /* Rx completion descriptors ring */
+	dma_addr_t rx_dma_cdesc;
+
+	/* sysfs */
+	struct kobject kobj;
+
+	/* Channel allocation: ring size is 1 << alloc_order entries */
+	u16 alloc_order;
+
+	/* Preparation - fields below are protected by prep_lock */
+	spinlock_t prep_lock ____cacheline_aligned;
+	u16 head;		/* free-running producer index */
+	int sw_desc_num_locked;
+	uint32_t tx_desc_produced;
+#ifdef CONFIG_AL_DMA_STATS
+	struct al_dma_chan_stats_prep stats_prep;
+#endif
+
+	/* Completion - fields below are protected by cleanup_lock */
+	spinlock_t cleanup_lock ____cacheline_aligned_in_smp;
+	struct tasklet_struct cleanup_task;
+	u16 tail;		/* free-running consumer index */
+#ifdef CONFIG_AL_DMA_STATS
+	struct al_dma_chan_stats_comp stats_comp;
+#endif
+};
+
+/* Number of software descriptors in the ring (always a power of two) */
+static inline u16 al_dma_ring_size(struct al_dma_chan *chan)
+{
+	return 1 << chan->alloc_order;
+}
+
+/* count of transactions in flight with the engine */
+static inline u16 al_dma_ring_active(struct al_dma_chan *chan)
+{
+	return CIRC_CNT(chan->head, chan->tail, al_dma_ring_size(chan));
+}
+
+/* Number of free ring slots available for new submissions */
+static inline u16 al_dma_ring_space(struct al_dma_chan *chan)
+{
+	return CIRC_SPACE(chan->head, chan->tail, al_dma_ring_size(chan));
+}
+
+/* Fetch a ring entry; the mask wraps free-running indices into range */
+static inline struct al_dma_sw_desc *
+al_dma_get_ring_ent(struct al_dma_chan *chan, u16 idx)
+{
+	return chan->sw_ring[idx & (al_dma_ring_size(chan) - 1)];
+}
+
+/* sysfs attribute entry: a read-only hook resolved against a dma_chan */
+struct al_dma_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct dma_chan *, char *);
+};
+
+/* Convert a generic dmaengine channel to the driver's channel object */
+static inline struct al_dma_chan *to_al_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct al_dma_chan, common);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
+
+
+
+/* Debug-only descriptor id accessors; compile away when DEBUG is unset */
+#ifdef DEBUG
+#define set_desc_id(desc, i) ((desc)->id = (i))
+#define desc_id(desc) ((desc)->id)
+#else
+#define set_desc_id(desc, i)
+#define desc_id(desc) (0)
+#endif
+
+/* Look up a channel by its index in the device's channel table */
+static inline struct al_dma_chan *
+al_dma_chan_by_index(struct al_dma_device *device, int index)
+{
+	return device->channels[index];
+}
+
+/* Channel status readback - currently a stub that always returns 0 */
+static inline u32 al_dma_chansts(struct al_dma_chan *chan)
+{
+	u32 status = 0;
+
+	return status;
+}
+
+/* Record one DMA mapping so it can be unmapped when its descriptor
+ * completes (see al_dma_cleanup_unmap()).
+ */
+static inline void al_dma_unmap_info_ent_set(
+	struct al_dma_unmap_info_ent *ent,
+	dma_addr_t handle,
+	size_t size,
+	int dir,
+	enum al_unmap_type type)
+{
+	*ent = (struct al_dma_unmap_info_ent){
+		.handle	= handle,
+		.size	= size,
+		.dir	= dir,
+		.type	= type,
+	};
+}
+
+int al_dma_get_sw_desc_lock(
+ struct al_dma_chan *chan,
+ int num);
+
+int al_dma_core_init(
+ struct al_dma_device *device,
+ void __iomem *iobase_udma,
+ void __iomem *iobase_app);
+
+int al_dma_fast_init(
+ struct al_dma_device *device,
+ void __iomem *iobase_udma);
+
+int al_dma_fast_terminate(
+ struct al_dma_device *device);
+
+int al_dma_core_terminate(
+ struct al_dma_device *device);
+
+int al_dma_cleanup_fn(
+ struct al_dma_chan *chan,
+ int from_tasklet);
+
+int udma_fast_memcpy(int len, al_phys_addr_t src, al_phys_addr_t dst);
+
+void al_dma_flr(
+ struct pci_dev *pdev);
+
+/**
+ * Submit pending SW descriptors (enlarge the head) and unlock the prep-lock
+ * in the case 'issue-pending' is responsible for submitting the HW descriptors
+ */
+void al_dma_tx_submit_sw_cond_unlock(
+ struct al_dma_chan *chan,
+ struct dma_async_tx_descriptor *tx);
+
+void al_dma_kobject_add(struct al_dma_device *device, struct kobj_type *type);
+void al_dma_kobject_del(struct al_dma_device *device);
+extern const struct sysfs_ops al_dma_sysfs_ops;
+extern struct al_dma_sysfs_entry al_dma_version_attr;
+extern struct al_dma_sysfs_entry al_dma_cap_attr;
+
+#endif /* AL_DMA_H */
+
diff --git a/drivers/dma/al/al_dma_cleanup.c b/drivers/dma/al/al_dma_cleanup.c
new file mode 100644
index 0000000..b5d233a
--- /dev/null
+++ b/drivers/dma/al/al_dma_cleanup.c
@@ -0,0 +1,312 @@
+/*
+ * Annapurna Labs DMA Linux driver - Operation completion cleanup
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "../dmaengine.h"
+#include "al_dma.h"
+#include "al_hal_udma_iofic.h"
+
+static void al_dma_cleanup_single(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc,
+ uint32_t comp_status);
+
+static inline void al_dma_cleanup_single_memcpy(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc);
+
+static inline void al_dma_cleanup_single_xor(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc);
+
+static inline void al_dma_cleanup_single_pq_val(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc,
+ uint32_t comp_status);
+
+static inline void al_dma_cleanup_single_xor_val(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc,
+ uint32_t comp_status);
+
+
+static void al_dma_cleanup_unmap(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc);
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * Reap completed transactions on @chan: for every HW completion reported
+ * by the HAL, run the per-descriptor cleanup and advance the ring tail.
+ * @from_tasklet is non-zero when invoked from the cleanup tasklet (used
+ * only for redundant-interrupt accounting).  Returns the number of
+ * descriptors cleaned.
+ *
+ * Fix vs. original: dropped the stray ';' after the closing brace
+ * (an empty file-scope declaration is not valid ISO C).
+ */
+int al_dma_cleanup_fn(
+	struct al_dma_chan *chan,
+	int from_tasklet)
+{
+	struct al_dma_sw_desc *desc;
+	uint32_t comp_status;
+	u16 active;
+	int idx, i;
+	uint32_t rc;
+
+	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x\n",
+		__func__, chan->head, chan->tail);
+
+	spin_lock_bh(&chan->cleanup_lock);
+
+	idx = chan->tail;
+
+	active = al_dma_ring_active(chan);
+	for (i = 0; i < active; i++) {
+		rc = al_raid_dma_completion(chan->hal_raid, chan->idx,
+					    &comp_status);
+
+		/* if no completed transaction found -> exit */
+		if (rc == 0) {
+			dev_dbg(to_dev(chan), "%s: No completion\n",
+				__func__);
+
+			break;
+		}
+
+		dev_dbg(
+			to_dev(chan),
+			"%s: completion status: %u\n",
+			__func__,
+			comp_status);
+
+		/* This will instruct the CPU to make sure the index is up to
+		   date before reading the new item */
+		smp_read_barrier_depends();
+
+		desc = al_dma_get_ring_ent(chan, idx + i);
+
+		al_dma_cleanup_single(chan, desc, comp_status);
+	}
+
+	/* This will make sure the CPU has finished reading the item
+	   before it writes the new tail pointer, which will erase the item */
+	smp_mb();
+
+	chan->tail = idx + i;
+
+	AL_DMA_STATS_INC(chan->stats_comp.matching_cpu,
+			 i * ((chan->idx == smp_processor_id())));
+	AL_DMA_STATS_INC(chan->stats_comp.mismatching_cpu,
+			 i * (!(chan->idx == smp_processor_id())));
+
+	/* Keep track of redundant interrupts - interrupts that don't
+	   yield completions */
+	if (unlikely(from_tasklet && (!i))) {
+		AL_DMA_STATS_INC(chan->stats_comp.redundant_int_cnt, 1);
+	}
+
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	return i;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * Complete one software descriptor: run op-specific post-processing
+ * (dispatched on the type of the last submitted operation), then complete
+ * the dmaengine cookie, release the recorded DMA mappings and invoke the
+ * client callback exactly once.
+ *
+ * Fix vs. original: dropped the stray ';' after the closing brace
+ * (an empty file-scope declaration is not valid ISO C).
+ */
+static inline void al_dma_cleanup_single(
+	struct al_dma_chan *chan,
+	struct al_dma_sw_desc *desc,
+	uint32_t comp_status)
+{
+	struct dma_async_tx_descriptor *tx;
+
+	if (desc->last_is_memcpy) {
+		desc->last_is_memcpy = 0;
+		al_dma_cleanup_single_memcpy(chan, desc);
+	} else if (desc->last_is_xor) {
+		desc->last_is_xor = 0;
+		al_dma_cleanup_single_xor(chan, desc);
+	} else if (desc->last_is_pq_val) {
+		desc->last_is_pq_val = 0;
+		al_dma_cleanup_single_pq_val(chan, desc, comp_status);
+	} else if (desc->last_is_xor_val) {
+		desc->last_is_xor_val = 0;
+		al_dma_cleanup_single_xor_val(chan, desc, comp_status);
+	}
+
+	tx = &desc->txd;
+	if (tx->cookie) {
+		dma_cookie_complete(tx);
+		al_dma_cleanup_unmap(chan, desc);
+		if (tx->callback) {
+			tx->callback(tx->callback_param);
+			/* clear so the callback cannot fire twice */
+			tx->callback = NULL;
+		}
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Undo every PCI DMA mapping recorded for @desc (see unmap_info[]) */
+static inline void al_dma_cleanup_unmap(
+	struct al_dma_chan *chan,
+	struct al_dma_sw_desc *desc)
+{
+	struct pci_dev *pdev = chan->device->pdev;
+	struct al_dma_unmap_info_ent *ent = desc->unmap_info;
+	int remaining;
+
+	for (remaining = desc->umap_ent_cnt; remaining > 0;
+	     remaining--, ent++) {
+		if (ent->type == AL_UNMAP_SINGLE)
+			pci_unmap_single(
+				pdev, ent->handle, ent->size, ent->dir);
+		else if (ent->type == AL_UNMAP_PAGE)
+			pci_unmap_page(pdev, ent->handle, ent->size, ent->dir);
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * SW verification of a completed memcpy: compare dest against src.
+ * Compiled in only when AL_DMA_MEMCPY_VALIDATION is defined; otherwise
+ * this is a no-op.
+ *
+ * Fix vs. original: desc->memcpy_len is a size_t - printing it with %d is
+ * a format-specifier mismatch (undefined behavior); use %zu.
+ */
+static inline void al_dma_cleanup_single_memcpy(
+	struct al_dma_chan *chan,
+	struct al_dma_sw_desc *desc)
+{
+#ifdef AL_DMA_MEMCPY_VALIDATION
+	if (memcmp(desc->memcpy_dest, desc->memcpy_src, desc->memcpy_len)) {
+		dev_err(
+			to_dev(chan),
+			"%s: memcpy (%p, %p, %zu) failed!\n",
+			__func__,
+			desc->memcpy_dest,
+			desc->memcpy_src,
+			desc->memcpy_len);
+	} else
+		dev_dbg(
+			to_dev(chan),
+			"%s: memcpy (%p, %p, %zu) ok!\n",
+			__func__,
+			desc->memcpy_dest,
+			desc->memcpy_src,
+			desc->memcpy_len);
+#endif
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static inline void al_dma_cleanup_single_pq_val(
+ struct al_dma_chan *chan,
+ struct al_dma_sw_desc *desc,
+ uint32_t comp_status)
+{
+ if (unlikely(comp_status & AL_RAID_P_VAL_ERROR)) {
+ dev_dbg(
+ to_dev(chan),
+ "%s: pq_val failed P!\n",
+ __func__);
+
+ (*desc->pq_val_res) |= SUM_CHECK_P_RESULT;
+ }
+
+ if (unlikely(comp_status & AL_RAID_Q_VAL_ERROR)) {
+ dev_dbg(
+ to_dev(chan),
+ "%s: pq_val failed Q!\n",
+ __func__);
+
+ (*desc->pq_val_res) |= SUM_CHECK_Q_RESULT;
+ }
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * SW verification of a completed XOR: recompute dest ^ all sources and
+ * check every byte cancels to zero.  Compiled in only when
+ * AL_DMA_XOR_VALIDATION is defined; otherwise this is a no-op.
+ *
+ * Fixes vs. original:
+ * - 'desc->xoddr_src_cnt' typo (no such field; would not compile with
+ *   AL_DMA_XOR_VALIDATION enabled) -> 'desc->xor_src_cnt'
+ * - desc->xor_len is a size_t - print with %zu, not %d
+ * - repaired the garbled "xor(%p, [%d srcs, %d)" format string
+ */
+static inline void al_dma_cleanup_single_xor(
+	struct al_dma_chan *chan,
+	struct al_dma_sw_desc *desc)
+{
+#ifdef AL_DMA_XOR_VALIDATION
+	int src_index;
+	int dest_index;
+
+	for (dest_index = 0; dest_index < desc->xor_len; dest_index++) {
+		uint8_t byte_val =
+			((uint8_t *)desc->xor_dest)[dest_index];
+
+		for (
+			src_index = 0;
+			src_index < desc->xor_src_cnt;
+			src_index++) {
+			byte_val ^=
+				((uint8_t *)desc->xor_src[
+					src_index])[dest_index];
+		}
+
+		if (byte_val) {
+			if (desc->xor_src_cnt != 3)
+				dev_err(
+					to_dev(chan),
+					"%s: xor(%p, %d srcs, %zu) failed!\n",
+					__func__,
+					desc->xor_dest,
+					desc->xor_src_cnt,
+					desc->xor_len);
+			else
+				dev_err(
+					to_dev(chan),
+					"%s: xor(%p, %p, %p, %p, %zu) failed!\n",
+					__func__,
+					desc->xor_dest,
+					desc->xor_src[0],
+					desc->xor_src[1],
+					desc->xor_src[2],
+					desc->xor_len);
+
+			break;
+		}
+	}
+
+	if (dest_index == desc->xor_len) {
+		dev_dbg(
+			to_dev(chan),
+			"%s: xor (%p, %zu, %d) ok!\n",
+			__func__,
+			desc->xor_dest,
+			desc->xor_len,
+			desc->xor_src_cnt);
+	}
+#endif
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * Record a HW XOR-validate failure: if the completion status carries the
+ * P parity error bit, set SUM_CHECK_P_RESULT in the caller's result.
+ * Only the P bit is checked here (unlike the pq_val path, which also
+ * checks Q).
+ */
+static inline void al_dma_cleanup_single_xor_val(
+	struct al_dma_chan *chan,
+	struct al_dma_sw_desc *desc,
+	uint32_t comp_status)
+{
+	if (unlikely(comp_status & AL_RAID_P_VAL_ERROR)) {
+		dev_dbg(
+			to_dev(chan),
+			"%s: xor_val failed P!\n",
+			__func__);
+
+		(*desc->xor_val_res) |= SUM_CHECK_P_RESULT;
+	}
+}
+
diff --git a/drivers/dma/al/al_dma_core.c b/drivers/dma/al/al_dma_core.c
new file mode 100644
index 0000000..2327556
--- /dev/null
+++ b/drivers/dma/al/al_dma_core.c
@@ -0,0 +1,1616 @@
+/*
+ * Annapurna Labs DMA Linux driver core
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include
+#include
+#include
+#include
+
+#include "../dmaengine.h"
+#include "al_fabric.h"
+#include "al_dma.h"
+#include "al_dma_prep.h"
+#include "al_dma_sysfs.h"
+#include "al_dma_module_params.h"
+#include "al_hal_udma_iofic.h"
+#include "al_hal_udma_config.h"
+#include "al_hal_ssm_crc_memcpy.h"
+#include "al_hal_udma_fast.h"
+#include "al_hal_udma_debug.h"
+#include "al_hal_iomap.h"
+#include "al_hal_plat_services.h"
+#include "al_hal_unit_adapter_regs.h"
+
+MODULE_LICENSE("GPL");
+
+static dma_cookie_t al_dma_tx_submit_unlock(
+ struct dma_async_tx_descriptor *tx);
+
+static void al_dma_free_chan_resources(
+ struct dma_chan *c);
+
+static int al_dma_alloc_chan_resources(
+ struct dma_chan *c);
+
+static enum dma_status al_dma_tx_status(
+ struct dma_chan *c,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+
+static void al_dma_issue_pending(
+ struct dma_chan *c);
+
+static int al_dma_control(
+ struct dma_chan *chan,
+ enum dma_ctrl_cmd cmd,
+ unsigned long arg);
+
+static int al_dma_setup_interrupts(
+ struct al_dma_device *device);
+
+static void al_dma_free_interrupts(
+ struct al_dma_device *device);
+
+static irqreturn_t al_dma_do_interrupt(
+ int irq,
+ void *data);
+
+static irqreturn_t al_dma_do_interrupt_msix(
+ int irq,
+ void *data);
+
+static int al_dma_init_channels(
+ struct al_dma_device *device,
+ int max_channels);
+
+static void al_dma_init_channel(
+ struct al_dma_device *device,
+ struct al_dma_chan *chan,
+ int idx);
+
+static struct al_dma_sw_desc **al_dma_alloc_sw_ring(
+ struct al_dma_chan *chan,
+ int order,
+ gfp_t flags);
+
+static struct al_dma_sw_desc *al_dma_alloc_ring_ent(
+ struct al_dma_chan *chan,
+ gfp_t flags);
+
+static void al_dma_free_ring_ent(
+ struct al_dma_sw_desc *desc,
+ struct al_dma_chan *chan);
+
+static void al_dma_cleanup_tasklet(unsigned long data);
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * Initialize the DMA core: create the SW descriptor cache, bring up the
+ * HAL SSM DMA, configure the max packet size, initialize the channels,
+ * advertise dmaengine capabilities, set up interrupts and register the
+ * dmaengine device.  Returns 0 on success, a negative errno or a HAL
+ * error code on failure.
+ */
+int al_dma_core_init(
+	struct al_dma_device *device,
+	void __iomem *iobase_udma,
+	void __iomem *iobase_app)
+{
+	int32_t rc;
+
+	struct dma_device *dma = &device->common;
+	int err;
+	struct al_udma_m2s_pkt_len_conf pkt_len_conf;
+	struct al_udma *tx_udma;
+	int max_channels = al_dma_get_max_channels();
+
+	/* NOTE(review): dma->dev is only assigned further down in this
+	 * function - confirm the caller sets device->common.dev before
+	 * this dev_dbg(), else it runs with a NULL device */
+	dev_dbg(
+		dma->dev,
+		"%s(%p, %p, %p)\n",
+		__func__,
+		device,
+		iobase_udma,
+		iobase_app);
+
+	/* Slab cache backing struct al_dma_sw_desc allocations */
+	device->cache = kmem_cache_create(
+		"al_dma",
+		sizeof(struct al_dma_sw_desc),
+		0,
+		SLAB_HWCACHE_ALIGN,
+		NULL);
+	if (!device->cache)
+		return -ENOMEM;
+
+	/* NOTE(review): every error return below leaks device->cache (and,
+	 * later, ssm_dma_params.name) - goto-based cleanup would fix this */
+	device->max_channels = max_channels;
+
+	device->udma_regs_base = iobase_udma;
+	device->app_regs_base = iobase_app;
+
+	memset(&device->ssm_dma_params, 0, sizeof(struct al_ssm_dma_params));
+	device->ssm_dma_params.dev_id = device->dev_id;
+	device->ssm_dma_params.rev_id = device->rev_id;
+	device->ssm_dma_params.udma_regs_base = device->udma_regs_base;
+
+	/* Duplicate the device name for the HAL (open-coded kstrdup) */
+	device->ssm_dma_params.name =
+		kmalloc(strlen(dev_name(device->common.dev)) + 1, GFP_KERNEL);
+	if (device->ssm_dma_params.name == NULL) {
+		dev_err(device->common.dev, "kmalloc failed\n");
+		/* NOTE(review): should return -ENOMEM, not -1 */
+		return -1;
+	}
+
+	memcpy(
+		device->ssm_dma_params.name,
+		dev_name(device->common.dev),
+		strlen(dev_name(device->common.dev)) + 1);
+
+	device->ssm_dma_params.num_of_queues = max_channels;
+
+	rc = al_ssm_dma_init(&device->hal_raid, &device->ssm_dma_params);
+	if (rc) {
+		dev_err(device->common.dev, "al_raid_dma_init failed\n");
+		return rc;
+	}
+
+	al_raid_init(device->app_regs_base);
+
+	/* set max packet size to 512k (XOR with 32 sources) */
+	rc = al_ssm_dma_handle_get(
+		&device->hal_raid,
+		UDMA_TX,
+		&tx_udma);
+	if (rc) {
+		dev_err(device->common.dev, "al_raid_dma_handle_get failed\n");
+		return rc;
+	}
+
+	pkt_len_conf.encode_64k_as_zero = AL_FALSE;
+	pkt_len_conf.max_pkt_size = SZ_512K;
+	rc = al_udma_m2s_packet_size_cfg_set(tx_udma, &pkt_len_conf);
+	if (rc) {
+		dev_err(device->common.dev,
+			"al_udma_m2s_packet_size_cfg_set failed\n");
+		return rc;
+	}
+
+	/* enumerate and initialize channels (queues) */
+	al_dma_init_channels(device, max_channels);
+
+	/* enable RAID DMA engine */
+	/* NOTE(review): rc is not checked here - confirm this is intentional */
+	rc = al_ssm_dma_state_set(&device->hal_raid, UDMA_NORMAL);
+
+	dma->dev = &device->pdev->dev;
+
+	/* dmaengine entry points */
+	dma->device_alloc_chan_resources = al_dma_alloc_chan_resources;
+	dma->device_free_chan_resources = al_dma_free_chan_resources;
+	dma->device_tx_status = al_dma_tx_status;
+	dma->device_issue_pending = al_dma_issue_pending;
+	dma->device_control = al_dma_control;
+
+	/* Advertise only the operations enabled via module parameters */
+	if (al_dma_get_op_support_interrupt()) {
+		dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+		dma->device_prep_dma_interrupt = al_dma_prep_interrupt_lock;
+	}
+
+	if (al_dma_get_op_support_memcpy()) {
+		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+		dma->device_prep_dma_memcpy = al_dma_prep_memcpy_lock;
+	}
+
+	if (al_dma_get_op_support_sg()) {
+		dma_cap_set(DMA_SG, dma->cap_mask);
+		dma->device_prep_dma_sg = al_dma_prep_sg_lock;
+	}
+
+	if (al_dma_get_op_support_memset()) {
+		dma_cap_set(DMA_MEMSET, dma->cap_mask);
+		dma->device_prep_dma_memset = al_dma_prep_memset_lock;
+	}
+
+	if (al_dma_get_op_support_xor()) {
+		dma_cap_set(DMA_XOR, dma->cap_mask);
+		dma->device_prep_dma_xor = al_dma_prep_xor_lock;
+		dma->max_xor = AL_DMA_MAX_XOR;
+	}
+
+	if (al_dma_get_op_support_pq()) {
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma->device_prep_dma_pq = al_dma_prep_pq_lock;
+	}
+
+	if (al_dma_get_op_support_pq_val()) {
+#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+		dma->device_prep_dma_pq_val = al_dma_prep_pq_val_lock;
+#endif
+	}
+
+	/* Two of the PQ sources are reserved for P and Q themselves */
+	if (al_dma_get_op_support_pq())
+		dma_set_maxpq(dma, AL_DMA_MAX_XOR - 2, 0);
+	else if (al_dma_get_op_support_pq_val()) {
+#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+		dma_set_maxpq(dma, AL_DMA_MAX_XOR - 2, 0);
+#endif
+	}
+
+	if (al_dma_get_op_support_xor_val()) {
+#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+		dma->device_prep_dma_xor_val = al_dma_prep_xor_val_lock;
+#endif
+	}
+
+#ifdef CONFIG_ALPINE_VP_WA
+	/* Virtual-platform workaround: declare alignment requirements */
+	dma->copy_align = AL_DMA_ALIGN_SHIFT;
+	dma->xor_align = AL_DMA_ALIGN_SHIFT;
+	dma->pq_align = AL_DMA_ALIGN_SHIFT;
+	dma->fill_align = AL_DMA_ALIGN_SHIFT;
+#endif
+
+#ifdef CONFIG_DMATEST
+	/* Reserve for DMA test */
+	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+#endif
+
+	err = al_dma_setup_interrupts(device);
+
+	if (err) {
+		dev_err(device->common.dev, "failed to setup interrupts\n");
+		return err;
+	}
+
+	err = dma_async_device_register(&device->common);
+
+	if (err)
+		dev_err(device->common.dev, "failed to register dma device\n");
+
+	return err;
+}
+
+
+/******************************************************************************
+ ***************************** Fast DMA **************************************/
+#define FAST_DMA_NUM_OF_QUEUES 4
+#define FAST_DMA_MEMCPY_TIMEOUT 1000 /* in uSec */
+#define FAST_DMA_DESCS_COUNT 8
+#define FAST_DMA_TX_CDESCS_COUNT 8
+#define FAST_DMA_RX_CDESCS_COUNT 8
+
+/* Per-CPU fast-path queue handles plus a per-CPU DMA-coherent scratch
+ * word (allocated in ssm_udma_fast_init, freed in ssm_udma_fast_terminate) */
+DEFINE_PER_CPU(struct al_udma_q *, tx_udma_q_percpu);
+DEFINE_PER_CPU(struct al_udma_q *, rx_udma_q_percpu);
+DEFINE_PER_CPU(uint32_t *, temp_percpu);
+DEFINE_PER_CPU(al_phys_addr_t, temp_phys_addr_percpu);
+
+/* Per-queue descriptor ring allocations, set up in al_dma_fast_init */
+al_phys_addr_t tx_dma_desc_phys[FAST_DMA_NUM_OF_QUEUES];
+al_phys_addr_t rx_dma_desc_phys[FAST_DMA_NUM_OF_QUEUES];
+al_phys_addr_t rx_dma_cdesc_phys[FAST_DMA_NUM_OF_QUEUES];
+void *tx_dma_desc_virt[FAST_DMA_NUM_OF_QUEUES];
+void *rx_dma_desc_virt[FAST_DMA_NUM_OF_QUEUES];
+void *rx_dma_cdesc_virt[FAST_DMA_NUM_OF_QUEUES];
+
+/* Per-PCIe-port address windows; NOTE(review): their consumers are not
+ * visible in this part of the file - verify usage before changing */
+uint64_t al_pcie_read_addr_start[AL_SB_PCIE_NUM];
+uint64_t al_pcie_read_addr_end[AL_SB_PCIE_NUM];
+uint64_t al_pcie_write_addr_start[AL_SB_PCIE_NUM];
+uint64_t al_pcie_write_addr_end[AL_SB_PCIE_NUM];
+bool al_pcie_address_valid[AL_SB_PCIE_NUM] = {0};
+
+/* Set once the fast-DMA path is initialized (written elsewhere) */
+bool fast_dma_init = false;
+
+/******************************************************************************
+ *****************************************************************************/
+/* Prepare queue for fast mode */
+static void ssm_udma_fast_init(struct al_ssm_dma *ssm_dma)
+{
+ struct al_memcpy_transaction xaction;
+ struct al_udma_q *tx_udma_q, *rx_udma_q;
+ uint32_t *temp;
+ al_phys_addr_t temp_phys_addr;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ tx_udma_q = al_ssm_dma_tx_queue_handle_get(ssm_dma, cpu);
+ rx_udma_q = al_ssm_dma_rx_queue_handle_get(ssm_dma, cpu);
+
+ memset(&xaction, 0, sizeof(struct al_memcpy_transaction));
+ al_udma_fast_memcpy_q_prepare(tx_udma_q, rx_udma_q, &xaction);
+
+ /* Allocate temp memory */
+ temp = dma_alloc_coherent(NULL,
+ sizeof(uint32_t),
+ &temp_phys_addr,
+ GFP_KERNEL);
+
+ per_cpu(tx_udma_q_percpu, cpu) = tx_udma_q;
+ per_cpu(rx_udma_q_percpu, cpu) = rx_udma_q;
+ per_cpu(temp_percpu, cpu) = temp;
+ per_cpu(temp_phys_addr_percpu, cpu) = temp_phys_addr;
+ }
+}
+
+/* Release the per-CPU scratch words allocated by ssm_udma_fast_init() */
+static void ssm_udma_fast_terminate(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		uint32_t *scratch = per_cpu(temp_percpu, cpu);
+
+		/* if not set, don't free */
+		if (!scratch)
+			continue;
+
+		dma_free_coherent(NULL,
+				  sizeof(uint32_t),
+				  scratch,
+				  per_cpu(temp_phys_addr_percpu, cpu));
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * al_dma_fast_init - initialize the fast-DMA engine and its queues
+ * @device:		DMA device context (HAL handles and parameters)
+ * @iobase_udma:	mapped UDMA register file base
+ *
+ * Initializes the SSM DMA HAL, configures the M2S max packet size, enables
+ * the engine, allocates the descriptor rings of every fast queue and binds
+ * the per-CPU queue handles.
+ *
+ * Returns 0 on success, negative errno on failure (all partially-acquired
+ * resources are released on the error paths).
+ */
+int al_dma_fast_init(
+	struct al_dma_device *device,
+	void __iomem *iobase_udma)
+{
+	int32_t rc;
+	int i;
+
+	struct dma_device *dma = &device->common;
+	struct al_udma_m2s_pkt_len_conf pkt_len_conf;
+	struct al_udma *tx_udma;
+
+	struct al_udma_q_params tx_params;
+	struct al_udma_q_params rx_params;
+
+	int max_channels = al_dma_get_max_channels();
+
+	dev_dbg(
+		dma->dev,
+		"%s(%p, %p)\n",
+		__func__,
+		device,
+		iobase_udma);
+
+	/* one fast queue is dedicated to each possible CPU */
+	al_assert(FAST_DMA_NUM_OF_QUEUES >= NR_CPUS);
+
+	device->max_channels = max_channels;
+
+	device->udma_regs_base = iobase_udma;
+	device->app_regs_base = NULL;
+
+	memset(&device->ssm_dma_params, 0, sizeof(struct al_ssm_dma_params));
+	device->ssm_dma_params.dev_id = device->dev_id;
+	device->ssm_dma_params.rev_id = device->rev_id;
+	device->ssm_dma_params.udma_regs_base = device->udma_regs_base;
+
+	/* kstrdup replaces the original kmalloc()+memcpy() pair */
+	device->ssm_dma_params.name =
+		kstrdup(dev_name(device->common.dev), GFP_KERNEL);
+	if (device->ssm_dma_params.name == NULL) {
+		dev_err(device->common.dev, "kstrdup failed\n");
+		/* was "return -1": report a proper errno value */
+		return -ENOMEM;
+	}
+
+	device->ssm_dma_params.num_of_queues = max_channels;
+
+	rc = al_ssm_dma_init(&device->hal_raid, &device->ssm_dma_params);
+	if (rc) {
+		dev_err(device->common.dev, "al_ssm_dma_init failed\n");
+		goto err_free_name;
+	}
+
+	rc = al_ssm_dma_handle_get(
+		&device->hal_raid,
+		UDMA_TX,
+		&tx_udma);
+	if (rc) {
+		dev_err(device->common.dev, "al_ssm_dma_handle_get failed\n");
+		goto err_free_name;
+	}
+
+	/* set max packet size to 128 (XOR with 32 sources) */
+	/* TODO reduce max pkt size to 32 */
+	pkt_len_conf.encode_64k_as_zero = AL_FALSE;
+	pkt_len_conf.max_pkt_size = SZ_128;
+	rc = al_udma_m2s_packet_size_cfg_set(tx_udma, &pkt_len_conf);
+	if (rc) {
+		dev_err(device->common.dev,
+			"al_udma_m2s_packet_size_cfg_set failed\n");
+		goto err_free_name;
+	}
+
+	/* enable RAID DMA engine; the result was previously ignored */
+	rc = al_ssm_dma_state_set(&device->hal_raid, UDMA_NORMAL);
+	if (rc) {
+		dev_err(device->common.dev, "al_ssm_dma_state_set failed\n");
+		goto err_free_name;
+	}
+
+	dma->dev = &device->pdev->dev;
+
+	/* Init dma queue using the params below */
+	for (i = 0; i < FAST_DMA_NUM_OF_QUEUES; i++) {
+		/* allocate coherent memory for Tx submission descriptors */
+		tx_dma_desc_virt[i] = dma_alloc_coherent(
+			dma->dev,
+			FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc),
+			&tx_dma_desc_phys[i],
+			GFP_KERNEL);
+
+		/* allocate coherent memory for Rx submission descriptors */
+		rx_dma_desc_virt[i] = dma_alloc_coherent(
+			dma->dev,
+			FAST_DMA_DESCS_COUNT * sizeof(union al_udma_desc),
+			&rx_dma_desc_phys[i],
+			GFP_KERNEL);
+
+		/* allocate coherent memory for Rx completion descriptors */
+		rx_dma_cdesc_virt[i] = dma_alloc_coherent(
+			dma->dev,
+			FAST_DMA_RX_CDESCS_COUNT * sizeof(union al_udma_cdesc),
+			&rx_dma_cdesc_phys[i],
+			GFP_KERNEL);
+
+		/* the original code never checked these allocations */
+		if (!tx_dma_desc_virt[i] || !rx_dma_desc_virt[i] ||
+				!rx_dma_cdesc_virt[i]) {
+			dev_err(device->common.dev,
+				"queue %d ring allocation failed\n", i);
+			rc = -ENOMEM;
+			goto err_free_rings;
+		}
+
+		/* Fill in dma queue params */
+		tx_params.size = FAST_DMA_DESCS_COUNT;
+		tx_params.desc_base = tx_dma_desc_virt[i];
+		tx_params.desc_phy_base = tx_dma_desc_phys[i];
+		tx_params.cdesc_base = NULL; /* don't use Tx completion ring */
+		tx_params.cdesc_phy_base = 0;
+		tx_params.cdesc_size = FAST_DMA_TX_CDESCS_COUNT;
+
+		rx_params.size = FAST_DMA_DESCS_COUNT;
+		rx_params.desc_base = rx_dma_desc_virt[i];
+		rx_params.desc_phy_base = rx_dma_desc_phys[i];
+		rx_params.cdesc_base = rx_dma_cdesc_virt[i];
+		rx_params.cdesc_phy_base = rx_dma_cdesc_phys[i];
+		rx_params.cdesc_size = FAST_DMA_RX_CDESCS_COUNT;
+
+		/* was "rc +=": stop on the first failure instead of
+		 * accumulating error codes */
+		rc = al_ssm_dma_q_init(&device->hal_raid, i,
+			&tx_params, &rx_params, AL_MEM_CRC_MEMCPY_Q);
+		if (rc) {
+			dev_err(device->common.dev,
+				"al_ssm_dma_q_init failed\n");
+			goto err_free_rings;
+		}
+	}
+
+	ssm_udma_fast_init(&device->hal_raid);
+
+	fast_dma_init = true;
+
+	return 0;
+
+err_free_rings:
+	/* free every ring allocated so far (iterations 0..i) */
+	for (; i >= 0; i--) {
+		if (rx_dma_cdesc_virt[i]) {
+			dma_free_coherent(dma->dev,
+				FAST_DMA_RX_CDESCS_COUNT *
+					sizeof(union al_udma_cdesc),
+				rx_dma_cdesc_virt[i], rx_dma_cdesc_phys[i]);
+			rx_dma_cdesc_virt[i] = NULL;
+		}
+		if (rx_dma_desc_virt[i]) {
+			dma_free_coherent(dma->dev,
+				FAST_DMA_DESCS_COUNT *
+					sizeof(union al_udma_desc),
+				rx_dma_desc_virt[i], rx_dma_desc_phys[i]);
+			rx_dma_desc_virt[i] = NULL;
+		}
+		if (tx_dma_desc_virt[i]) {
+			dma_free_coherent(dma->dev,
+				FAST_DMA_DESCS_COUNT *
+					sizeof(union al_udma_desc),
+				tx_dma_desc_virt[i], tx_dma_desc_phys[i]);
+			tx_dma_desc_virt[i] = NULL;
+		}
+	}
+err_free_name:
+	kfree(device->ssm_dma_params.name);
+	device->ssm_dma_params.name = NULL;
+	return rc;
+}
+/**
+ * al_dma_fast_terminate - tear down the fast-DMA engine
+ * @device: DMA device context initialized by al_dma_fast_init()
+ *
+ * Clears the fast_dma_init flag first so no new fast transfers start,
+ * then releases the per-CPU scratch words and every queue's rings.
+ * Pointers are NULLed after freeing so a repeated call is harmless.
+ */
+int al_dma_fast_terminate(struct al_dma_device *device)
+{
+	int i;
+	struct dma_device *dma = &device->common;
+
+	dev_dbg(
+		dma->dev,
+		"%s(%p)\n",
+		__func__,
+		device);
+
+	fast_dma_init = false;
+
+	ssm_udma_fast_terminate();
+
+	for (i = 0; i < FAST_DMA_NUM_OF_QUEUES; i++) {
+		/* the original freed unconditionally; skip rings that were
+		 * never allocated (partial init) and NULL them after free */
+		if (rx_dma_cdesc_virt[i]) {
+			dma_free_coherent(
+				dma->dev,
+				FAST_DMA_RX_CDESCS_COUNT *
+					sizeof(union al_udma_cdesc),
+				rx_dma_cdesc_virt[i],
+				rx_dma_cdesc_phys[i]);
+			rx_dma_cdesc_virt[i] = NULL;
+		}
+
+		if (rx_dma_desc_virt[i]) {
+			dma_free_coherent(
+				dma->dev,
+				FAST_DMA_DESCS_COUNT *
+					sizeof(union al_udma_desc),
+				rx_dma_desc_virt[i],
+				rx_dma_desc_phys[i]);
+			rx_dma_desc_virt[i] = NULL;
+		}
+
+		if (tx_dma_desc_virt[i]) {
+			dma_free_coherent(
+				dma->dev,
+				FAST_DMA_DESCS_COUNT *
+					sizeof(union al_udma_desc),
+				tx_dma_desc_virt[i],
+				tx_dma_desc_phys[i]);
+			tx_dma_desc_virt[i] = NULL;
+		}
+	}
+
+	/* NULL the name to avoid a double kfree() with other teardown paths */
+	kfree(device->ssm_dma_params.name);
+	device->ssm_dma_params.name = NULL;
+
+	return 0;
+}
+/******************************************************************************
+ *****************************************************************************/
+/* Fast memcopy submission */
+/**
+ * udma_fast_memcpy - synchronous DMA copy on the current CPU's fast queue
+ * @len:	number of bytes to copy
+ * @src:	physical source address
+ * @dst:	physical destination address
+ *
+ * Submits one Rx and one Tx descriptor and polls for the completion, up to
+ * FAST_DMA_MEMCPY_TIMEOUT microseconds.
+ *
+ * NOTE(review): uses __get_cpu_var(), so callers are expected to have
+ * preemption/interrupts disabled -- verify at every call site.
+ *
+ * Returns 0 on success, -ETIME if no completion arrived in time.
+ */
+int udma_fast_memcpy(int len, al_phys_addr_t src, al_phys_addr_t dst)
+{
+	struct al_udma_q *tx_udma_q, *rx_udma_q;
+
+	union al_udma_desc *tx_desc;
+	union al_udma_desc *rx_desc;
+	int completed = 0;
+	int timeout = FAST_DMA_MEMCPY_TIMEOUT;
+	uint32_t flags;
+
+	/* prepare rx desc */
+	rx_udma_q = __get_cpu_var(rx_udma_q_percpu);
+	tx_udma_q = __get_cpu_var(tx_udma_q_percpu);
+
+	rx_desc = al_udma_desc_get(rx_udma_q);
+
+	flags = al_udma_ring_id_get(rx_udma_q) <<
+		AL_M2S_DESC_RING_ID_SHIFT;
+
+	al_udma_fast_desc_flags_set(rx_desc, flags, AL_M2S_DESC_RING_ID_MASK);
+	al_udma_fast_desc_len_set(rx_desc, len);
+	al_udma_fast_desc_buf_set(rx_desc, dst, 0);
+
+	/* submit rx desc */
+	al_udma_desc_action_add(rx_udma_q, 1);
+
+	/* prepare tx desc */
+	tx_desc = al_udma_desc_get(tx_udma_q);
+
+	flags = al_udma_ring_id_get(tx_udma_q) <<
+		AL_M2S_DESC_RING_ID_SHIFT;
+
+	al_udma_fast_desc_flags_set(tx_desc, flags, AL_M2S_DESC_RING_ID_MASK);
+	al_udma_fast_desc_len_set(tx_desc, len);
+	al_udma_fast_desc_buf_set(tx_desc, src, 0);
+
+	/* submit tx desc */
+	al_udma_desc_action_add(tx_udma_q, 1);
+
+	/* wait for completion using polling */
+	while (1) {
+		completed = al_udma_fast_completion(rx_udma_q, 1, 0);
+		if ((completed > 0) || (timeout == 0))
+			break;
+
+		udelay(1);
+		timeout--;
+	}
+
+	/* test the completion count, not the timeout: the original
+	 * "timeout == 0" check misreported -ETIME when the completion
+	 * arrived exactly on the last polling iteration */
+	if (completed <= 0) {
+		pr_err("%s: Didn't receive completion in %d uSec\n",
+			__func__, FAST_DMA_MEMCPY_TIMEOUT);
+
+		return -ETIME;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(udma_fast_memcpy);
+
+/* Translate a virtual address to physical via the ARM ATS1CPR/PAR
+ * coprocessor registers (H/W stage-1 translation, privileged read).
+ * NOTE(review): the PAR fault bit (bit 0) is not checked -- a failed
+ * translation would silently yield a bogus address; also assumes the
+ * mapping uses 4K pages (12-bit page offset) -- confirm both.
+ */
+static inline al_phys_addr_t virt_to_physical_address(const volatile void __iomem *address)
+{
+	al_phys_addr_t phys_addr;
+	uint32_t phys_addr_h, phys_addr_l;
+
+	/*
+	 * write a virt. address to ATS1CPR:
+	 * perform H/W stage1 address translation (meaning, to IPA)
+	 * translate as current security state, privileged read accesses
+	 * read PAR: (physical address register)
+	 * lower 12-bit have some flags, the rest holds upper bits
+	 * of the physical address
+	 */
+	asm volatile( "mcr p15, 0, %0, c7, c8, 0" :: "r"(address));
+
+	/*
+	 * according to ARM ABI, in Little Endian systems r0 will contain the
+	 * low 32 bits, while in Big Endian systems r0 will contain the high 32
+	 * bits
+	 * TODO: assumes LE need to change to BE mode
+	 */
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#error "virt_to_physical_address assumes LE!"
+#endif
+	asm volatile("mrrc p15, 0, %0, %1, c7" : "=r"(phys_addr_l), "=r"(phys_addr_h));
+
+	/* Take the lower 12-bit from the virtual address. */
+	phys_addr = phys_addr_l & ~(((uint32_t)1<<12) - 1UL);
+	phys_addr |= (uintptr_t)address & AL_BIT_MASK(12);
+
+	return phys_addr;
+}
+
+#ifdef CONFIG_AL_PCIE_DEADLOCK_WA_VALIDATE
+/* Debug aid: re-read the register through the CPU and compare with the
+ * DMA-read value, logging any mismatch and preferring the CPU value.
+ * NOTE: this macro deliberately captures 'address' and 'phys_addr' from
+ * the caller's scope (_al_dma_read_reg) -- it is not self-contained.
+ */
+#define _al_dma_dma_read_validate(type, val)	\
+{	\
+	type _cpu_val;	\
+	switch (sizeof(type)) {	\
+	case sizeof(uint8_t):	\
+		_cpu_val = __raw_readb(address);	\
+		break;	\
+	case sizeof(uint16_t):	\
+		_cpu_val = le16_to_cpu((__force __le16)__raw_readw(address)); \
+		break;	\
+	default:	\
+	case sizeof(uint32_t):	\
+		_cpu_val = le32_to_cpu((__force __le32)__raw_readl(address)); \
+		break;	\
+	}	\
+	\
+	if (memcmp(&_cpu_val, &val, sizeof(type))) {	\
+		al_info("[%s] Potential Error: DMA read value isn't the same as CPU read addr: " \
+			"%p phys addr %x DMA read: %x cpu read: %x\n" \
+			"This register might be clear on read or status register so different values" \
+			"doesn't guarantee we have a problem, Please check the spec\n", \
+			__func__, address, phys_addr, val, _cpu_val); \
+		val = _cpu_val;	\
+	}	\
+}
+#else
+#define _al_dma_dma_read_validate(type, val)
+#endif
+
+/* Common register-read helper for the PCIe deadlock workaround.
+ *
+ * If fast DMA is up and HW cache coherency is enabled, and the physical
+ * address falls inside one of the registered PCIe read windows, the read
+ * is performed by DMA into a per-CPU scratch word instead of a CPU load.
+ * Otherwise a plain MMIO read is done.
+ *
+ * Locking: interrupts are disabled before the window check and stay
+ * disabled across the whole pcie_mem_read path (they are restored in
+ * every return branch of the trailing switch); on the plain-MMIO path
+ * they are restored before the read.
+ */
+static inline uint32_t _al_dma_read_reg(const volatile void __iomem *address, int size)
+{
+	unsigned long flags;
+	al_phys_addr_t phys_addr;
+	uint32_t val_32;
+	uint16_t val_16;
+	uint8_t val_8;
+	int i;
+
+	/* Use DMA read only if the fast DMA was initialized and HW CC */
+	if (likely((al_fabric_hwcc_enabled()) && (fast_dma_init))) {
+		local_irq_save(flags);
+
+		phys_addr = virt_to_physical_address(address);
+
+		for (i = 0; i < AL_SB_PCIE_NUM; i++) {
+			if (likely(al_pcie_address_valid[i] == false))
+				continue;
+
+			if (unlikely(phys_addr >= al_pcie_read_addr_start[i] &&
+				phys_addr <= al_pcie_read_addr_end[i]))
+				goto pcie_mem_read;
+		}
+
+		local_irq_restore(flags);
+	}
+
+
+	/* plain MMIO read path */
+	switch (size) {
+	case sizeof(uint8_t):
+		val_8 = __raw_readb(address);
+		return val_8;
+	case sizeof(uint16_t):
+		val_16 = le16_to_cpu((__force __le16)__raw_readw(address));
+		return val_16;
+	default:
+	case sizeof(uint32_t):
+		val_32 = le32_to_cpu((__force __le32)__raw_readl(address));
+		return val_32;
+	}
+
+pcie_mem_read:
+	/* DMA the register into this CPU's scratch word, then load it */
+	udma_fast_memcpy(size,
+		phys_addr,
+		__get_cpu_var(temp_phys_addr_percpu));
+
+	switch (size) {
+	default:
+	case sizeof(uint32_t):
+		val_32 = *__get_cpu_var(temp_percpu);
+		_al_dma_dma_read_validate(uint32_t, val_32);
+		local_irq_restore(flags);
+		return val_32;
+	case sizeof(uint16_t):
+		val_16 = *__get_cpu_var(temp_percpu);
+		_al_dma_dma_read_validate(uint16_t, val_16);
+		local_irq_restore(flags);
+		return val_16;
+	case sizeof(uint8_t):
+		val_8 = *__get_cpu_var(temp_percpu);
+		_al_dma_dma_read_validate(uint8_t, val_8);
+		local_irq_restore(flags);
+		return val_8;
+	}
+}
+
+/* 32-bit register read via the PCIe-deadlock-safe helper. */
+uint32_t al_dma_read_reg32(const volatile void __iomem *address)
+{
+	return _al_dma_read_reg(address, sizeof(uint32_t));
+}
+EXPORT_SYMBOL(al_dma_read_reg32);
+
+/* 16-bit register read via the PCIe-deadlock-safe helper. */
+uint16_t al_dma_read_reg16(const volatile void __iomem *address)
+{
+	return _al_dma_read_reg(address, sizeof(uint16_t));
+}
+EXPORT_SYMBOL(al_dma_read_reg16);
+
+/* 8-bit register read via the PCIe-deadlock-safe helper. */
+uint8_t al_dma_read_reg8(const volatile void __iomem *address)
+{
+	return _al_dma_read_reg(address, sizeof(uint8_t));
+}
+EXPORT_SYMBOL(al_dma_read_reg8);
+
+
+/* 32-bit register write counterpart of _al_dma_read_reg(): if the target
+ * physical address falls inside a registered PCIe write window (and fast
+ * DMA plus HW cache coherency are available), the value is staged in the
+ * per-CPU scratch word and DMA'd out instead of a CPU store.  Interrupts
+ * stay disabled from the window check until after the DMA completes.
+ */
+void al_dma_write_reg32(volatile void __iomem *address, u32 val)
+{
+	unsigned long flags;
+	al_phys_addr_t phys_addr;
+	int i;
+
+	/* Use DMA write only if the fast DMA was initialized and HW CC */
+	if (likely((al_fabric_hwcc_enabled()) && (fast_dma_init))) {
+		local_irq_save(flags);
+
+		phys_addr = virt_to_physical_address(address);
+
+		for (i = 0; i < AL_SB_PCIE_NUM; i++) {
+			if (likely(al_pcie_address_valid[i] == false))
+				continue;
+
+			if (unlikely(phys_addr >= al_pcie_write_addr_start[i] &&
+				phys_addr <= al_pcie_write_addr_end[i]))
+				goto pcie_mem_write;
+		}
+
+		local_irq_restore(flags);
+	}
+
+	/* plain MMIO write path */
+	__raw_writel((__force u32) cpu_to_le32(val), address);
+
+	return;
+
+pcie_mem_write:
+	/* stage the value in this CPU's scratch word, then DMA it out */
+	*__get_cpu_var(temp_percpu) = val;
+
+	udma_fast_memcpy(sizeof(uint32_t),
+		__get_cpu_var(temp_phys_addr_percpu),
+		phys_addr);
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(al_dma_write_reg32);
+
+/******************************************************************************
+ *****************************************************************************/
+/* Tear down the core DMA engine: unregister from the dmaengine framework,
+ * release interrupts, free the HAL name string and the sw-descriptor cache.
+ * NOTE(review): ssm_dma_params.name is also kfree()d by
+ * al_dma_fast_terminate() -- confirm the two teardown paths cannot both
+ * run on the same device (double free).
+ */
+int al_dma_core_terminate(
+	struct al_dma_device *device)
+{
+	int status = 0;
+
+	struct dma_device *dma = &device->common;
+
+	dev_dbg(
+		dma->dev,
+		"%s(%p)\n",
+		__func__,
+		device);
+
+	dma_async_device_unregister(&device->common);
+
+	al_dma_free_interrupts(device);
+
+	kfree(device->ssm_dma_params.name);
+
+	kmem_cache_destroy(device->cache);
+
+	return status;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate and initialize up to max_channels channels, clamped to the size
+ * of the device's channel array.  Returns (and records in dma->chancnt)
+ * the number of channels actually brought up; a failed allocation simply
+ * stops the loop early.
+ */
+static int al_dma_init_channels(struct al_dma_device *device, int max_channels)
+{
+	struct device *dev = &device->pdev->dev;
+	struct dma_device *dma = &device->common;
+	struct al_dma_chan *chan;
+	int idx;
+
+	INIT_LIST_HEAD(&dma->channels);
+
+	dma->chancnt = max_channels;
+	if (dma->chancnt > ARRAY_SIZE(device->channels)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(device->channels));
+		dma->chancnt = ARRAY_SIZE(device->channels);
+	}
+
+	for (idx = 0; idx < dma->chancnt; idx++) {
+		chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+		if (chan == NULL)
+			break;
+
+		al_dma_init_channel(device, chan, idx);
+	}
+
+	dma->chancnt = idx;
+	return idx;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Wire one channel into the device: set back-pointers and HAL handle,
+ * init its locks, register it on the dmaengine channel list and arm its
+ * cleanup tasklet (the tasklet receives the dma_chan pointer as data).
+ */
+static void al_dma_init_channel(struct al_dma_device *device,
+			 struct al_dma_chan *chan, int idx)
+{
+	struct dma_device *dma = &device->common;
+	struct dma_chan *c = &chan->common;
+	unsigned long data = (unsigned long) c;
+
+	dev_dbg(
+		dma->dev,
+		"%s(%p, %p, %d): %p\n",
+		__func__,
+		device,
+		chan,
+		idx,
+		c);
+
+	chan->device = device;
+	chan->idx = idx;
+	chan->hal_raid = &device->hal_raid;
+
+	spin_lock_init(&chan->prep_lock);
+
+	spin_lock_init(&chan->cleanup_lock);
+	chan->common.device = dma;
+	list_add_tail(&chan->common.device_node, &dma->channels);
+	device->channels[idx] = chan;
+
+	tasklet_init(&chan->cleanup_task, al_dma_cleanup_tasklet, data);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Set up interrupts for the device, trying in order:
+ *  1. one MSI-X vector per channel (entries start at 3 -- the first
+ *     entries presumably belong to other interrupt groups; confirm),
+ *     with per-channel IRQ affinity;
+ *  2. a single shared MSI-X vector;
+ *  3. legacy INTx.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int al_dma_setup_interrupts(struct al_dma_device *device)
+{
+	struct al_dma_chan *chan;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
+
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = device->common.chancnt;
+
+	/* entries 0..2 are skipped; channel vectors start at entry 3 */
+	for (i = 0; i < msixcnt; i++)
+		device->msix_entries[i].entry = 3 + i;
+
+	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+
+	if (err < 0) {
+		dev_err(dev, "pci_enable_msix failed! using intx instead.\n");
+		goto intx;
+	}
+
+	/* err > 0 means fewer vectors than requested are available */
+	if (err > 0) {
+		dev_err(dev, "pci_enable_msix failed! msix_single_vector.\n");
+		goto msix_single_vector;
+	}
+
+	for (i = 0; i < msixcnt; i++) {
+		msix = &device->msix_entries[i];
+
+		chan = al_dma_chan_by_index(device, i);
+
+		dev_dbg(dev, "%s: requesting irq %d\n", __func__, msix->vector);
+
+		snprintf(device->irq_tbl[i].name, AL_DMA_IRQNAME_SIZE,
+			 "al-dma-comp-%d@pci:%s", i,
+			 pci_name(pdev));
+
+		err = devm_request_irq(
+			dev,
+			msix->vector,
+			al_dma_do_interrupt_msix,
+			0,
+			device->irq_tbl[i].name,
+			chan);
+
+		if (err) {
+			dev_err(dev, "devm_request_irq failed!.\n");
+
+			/* unwind the vectors requested so far */
+			for (j = 0; j < i; j++) {
+				msix = &device->msix_entries[j];
+				chan = al_dma_chan_by_index(device, j);
+				devm_free_irq(dev, msix->vector, chan);
+			}
+
+			/* goto msix_single_vector; */
+			return -EIO;
+		}
+
+		/* setup interrupt affinity: bind channel i to CPU i when
+		 * that CPU is online, else allow any online CPU */
+		if (cpu_online(chan->idx))
+			cpumask_set_cpu(chan->idx, &chan->affinity_mask);
+		else
+			cpumask_copy(&chan->affinity_mask, cpu_online_mask);
+
+		dev_dbg(
+			dev,
+			"Setting affinity of channel %d to %lx\n",
+			chan->idx,
+			chan->affinity_mask.bits[0]);
+
+		err = irq_set_affinity_hint(msix->vector, &chan->affinity_mask);
+		if (err) {
+			dev_err(dev, "irq_set_affinity_hint failed!\n");
+			return err;
+		}
+
+		err = irq_set_affinity(msix->vector, &chan->affinity_mask);
+		if (err) {
+			dev_err(dev, "irq_set_affinity failed!\n");
+			return err;
+		}
+	}
+
+	err = al_udma_iofic_config(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_IOFIC_MODE_MSIX_PER_Q, 0x480, 0x480, 0x1E0, 0x1E0);
+	if (err) {
+		dev_err(dev, "al_udma_iofic_config failed!.\n");
+		return err;
+	}
+
+	/* unmask one group-B bit per channel */
+	al_udma_iofic_unmask(
+		(struct unit_regs *)device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_B,
+		((1 << (device->common.chancnt)) - 1));
+
+	goto done;
+
+msix_single_vector:
+	msix = &device->msix_entries[0];
+
+	/* entry 0 marks single-vector mode (see al_dma_free_interrupts) */
+	msix->entry = 0;
+
+	err = pci_enable_msix(pdev, device->msix_entries, 1);
+
+	if (err)
+		goto intx;
+
+	snprintf(device->irq_tbl[0].name, AL_DMA_IRQNAME_SIZE,
+		 "al-dma-msix-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(
+		dev,
+		msix->vector,
+		al_dma_do_interrupt,
+		IRQF_TRIGGER_RISING,
+		device->irq_tbl[0].name, device);
+
+	if (err) {
+		pci_disable_msix(pdev);
+		goto intx;
+	}
+
+	goto done;
+
+intx:
+	snprintf(device->irq_tbl[0].name, AL_DMA_IRQNAME_SIZE,
+		 "al-dma-intx-all@pci:%s", pci_name(pdev));
+
+	err = devm_request_irq(dev, pdev->irq, al_dma_do_interrupt,
+			       IRQF_SHARED, device->irq_tbl[0].name, device);
+	if (err)
+		goto err_no_irq;
+
+done:
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Release whatever interrupt mode al_dma_setup_interrupts() ended up in:
+ * entry == 0 on the first MSI-X entry identifies single-vector mode
+ * (per-channel mode uses entries 3+i), otherwise free one vector per
+ * channel; fall back to the INTx IRQ if MSI-X was never enabled.
+ */
+static void al_dma_free_interrupts(struct al_dma_device *device)
+{
+	struct al_dma_chan *chan;
+	struct pci_dev *pdev = device->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, msixcnt;
+
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = device->common.chancnt;
+
+	if (pdev->msix_enabled) {
+		msix = &device->msix_entries[0];
+		/* single-vector mode: one IRQ bound to the whole device */
+		if (msix->entry == 0) {
+			devm_free_irq(dev, msix->vector, device);
+			pci_disable_msix(pdev);
+			return;
+		}
+
+		/* per-channel mode: drop affinity hints, then free */
+		for (i = 0; i < msixcnt; i++) {
+			msix = &device->msix_entries[i];
+			chan = al_dma_chan_by_index(device, i);
+			irq_set_affinity_hint(msix->vector, NULL);
+			devm_free_irq(dev, msix->vector, chan);
+		}
+
+		pci_disable_msix(pdev);
+	} else {
+		devm_free_irq(dev, pdev->irq, device);
+	}
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* al_dma_alloc_chan_resources - allocate/initialize tx and rx descriptor rings
+ *
+ * Allocates the Tx/Rx submission rings and the Rx completion ring, the
+ * software descriptor ring, and initializes the HAL queue.  Returns the
+ * number of allocated sw descriptors on success, negative errno on error;
+ * every error path releases whatever was allocated before it.
+ */
+static int al_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct device *dev = chan->device->common.dev;
+	struct al_dma_sw_desc **sw_ring;
+	struct al_udma_q_params tx_params;
+	struct al_udma_q_params rx_params;
+	int ring_alloc_order = al_dma_get_ring_alloc_order();
+	int tx_descs_order = al_dma_get_tx_descs_order();
+	int rx_descs_order = al_dma_get_rx_descs_order();
+	uint32_t rc = 0;
+
+	dev_dbg(dev, "al_dma_alloc_chan_resources: channel %d\n",
+		chan->idx);
+
+	/* have we already been set up? */
+	if (chan->sw_ring)
+		return 1 << chan->alloc_order;
+
+	chan->tx_descs_num = 1 << tx_descs_order;
+	chan->rx_descs_num = 1 << rx_descs_order;
+
+	/* allocate coherent memory for Tx submission descriptors */
+	chan->tx_dma_desc_virt = dma_alloc_coherent(dev,
+						    chan->tx_descs_num *
+						    sizeof(union al_udma_desc),
+						    &chan->tx_dma_desc,
+						    GFP_KERNEL);
+	if (chan->tx_dma_desc_virt == NULL) {
+		/* %zu: the byte count is a size_t product (was %d) */
+		dev_err(dev, "failed to allocate %zu bytes of coherent "
+			"memory for Tx submission descriptors\n",
+			chan->tx_descs_num * sizeof(union al_udma_desc));
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocted tx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->tx_dma_desc_virt, (u64)chan->tx_dma_desc);
+
+	/* allocate coherent memory for Rx submission descriptors */
+	chan->rx_dma_desc_virt = dma_alloc_coherent(dev,
+						    chan->rx_descs_num *
+						    sizeof(union al_udma_desc),
+						    &chan->rx_dma_desc,
+						    GFP_KERNEL);
+	if (chan->rx_dma_desc_virt == NULL) {
+		dev_err(dev, "failed to allocate %zu bytes of coherent "
+			"memory for Rx submission descriptors\n",
+			chan->rx_descs_num * sizeof(union al_udma_desc));
+
+		al_dma_free_chan_resources(c);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "allocted rx descriptor ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_desc_virt, (u64)chan->rx_dma_desc);
+
+	/* allocate coherent memory for Rx completion descriptors */
+	chan->rx_dma_cdesc_virt = dma_alloc_coherent(dev,
+						     chan->rx_descs_num *
+						     AL_DMA_RAID_RX_CDESC_SIZE,
+						     &chan->rx_dma_cdesc,
+						     GFP_KERNEL);
+	if (chan->rx_dma_cdesc_virt == NULL) {
+		dev_err(dev, "failed to allocate %d bytes of coherent "
+			"memory for Rx completion descriptors\n",
+			chan->rx_descs_num * AL_DMA_RAID_RX_CDESC_SIZE);
+
+		al_dma_free_chan_resources(c);
+		return -ENOMEM;
+	}
+
+	/* clear the Rx completion descriptors to avoid false positive */
+	memset(
+		chan->rx_dma_cdesc_virt,
+		0,
+		chan->rx_descs_num * AL_DMA_RAID_RX_CDESC_SIZE);
+
+	dev_dbg(
+		dev,
+		"allocted rx completion desc ring: virt 0x%p phys 0x%llx\n",
+		chan->rx_dma_cdesc_virt, (u64)chan->rx_dma_cdesc);
+
+	tx_params.size = chan->tx_descs_num;
+	tx_params.desc_base = chan->tx_dma_desc_virt;
+	tx_params.desc_phy_base = chan->tx_dma_desc;
+	tx_params.cdesc_base = NULL; /* don't use Tx completion ring */
+	tx_params.cdesc_phy_base = 0;
+	tx_params.cdesc_size = AL_DMA_RAID_TX_CDESC_SIZE; /* size is needed */
+
+	rx_params.size = chan->rx_descs_num;
+	rx_params.desc_base = chan->rx_dma_desc_virt;
+	rx_params.desc_phy_base = chan->rx_dma_desc;
+	rx_params.cdesc_base = chan->rx_dma_cdesc_virt;
+	rx_params.cdesc_phy_base = chan->rx_dma_cdesc;
+	rx_params.cdesc_size = AL_DMA_RAID_RX_CDESC_SIZE;
+
+	/* alloc sw descriptors */
+	if (ring_alloc_order < AL_DMA_SW_RING_MIN_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d < %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_DMA_SW_RING_MIN_ORDER);
+
+		al_dma_free_chan_resources(c);
+		return -EINVAL;
+	} else if (ring_alloc_order > AL_DMA_SW_RING_MAX_ORDER) {
+		dev_err(
+			dev,
+			"%s: ring_alloc_order = %d > %d!\n",
+			__func__,
+			ring_alloc_order,
+			AL_DMA_SW_RING_MAX_ORDER);
+
+		al_dma_free_chan_resources(c);
+		return -EINVAL;
+	} else if (ring_alloc_order > rx_descs_order) {
+		dev_warn(
+			dev,
+			"%s: ring_alloc_order > rx_descs_order (%d>%d)!\n",
+			__func__,
+			ring_alloc_order,
+			rx_descs_order);
+
+	}
+
+	sw_ring = al_dma_alloc_sw_ring(chan, ring_alloc_order, GFP_KERNEL);
+	if (!sw_ring) {
+		/* the original leaked all three coherent rings here */
+		dma_free_coherent(dev,
+			chan->rx_descs_num * AL_DMA_RAID_RX_CDESC_SIZE,
+			chan->rx_dma_cdesc_virt, chan->rx_dma_cdesc);
+		chan->rx_dma_cdesc_virt = NULL;
+		dma_free_coherent(dev,
+			chan->rx_descs_num * sizeof(union al_udma_desc),
+			chan->rx_dma_desc_virt, chan->rx_dma_desc);
+		chan->rx_dma_desc_virt = NULL;
+		dma_free_coherent(dev,
+			chan->tx_descs_num * sizeof(union al_udma_desc),
+			chan->tx_dma_desc_virt, chan->tx_dma_desc);
+		chan->tx_dma_desc_virt = NULL;
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&chan->cleanup_lock);
+	spin_lock_bh(&chan->prep_lock);
+	chan->sw_ring = sw_ring;
+	chan->head = 0;
+	chan->tail = 0;
+	chan->alloc_order = ring_alloc_order;
+	chan->tx_desc_produced = 0;
+	spin_unlock_bh(&chan->prep_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+
+	rc = al_ssm_dma_q_init(&chan->device->hal_raid, chan->idx,
+			       &tx_params, &rx_params, AL_RAID_Q);
+	if (rc) {
+		dev_err(dev, "failed to initialize hal q %d. rc %d\n",
+			chan->idx, rc);
+		al_dma_free_chan_resources(c);
+		return rc;
+	}
+
+	/* should we return less ?*/
+	return 1 << chan->alloc_order;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* al_dma_free_chan_resources - free tx and rx descriptor rings
+ * @chan: channel to be free
+ *
+ * Drains outstanding completions, frees the software descriptor ring
+ * (if it was ever allocated) and all three coherent rings, NULLing each
+ * pointer so a repeated call is harmless.
+ */
+static void al_dma_free_chan_resources(struct dma_chan *c)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct device *dev = chan->device->common.dev;
+	struct al_dma_sw_desc **sw_ring;
+	int i;
+
+	dev_dbg(dev, "%s(%p): %p\n", __func__, c, chan);
+
+	tasklet_disable(&chan->cleanup_task);
+
+	al_dma_cleanup_fn(chan, 0);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	sw_ring = chan->sw_ring;
+	/* the original dereferenced sw_ring even when it was never
+	 * allocated (error paths call us before chan->sw_ring is set) */
+	if (sw_ring) {
+		for (i = 0; i < (1 << chan->alloc_order); i++)
+			al_dma_free_ring_ent(sw_ring[i], chan);
+
+		kfree(chan->sw_ring);
+		chan->sw_ring = NULL;
+	}
+
+	spin_unlock_bh(&chan->cleanup_lock);
+	if (chan->tx_dma_desc_virt != NULL) {
+		dma_free_coherent(
+			dev,
+			chan->tx_descs_num * sizeof(union al_udma_desc),
+			chan->tx_dma_desc_virt, chan->tx_dma_desc);
+		chan->tx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_desc_virt != NULL) {
+		dma_free_coherent(
+			dev,
+			chan->rx_descs_num * sizeof(union al_udma_desc),
+			chan->rx_dma_desc_virt,
+			chan->rx_dma_desc);
+		chan->rx_dma_desc_virt = NULL;
+	}
+
+	if (chan->rx_dma_cdesc_virt != NULL) {
+		dma_free_coherent(dev, chan->rx_descs_num *
+				  AL_DMA_RAID_RX_CDESC_SIZE,
+				  chan->rx_dma_cdesc_virt, chan->rx_dma_cdesc);
+		/* was clearing rx_dma_desc_virt (copy-paste bug) --
+		 * left the cdesc pointer dangling for a double free */
+		chan->rx_dma_cdesc_virt = NULL;
+	}
+
+	return;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate the software descriptor ring: an array of 2^order entries,
+ * each backed by a sw descriptor from the channel's kmem cache.  On any
+ * failure everything allocated so far is released and NULL is returned.
+ */
+static struct al_dma_sw_desc **al_dma_alloc_sw_ring(
+	struct al_dma_chan *chan,
+	int order,
+	gfp_t flags)
+{
+	int num_descs = 1 << order;
+	struct al_dma_sw_desc **ring;
+	int idx;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(num_descs, sizeof(*ring), flags);
+	if (ring == NULL)
+		return NULL;
+
+	for (idx = 0; idx < num_descs; idx++) {
+		struct al_dma_sw_desc *ent;
+
+		ent = al_dma_alloc_ring_ent(chan, flags);
+		if (ent == NULL)
+			goto unwind;
+
+		set_desc_id(ent, idx);
+		ring[idx] = ent;
+	}
+
+	return ring;
+
+unwind:
+	while (idx--)
+		al_dma_free_ring_ent(ring[idx], chan);
+	kfree(ring);
+	return NULL;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Allocate one zeroed sw descriptor from the channel's cache and hook its
+ * dmaengine tx descriptor to al_dma_tx_submit_unlock.
+ */
+static struct al_dma_sw_desc *al_dma_alloc_ring_ent(
+	struct al_dma_chan	*chan,
+	gfp_t			flags)
+{
+	struct al_dma_sw_desc *desc;
+
+	desc = kmem_cache_zalloc(chan->device->cache, flags);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->txd, &chan->common);
+	desc->txd.tx_submit = al_dma_tx_submit_unlock;
+	return desc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Return one sw descriptor to the channel's kmem cache. */
+static void al_dma_free_ring_ent(
+	struct al_dma_sw_desc	*desc,
+	struct al_dma_chan	*chan)
+{
+	kmem_cache_free(chan->device->cache, desc);
+}
+
+/* wrappers for accessing PCI configuration space */
+/* HAL callback: read a dword from PCI config space.
+ * Now propagates the PCI core's status (0 == PCIBIOS_SUCCESSFUL)
+ * instead of unconditionally returning 0.
+ */
+static int al_dma_read_pcie_config(void *handle, int where, uint32_t *val)
+{
+	/* handle is a pointer to the pci_dev */
+	return pci_read_config_dword((struct pci_dev *)handle, where, val);
+}
+
+/* HAL callback: write a dword to PCI config space.
+ * Now propagates the PCI core's status (0 == PCIBIOS_SUCCESSFUL)
+ * instead of unconditionally returning 0.
+ */
+static int al_dma_write_pcie_config(void *handle, int where, uint32_t val)
+{
+	/* handle is a pointer to the pci_dev */
+	return pci_write_config_dword((struct pci_dev *)handle, where, val);
+}
+
+/* wrapper for PCI function level reset */
+/* HAL callback: issue an FLR on the device, then busy-wait 1ms for it to
+ * settle.  NOTE(review): udelay(1000) busy-spins; if this is never called
+ * from atomic context, msleep/usleep_range would be kinder -- confirm.
+ */
+static int al_dma_write_pcie_flr(void *handle)
+{
+	/* handle is a pointer to the pci_dev */
+	__pci_reset_function_locked((struct pci_dev *)handle);
+	udelay(1000);
+	return 0;
+}
+
+/**
+ * al_dma_flr - perform Function Level Reset
+ * @pdev: PCI device to reset
+ *
+ * Delegates to the HAL FLR sequence, passing the config-space and FLR
+ * wrappers above with @pdev as their opaque handle.
+ */
+void al_dma_flr(struct pci_dev *pdev)
+{
+	al_pcie_perform_flr(al_dma_read_pcie_config,
+			al_dma_write_pcie_config,
+			al_dma_write_pcie_flr,
+			pdev);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * al_dma_get_sw_desc_lock - get sw desc and grab ring producer lock
+ * @chan: dma channel to operate on
+ * @num: the number of required sw descriptors
+ *
+ * Returns 0 with chan->prep_lock HELD when @num descriptors are
+ * available (the caller must release it), or -ENOMEM with the lock
+ * already dropped.
+ */
+int al_dma_get_sw_desc_lock(struct al_dma_chan *chan, int num)
+{
+	spin_lock_bh(&chan->prep_lock);
+
+	/* never allow the last descriptor to be consumed, we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	if (likely(al_dma_ring_space(chan) >= num)) {
+		dev_dbg(to_dev(chan), "%s: (%x:%x)\n",
+			__func__, chan->head, chan->tail);
+		return 0;  /* with chan->prep_lock held */
+	}
+
+	spin_unlock_bh(&chan->prep_lock);
+
+	return -ENOMEM;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * al_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ *
+ * Currently a stub: acknowledges the IRQ without dispatching any work.
+ */
+static irqreturn_t al_dma_do_interrupt(int irq, void *data)
+{
+	pr_debug("%s(%d, %p)\n", __func__, irq, data);
+
+	/* TODO: handle interrupt registers */
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * al_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data (the channel, as registered in setup_interrupts)
+ *
+ * Defers completion processing to the channel's cleanup tasklet.
+ */
+static irqreturn_t al_dma_do_interrupt_msix(int irq, void *data)
+{
+	struct al_dma_chan *chan = data;
+
+	pr_debug("%s(%d, %p)\n", __func__, irq, data);
+
+	tasklet_schedule(&chan->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/**
+ * al_dma_tx_status - poll the status of an DMA transaction
+ * @c: channel handle
+ * @cookie: transaction identifier
+ * @txstate: if set, updated with the transaction state
+ *
+ * Fast path: report the cookie state as-is.  Otherwise run one cleanup
+ * pass to reap finished descriptors and re-query.
+ */
+static enum dma_status al_dma_tx_status(
+	struct dma_chan *c,
+	dma_cookie_t cookie,
+	struct dma_tx_state *txstate)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	enum dma_status ret;
+
+	dev_dbg(
+		to_dev(chan),
+		"%s(%d)\n",
+		__func__,
+		cookie);
+
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
+
+	al_dma_cleanup_fn(chan, 0);
+
+	return dma_cookie_status(c, cookie, txstate);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Push any produced-but-unsubmitted Tx descriptors to the hardware.
+ * Both call sites (al_dma_tx_submit_unlock, al_dma_issue_pending) invoke
+ * this with chan->prep_lock held, which protects tx_desc_produced.
+ */
+static inline int al_dma_issue_pending_raw(struct al_dma_chan *chan)
+{
+	int err = 0;
+
+	if (chan->tx_desc_produced) {
+		dev_dbg(
+			chan->device->common.dev,
+			"%s(%p): issuing %u descriptors\n",
+			__func__,
+			chan,
+			chan->tx_desc_produced);
+
+		err = al_raid_dma_action(
+			chan->hal_raid,
+			chan->idx,
+			chan->tx_desc_produced);
+		if (err)
+			dev_err(
+				chan->device->common.dev,
+				"al_raid_dma_action failed\n");
+
+		chan->tx_desc_produced = 0;
+	}
+
+	return err;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* Assign a cookie to @tx (when non-NULL) and advance the sw ring head by
+ * the number of descriptors locked by the preceding prep call.  Depending
+ * on AL_DMA_ISSUE_PNDNG_UPON_SUBMIT this either leaves prep_lock held for
+ * al_dma_tx_submit_unlock() to release, or releases it here.
+ */
+void al_dma_tx_submit_sw_cond_unlock(
+	struct al_dma_chan	*chan,
+	struct dma_async_tx_descriptor *tx)
+{
+	if (tx) {
+		struct dma_chan *c = tx->chan;
+		dma_cookie_t cookie = dma_cookie_assign(tx);
+
+		c->cookie = cookie;
+
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: cookie = %d\n",
+			__func__,
+			cookie);
+
+		/**
+		 * according to Documentation/circular-buffers.txt we should
+		 * have smp_wmb before intcrementing the head, however, the
+		 * al_raid_dma_action contains writel() which implies dmb on
+		 * ARM so this smp_wmb() can be omitted on ARM platforms
+		 */
+		/*smp_wmb();*/ /* commit the item before updating the head */
+		chan->head += chan->sw_desc_num_locked;
+		/**
+		 * in our case the consumer (interrupt handler) will be waken up
+		 * by the hw, so we send the transaction to the hw after
+		 * incrementing the head
+		 **/
+	}
+
+#if !AL_DMA_ISSUE_PNDNG_UPON_SUBMIT
+	spin_unlock_bh(&chan->prep_lock);
+#endif
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* dmaengine tx_submit hook.  When AL_DMA_ISSUE_PNDNG_UPON_SUBMIT is set,
+ * pushes pending descriptors to the hardware and releases prep_lock
+ * (held since the prep call); otherwise it only returns the cookie.
+ */
+static dma_cookie_t al_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+{
+#if AL_DMA_ISSUE_PNDNG_UPON_SUBMIT
+	int err;
+
+	struct dma_chan *c = tx->chan;
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s(%p): %p, %p\n",
+		__func__,
+		tx,
+		chan,
+		c);
+
+	err = al_dma_issue_pending_raw(chan);
+	if (err)
+		dev_err(
+			chan->device->common.dev,
+			"%s: al_dma_issue_pending\n",
+			__func__);
+
+	spin_unlock_bh(&chan->prep_lock);
+#endif
+
+	return tx->cookie;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/* dmaengine issue_pending hook: flush produced descriptors to hardware
+ * under prep_lock.  Compiled out when descriptors are already issued at
+ * submit time (AL_DMA_ISSUE_PNDNG_UPON_SUBMIT).
+ */
+static void al_dma_issue_pending(struct dma_chan *c)
+{
+#if !AL_DMA_ISSUE_PNDNG_UPON_SUBMIT
+	int err;
+
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+
+	spin_lock_bh(&chan->prep_lock);
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s(%p)\n",
+		__func__,
+		chan);
+
+	err = al_dma_issue_pending_raw(chan);
+	if (err)
+		dev_err(
+			chan->device->common.dev,
+			"%s: al_dma_issue_pending\n",
+			__func__);
+
+	spin_unlock_bh(&chan->prep_lock);
+#endif
+}
+
+/* dmaengine device_control hook.  No control command is supported by
+ * this driver -- every cmd (SLAVE_CONFIG, TERMINATE_ALL, PAUSE, RESUME,
+ * or anything else) is logged and rejected with -ENXIO, exactly as the
+ * original all-arms-identical switch did.
+ */
+static int al_dma_control(
+	struct dma_chan *c,
+	enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+
+	dev_err(
+		chan->device->common.dev,
+		"%s: Unexpected cmd (%d)!\n",
+		__func__,
+		cmd);
+
+	return -ENXIO;
+}
+/* Tasklet body scheduled from the per-channel MSI-X handler: reap
+ * completed descriptors, then re-enable (unmask) this channel's group-B
+ * interrupt bit in the UDMA interrupt controller.
+ */
+static void al_dma_cleanup_tasklet(unsigned long data)
+{
+	struct al_dma_chan *chan = to_al_dma_chan((void *) data);
+	int num_completed;
+
+	num_completed = al_dma_cleanup_fn(chan, 1);
+
+	if (unlikely(num_completed < 0))
+		dev_err(
+			chan->device->common.dev,
+			"al_dma_cleanup_fn failed\n");
+
+	al_udma_iofic_unmask(
+		(struct unit_regs *)chan->device->udma_regs_base,
+		AL_UDMA_IOFIC_LEVEL_PRIMARY,
+		AL_INT_GROUP_B,
+		1 << chan->idx);
+}
+
diff --git a/drivers/dma/al/al_dma_main.c b/drivers/dma/al/al_dma_main.c
new file mode 100644
index 0000000..71cdd04
--- /dev/null
+++ b/drivers/dma/al/al_dma_main.c
@@ -0,0 +1,237 @@
+/*
+ * Annapurna Labs DMA Linux driver
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "al_dma.h"
+#include "al_dma_sysfs.h"
+
+MODULE_VERSION(AL_DMA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Annapurna Labs");
+
+#define DRV_NAME "al_dma"
+
+enum {
+ /* BAR's are enumerated in terms of pci_resource_start() terms */
+ AL_DMA_UDMA_BAR = 0,
+ AL_DMA_APP_BAR = 4,
+};
+
+static int al_dma_pci_probe(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id);
+
+static void al_dma_pci_remove(
+ struct pci_dev *pdev);
+
+static void al_dma_pci_shutdown(
+ struct pci_dev *pdev);
+
+static DEFINE_PCI_DEVICE_TABLE(al_dma_pci_tbl) = {
+ { PCI_VDEVICE(ANNAPURNA_LABS, PCI_DEVICE_ID_AL_RAID_DMA) },
+ { PCI_VDEVICE(ANNAPURNA_LABS, PCI_DEVICE_ID_AL_RAID_DMA_VF) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, al_dma_pci_tbl);
+
+static struct pci_driver al_dma_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = al_dma_pci_tbl,
+ .probe = al_dma_pci_probe,
+ .remove = al_dma_pci_remove,
+ .shutdown = al_dma_pci_shutdown,
+};
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_pci_probe - PCI probe for the Annapurna Labs RAID/DMA engine.
+ *
+ * Uses managed resources (pcim_/devm_), so most error paths simply
+ * return; only a sysfs-init failure needs explicit unwinding of the
+ * core init.  A physical function maps both the UDMA and application
+ * BARs and takes the full core-init + sysfs path; a virtual function
+ * maps only the UDMA BAR and uses the "fast" init path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int al_dma_pci_probe(
+	struct pci_dev *pdev,
+	const struct pci_device_id *id)
+{
+	int status = 0;
+
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct al_dma_device *device;
+	int bar_reg;
+	u16 dev_id;
+	u8 rev_id;
+
+	dev_dbg(dev, "%s(%p, %p)\n", __func__, pdev, id);
+
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+
+	/* Perform FLR on a related function */
+	al_dma_flr(pdev);
+
+	status = pcim_enable_device(pdev);
+	if (status) {
+		pr_err("%s: pcim_enable_device failed!\n", __func__);
+		goto done;
+	}
+
+	/* PF needs both BARs; VF exposes only the UDMA BAR */
+	bar_reg = pdev->is_physfn ?
+		(1 << AL_DMA_UDMA_BAR) | (1 << AL_DMA_APP_BAR) :
+		(1 << AL_DMA_UDMA_BAR);
+
+	status = pcim_iomap_regions(
+		pdev,
+		bar_reg,
+		DRV_NAME);
+	if (status) {
+		pr_err("%s: pcim_iomap_regions failed!\n", __func__);
+		goto done;
+	}
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	/* 40-bit DMA addressing for both streaming and coherent mappings */
+	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (status)
+		goto done;
+
+	status = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+	if (status)
+		goto done;
+
+	device = devm_kzalloc(dev, sizeof(struct al_dma_device), GFP_KERNEL);
+	if (!device) {
+		status = -ENOMEM;
+		goto done;
+	}
+
+	device->pdev = pdev;
+	device->dev_id = dev_id;
+	device->rev_id = rev_id;
+
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+	dev_set_drvdata(dev, device);
+
+	device->common.dev = &pdev->dev;
+
+#ifdef CONFIG_AL_DMA_PCI_IOV
+	/* best-effort: SR-IOV enable failure is logged but not fatal */
+	if (PCI_FUNC(pdev->devfn) == 0) {
+		status = pci_enable_sriov(pdev, 1);
+		if (status) {
+			dev_err(dev, "%s: pci_enable_sriov failed, status %d\n",
+				__func__, status);
+		}
+	}
+#endif
+
+	if (pdev->is_physfn) {
+		status = al_dma_core_init(
+			device,
+			iomap[AL_DMA_UDMA_BAR],
+			iomap[AL_DMA_APP_BAR]);
+		if (status) {
+			dev_err(dev, "%s: al_dma_core_init failed\n", __func__);
+			goto done;
+		}
+
+		status = al_dma_sysfs_init(dev);
+		if (status) {
+			dev_err(dev, "%s: al_dma_sysfs_init failed\n", __func__);
+			goto err_sysfs_init;
+		}
+	}
+	else {
+		status = al_dma_fast_init(
+			device,
+			iomap[AL_DMA_UDMA_BAR]);
+		if (status) {
+			dev_err(dev, "%s: al_dma_fast_init failed\n", __func__);
+			goto done;
+		}
+	}
+
+	/* success path also exits through 'done' (status == 0 here) */
+	goto done;
+
+err_sysfs_init:
+	al_dma_core_terminate(device);
+
+done:
+	return status;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_pci_remove - PCI remove callback.
+ *
+ * Tears down sysfs + engine core for a physical function, or the
+ * "fast" context for a virtual function.  Bails out silently if probe
+ * never attached a device to drvdata.
+ */
+static void al_dma_pci_remove(struct pci_dev *pdev)
+{
+	struct al_dma_device *device = pci_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+
+	if (!device)
+		return;
+
+	dev_dbg(&pdev->dev, "Removing dma\n");
+
+	if (pdev->is_physfn) {
+		al_dma_sysfs_terminate(dev);
+
+		al_dma_core_terminate(device);
+	} else {
+		al_dma_fast_terminate(device);
+	}
+
+}
+
+/*
+ * al_dma_pci_shutdown - PCI shutdown callback; reuses the remove path
+ * for virtual functions only.
+ */
+static void al_dma_pci_shutdown(struct pci_dev *pdev)
+{
+	/* Don't call for physfn as its removal is not fully implemented yet */
+	if (!pdev->is_physfn)
+		al_dma_pci_remove(pdev);
+}
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_init_module - module entry point: announce the driver and
+ * register the PCI driver.  Returns pci_register_driver()'s status.
+ */
+static int __init al_dma_init_module(void)
+{
+	int err;
+
+	pr_info(
+		"%s: Annapurna Labs DMA Driver %s\n",
+		DRV_NAME,
+		AL_DMA_VERSION);
+
+	err = pci_register_driver(&al_dma_pci_driver);
+
+	return err;
+}
+module_init(al_dma_init_module);
+
+/******************************************************************************
+ *****************************************************************************/
+/* al_dma_exit_module - module exit point: unregister the PCI driver. */
+static void __exit al_dma_exit_module(void)
+{
+	pci_unregister_driver(&al_dma_pci_driver);
+}
+module_exit(al_dma_exit_module);
diff --git a/drivers/dma/al/al_dma_module_params.c b/drivers/dma/al/al_dma_module_params.c
new file mode 100644
index 0000000..79ba239
--- /dev/null
+++ b/drivers/dma/al/al_dma_module_params.c
@@ -0,0 +1,148 @@
+/*
+ * drivers/crypto/al/al_dma_module_params.c
+ *
+ * Annapurna Labs DMA driver - module params
+ *
+ * Copyright (C) 2013 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include "al_dma.h"
+
+static int op_support_interrupt = 1;
+module_param(op_support_interrupt, int, 0444);
+MODULE_PARM_DESC(op_support_interrupt,
+ "DMA_INTERRUPT capability (default: 1 - enabled)");
+
+static int op_support_memcpy = 1;
+module_param(op_support_memcpy, int, 0444);
+MODULE_PARM_DESC(op_support_memcpy,
+ "DMA_MEMCPY capability (default: 1 - enabled)");
+
+static int op_support_sg = 1;
+module_param(op_support_sg, int, 0444);
+MODULE_PARM_DESC(op_support_sg,
+ "DMA_SG capability (default: 1 - enabled)");
+
+static int op_support_memset = 1;
+module_param(op_support_memset, int, 0444);
+MODULE_PARM_DESC(op_support_memset,
+ "DMA_MEMSET capability (default: 1 - enabled)");
+
+static int op_support_xor = 1;
+module_param(op_support_xor, int, 0444);
+MODULE_PARM_DESC(op_support_xor,
+ "DMA_XOR capability (default: 1 - enabled)");
+
+static int op_support_xor_val = 1;
+module_param(op_support_xor_val, int, 0444);
+MODULE_PARM_DESC(op_support_xor_val,
+ "DMA_XOR_VAL capability (default: 1 - enabled)");
+
+static int op_support_pq = 1;
+module_param(op_support_pq, int, 0444);
+MODULE_PARM_DESC(op_support_pq,
+ "DMA_PQ capability (default: 1 - enabled)");
+
+static int op_support_pq_val = 1;
+module_param(op_support_pq_val, int, 0444);
+MODULE_PARM_DESC(op_support_pq_val,
+ "DMA_PQ_VAL capability (default: 1 - enabled)");
+
+static int max_channels = AL_DMA_MAX_CHANNELS;
+module_param(max_channels, int, 0644);
+MODULE_PARM_DESC(
+ max_channels,
+ "maximum number of channels (queues) to enable (default: 4)");
+
+/* ring_alloc_order: per-channel descriptor ring size as a power of two.
+ * The variable's initializer is the authoritative default (10); the
+ * parameter description below previously claimed 8 and is now fixed to
+ * match. */
+static int ring_alloc_order = 10;
+module_param(ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(
+	ring_alloc_order,
+	"allocate 2^n descriptors per channel"
+	" (default: 10 max: 16)");
+
+static int tx_descs_order = 15;
+module_param(tx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+ tx_descs_order,
+ "allocate 2^n of descriptors in Tx queue (default: 15)");
+
+static int rx_descs_order = 15;
+module_param(rx_descs_order, int, 0644);
+MODULE_PARM_DESC(
+ rx_descs_order,
+ "allocate 2^n of descriptors in Rx queue (default: 15)");
+
+/*
+ * Read-only accessors for the module parameters above.  The rest of
+ * the driver goes through these so the raw parameter variables can
+ * stay file-local (static).
+ */
+int al_dma_get_op_support_interrupt(void)
+{
+	return op_support_interrupt;
+}
+
+int al_dma_get_op_support_memcpy(void)
+{
+	return op_support_memcpy;
+}
+
+int al_dma_get_op_support_sg(void)
+{
+	return op_support_sg;
+}
+
+int al_dma_get_op_support_memset(void)
+{
+	return op_support_memset;
+}
+
+int al_dma_get_op_support_xor(void)
+{
+	return op_support_xor;
+}
+
+int al_dma_get_op_support_xor_val(void)
+{
+	return op_support_xor_val;
+}
+
+int al_dma_get_op_support_pq(void)
+{
+	return op_support_pq;
+}
+
+int al_dma_get_op_support_pq_val(void)
+{
+	return op_support_pq_val;
+}
+
+int al_dma_get_max_channels(void)
+{
+	return max_channels;
+}
+
+int al_dma_get_ring_alloc_order(void)
+{
+	return ring_alloc_order;
+}
+
+int al_dma_get_tx_descs_order(void)
+{
+	return tx_descs_order;
+}
+
+int al_dma_get_rx_descs_order(void)
+{
+	return rx_descs_order;
+}
diff --git a/drivers/dma/al/al_dma_module_params.h b/drivers/dma/al/al_dma_module_params.h
new file mode 100644
index 0000000..467bb06
--- /dev/null
+++ b/drivers/dma/al/al_dma_module_params.h
@@ -0,0 +1,50 @@
+/*
+ * drivers/crypto/al/al_dma_module_params.h
+ *
+ * Annapurna Labs DMA driver - module params
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AL_DMA_MODULE_PARAMS_H__
+#define __AL_DMA_MODULE_PARAMS_H__
+
+/*
+ * Accessors for the al_dma module parameters (defined in
+ * al_dma_module_params.c).  Each returns the current value of the
+ * corresponding module_param; the op_support_* ones gate which
+ * dmaengine capabilities are advertised.
+ */
+
+int al_dma_get_op_support_interrupt(void);
+
+int al_dma_get_op_support_memcpy(void);
+
+int al_dma_get_op_support_sg(void);
+
+int al_dma_get_op_support_memset(void);
+
+int al_dma_get_op_support_xor(void);
+
+int al_dma_get_op_support_xor_val(void);
+
+int al_dma_get_op_support_pq(void);
+
+int al_dma_get_op_support_pq_val(void);
+
+int al_dma_get_max_channels(void);
+
+int al_dma_get_ring_alloc_order(void);
+
+int al_dma_get_tx_descs_order(void);
+
+int al_dma_get_rx_descs_order(void);
+
+#endif /* __AL_DMA_MODULE_PARAMS_H__ */
\ No newline at end of file
diff --git a/drivers/dma/al/al_dma_prep.h b/drivers/dma/al/al_dma_prep.h
new file mode 100644
index 0000000..75e9bff
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep.h
@@ -0,0 +1,88 @@
+/*
+ * Annapurna Labs DMA Linux driver - operation preparation declarations
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __AL_DMA_PREP_H__
+#define __AL_DMA_PREP_H__
+
+#include "al_dma.h"
+
+/*
+ * Prototypes for the per-operation descriptor-preparation routines
+ * (implemented one per al_dma_prep_*.c file).  Each is a dmaengine
+ * device_prep_* callback: it allocates SW descriptors under the
+ * channel prep_lock and returns the tx descriptor of the last
+ * sub-transaction, or NULL when no descriptors are available or the
+ * HW prepare step fails.
+ */
+
+struct dma_async_tx_descriptor *al_dma_prep_interrupt_lock(
+	struct dma_chan *c,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_memcpy_lock(
+	struct dma_chan *c,
+	dma_addr_t dest,
+	dma_addr_t src,
+	size_t len,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_sg_lock(
+	struct dma_chan *c,
+	struct scatterlist *dst_sg, unsigned int dst_nents,
+	struct scatterlist *src_sg, unsigned int src_nents,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_memset_lock(
+	struct dma_chan *c,
+	dma_addr_t dest,
+	int value,
+	size_t len,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_xor_lock(
+	struct dma_chan *c,
+	dma_addr_t dest,
+	dma_addr_t *src,
+	unsigned int src_cnt,
+	size_t len,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_pq_lock(
+	struct dma_chan *c,
+	dma_addr_t *dst,
+	dma_addr_t *src,
+	unsigned int src_cnt,
+	const unsigned char *scf,
+	size_t len,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_pq_val_lock(
+	struct dma_chan *c,
+	dma_addr_t *pq,
+	dma_addr_t *src,
+	unsigned int src_cnt,
+	const unsigned char *scf,
+	size_t len,
+	enum sum_check_flags *pqres,
+	unsigned long flags);
+
+struct dma_async_tx_descriptor *al_dma_prep_xor_val_lock(
+	struct dma_chan *c,
+	dma_addr_t *src,
+	unsigned int src_cnt,
+	size_t len,
+	enum sum_check_flags *result,
+	unsigned long flags);
+
+#endif
+
diff --git a/drivers/dma/al/al_dma_prep_interrupt.c b/drivers/dma/al/al_dma_prep_interrupt.c
new file mode 100644
index 0000000..10833fc
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_interrupt.c
@@ -0,0 +1,116 @@
+/*
+ * Annapurna Labs DMA Linux driver - Interrupt preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_prep_interrupt_lock - prepare a NOP descriptor whose only
+ * effect is to raise an interrupt on completion (DMA_INTERRUPT).
+ *
+ * The channel prep_lock is acquired inside al_dma_get_sw_desc_lock();
+ * it is released here on the error path, or by
+ * al_dma_tx_submit_sw_cond_unlock() otherwise.  Returns the tx
+ * descriptor, or NULL if no SW descriptor is available or the HW
+ * prepare step fails.
+ */
+struct dma_async_tx_descriptor *al_dma_prep_interrupt_lock(
+	struct dma_chan *c,
+	unsigned long flags)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct dma_async_tx_descriptor *txd = NULL;
+	int idx;
+	int32_t rc;
+	struct al_dma_sw_desc *desc;
+	struct al_raid_transaction *xaction;
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s: chan->idx = %d, flags = %08x\n",
+		__func__,
+		chan->idx,
+		(unsigned int)flags);
+
+	if (likely(al_dma_get_sw_desc_lock(chan, 1) == 0))
+		idx = chan->head;
+	else {
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: al_dma_get_sw_desc_lock failed!\n",
+			__func__);
+
+		return NULL;
+	}
+
+	chan->sw_desc_num_locked = 1;
+
+	desc = al_dma_get_ring_ent(chan, idx);
+
+	desc->umap_ent_cnt = 0;
+
+	txd = &desc->txd;
+
+	desc->txd.flags = flags;
+
+	/* prepare hal transaction */
+	xaction = &desc->hal_xaction;
+	memset(xaction, 0, sizeof(struct al_raid_transaction));
+	xaction->op = AL_RAID_OP_NOP;
+	xaction->flags |= AL_SSM_INTERRUPT;
+	if (flags & DMA_PREP_FENCE)
+		xaction->flags |= AL_SSM_BARRIER;
+
+	/* warn about flag bits this NOP operation does not understand */
+	if (flags & (~(DMA_PREP_INTERRUPT | DMA_PREP_FENCE)))
+		dev_err(
+			chan->device->common.dev,
+			"%s: flags = %08x\n",
+			__func__,
+			(unsigned int)flags);
+
+	/* a NOP moves no data: no source or destination blocks */
+	xaction->num_of_srcs = 0;
+	xaction->total_src_bufs = 0;
+
+	xaction->num_of_dsts = 0;
+	xaction->total_dst_bufs = 0;
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s: xaction->flags = %08x\n",
+		__func__,
+		xaction->flags);
+
+	/* send raid transaction to engine */
+	rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+		&desc->hal_xaction);
+	if (unlikely(rc)) {
+		dev_err(chan->device->common.dev,
+			"%s: al_raid_dma_prepare failed!\n", __func__);
+		spin_unlock_bh(&chan->prep_lock);
+		return NULL;
+	}
+
+	chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+	AL_DMA_STATS_UPDATE(
+		chan,
+		chan->stats_prep.int_num,
+		1,
+		chan->stats_prep.int_num, /* dummy */
+		0);
+
+	al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+	return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_memcpy.c b/drivers/dma/al/al_dma_prep_memcpy.c
new file mode 100644
index 0000000..6e2ddf6
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_memcpy.c
@@ -0,0 +1,184 @@
+/*
+ * Annapurna Labs DMA Linux driver - Memory copy preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_MEMCPY
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_prep_memcpy_lock - prepare a DMA_MEMCPY transaction.
+ *
+ * Copies longer than MAX_SIZE are split into multiple HW
+ * sub-transactions; only the LAST one carries the caller-visible txd,
+ * the interrupt/fence flags and the unmap bookkeeping.  The channel
+ * prep_lock is taken by al_dma_get_sw_desc_lock() and released either
+ * on the error paths here or by al_dma_tx_submit_sw_cond_unlock().
+ *
+ * NOTE(review): the dev_dbg below prints a size_t with %d and truncates
+ * dma_addr_t values to 32 bits via (unsigned int) casts - debug-only,
+ * but %zu / %pad would be more correct on 64-bit configurations.
+ */
+struct dma_async_tx_descriptor *al_dma_prep_memcpy_lock(
+	struct dma_chan *c,
+	dma_addr_t dest,
+	dma_addr_t src,
+	size_t len,
+	unsigned long flags)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct dma_async_tx_descriptor *txd = NULL;
+	int idx;
+	int32_t rc;
+	int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+	int sw_desc_num_req_orig = sw_desc_num_req;
+	size_t len_orig = len;
+	dma_addr_t src_orig = src;
+	dma_addr_t dest_orig = dest;
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s: chan->idx = %d, dest = %X, src = %X, len = %d, flags = %08x\n",
+		__func__,
+		chan->idx,
+		(unsigned int)dest,
+		(unsigned int)src,
+		len,
+		(unsigned int)flags);
+
+	if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+		idx = chan->head;
+	else {
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: al_dma_get_sw_desc_lock failed!\n",
+			__func__);
+
+		return NULL;
+	}
+
+	chan->sw_desc_num_locked = sw_desc_num_req;
+
+	if (unlikely(sw_desc_num_req > 1))
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: splitting transaction to %d sub-transactions\n\n",
+			__func__,
+			sw_desc_num_req);
+
+	while (sw_desc_num_req) {
+		int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+		struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+		struct al_raid_transaction *xaction;
+
+		/* only the final sub-transaction exposes a txd to the caller */
+		if (1 == sw_desc_num_req)
+			txd = &desc->txd;
+
+		/* unmap info (for the WHOLE original range) rides on the
+		 * final sub-transaction only */
+		if (1 == sw_desc_num_req) {
+			int umap_ent_cnt = 0;
+
+			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+				al_dma_unmap_info_ent_set(
+					&desc->unmap_info[umap_ent_cnt++],
+					src_orig,
+					len_orig,
+					PCI_DMA_TODEVICE,
+					(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+			}
+
+			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+				al_dma_unmap_info_ent_set(
+					&desc->unmap_info[umap_ent_cnt++],
+					dest_orig,
+					len_orig,
+					PCI_DMA_FROMDEVICE,
+					(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+			}
+
+			desc->umap_ent_cnt = umap_ent_cnt;
+		} else
+			desc->umap_ent_cnt = 0;
+
+		desc->txd.flags = flags;
+		desc->len = cur_len;
+		/* prepare hal transaction */
+		xaction = &desc->hal_xaction;
+		memset(xaction, 0, sizeof(struct al_raid_transaction));
+		xaction->op = AL_RAID_OP_MEM_CPY;
+		if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_INTERRUPT;
+		if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_BARRIER;
+
+		/* use bufs[0] and block[0] for source buffers/blocks */
+		desc->bufs[0].addr = src;
+		desc->bufs[0].len = cur_len;
+		desc->blocks[0].bufs = &desc->bufs[0];
+		desc->blocks[0].num = 1;
+		xaction->srcs_blocks = &desc->blocks[0];
+		xaction->num_of_srcs = 1;
+		xaction->total_src_bufs = 1;
+
+		/* use bufs[1] and block[1] for destination buffers/blocks */
+		desc->bufs[1].addr = dest;
+		desc->bufs[1].len = cur_len;
+		desc->blocks[1].bufs = &desc->bufs[1];
+		desc->blocks[1].num = 1;
+		xaction->dsts_blocks = &desc->blocks[1];
+		xaction->num_of_dsts = 1;
+		xaction->total_dst_bufs = 1;
+
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: xaction->flags = %08x\n",
+			__func__,
+			xaction->flags);
+
+		/* send raid transaction to engine */
+		rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+			&desc->hal_xaction);
+		if (unlikely(rc)) {
+			dev_err(chan->device->common.dev,
+				"%s: al_raid_dma_prepare failed!\n", __func__);
+			spin_unlock_bh(&chan->prep_lock);
+			return NULL;
+		}
+
+		chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+		desc->last_is_memcpy = 1;
+
+#ifdef AL_DMA_MEMCPY_VALIDATION
+		desc->memcpy_dest = phys_to_virt(dest);
+		desc->memcpy_src = phys_to_virt(src);
+		desc->memcpy_len = cur_len;
+#endif
+
+		/* advance to the next MAX_SIZE-sized chunk */
+		idx++;
+		sw_desc_num_req--;
+		len -= MAX_SIZE;
+		dest += MAX_SIZE;
+		src += MAX_SIZE;
+	}
+
+	AL_DMA_STATS_UPDATE(
+		chan,
+		chan->stats_prep.memcpy_num,
+		sw_desc_num_req_orig,
+		chan->stats_prep.memcpy_size,
+		len_orig);
+
+	al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+	return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_memset.c b/drivers/dma/al/al_dma_prep_memset.c
new file mode 100644
index 0000000..8127273
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_memset.c
@@ -0,0 +1,163 @@
+/*
+ * Annapurna Labs DMA Linux driver - Memory setting preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_MEMSET
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_prep_memset_lock - prepare a DMA_MEMSET transaction.
+ *
+ * Fills 'len' bytes at 'dest' with 'value'.  Like the memcpy path,
+ * ranges longer than MAX_SIZE are split into multiple HW
+ * sub-transactions and only the last one carries the txd, the
+ * interrupt/fence flags, and the unmap entry.  The prep_lock taken by
+ * al_dma_get_sw_desc_lock() is released on the error paths here or by
+ * al_dma_tx_submit_sw_cond_unlock().
+ *
+ * NOTE(review): dev_dbg prints a size_t with %d and truncates the
+ * address via (unsigned int) - debug-only format nit.
+ */
+struct dma_async_tx_descriptor *al_dma_prep_memset_lock(
+	struct dma_chan *c,
+	dma_addr_t dest,
+	int value,
+	size_t len,
+	unsigned long flags)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct dma_async_tx_descriptor *txd = NULL;
+	int idx;
+	int32_t rc;
+	int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+	int sw_desc_num_req_orig = sw_desc_num_req;
+	size_t len_orig = len;
+	dma_addr_t dest_orig = dest;
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s: chan->idx = %d, dest = %X, value = %d, len = %d, flags = %08x\n",
+		__func__,
+		chan->idx,
+		(unsigned int)dest,
+		value,
+		len,
+		(unsigned int)flags);
+
+	if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+		idx = chan->head;
+	else {
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: al_dma_get_sw_desc_lock failed!\n",
+			__func__);
+
+		return NULL;
+	}
+
+	chan->sw_desc_num_locked = sw_desc_num_req;
+
+	if (unlikely(sw_desc_num_req > 1))
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: splitting transaction to %d sub-transactions\n\n",
+			__func__,
+			sw_desc_num_req);
+
+	while (sw_desc_num_req) {
+		int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+		struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+		struct al_raid_transaction *xaction;
+
+		/* only the final sub-transaction exposes a txd and unmap info */
+		if (1 == sw_desc_num_req)
+			txd = &desc->txd;
+
+		if (1 == sw_desc_num_req) {
+			int umap_ent_cnt = 0;
+
+			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+				al_dma_unmap_info_ent_set(
+					&desc->unmap_info[umap_ent_cnt++],
+					dest_orig,
+					len_orig,
+					PCI_DMA_FROMDEVICE,
+					(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+			}
+
+			desc->umap_ent_cnt = umap_ent_cnt;
+		} else
+			desc->umap_ent_cnt = 0;
+
+		desc->txd.flags = flags;
+		desc->len = cur_len;
+		/* prepare hal transaction */
+		xaction = &desc->hal_xaction;
+		memset(xaction, 0, sizeof(struct al_raid_transaction));
+		xaction->op = AL_RAID_OP_MEM_SET;
+		if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_INTERRUPT;
+		if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_BARRIER;
+
+		/* the fill pattern is replicated across the xaction data field */
+		memset(xaction->data, value, sizeof(xaction->data));
+
+		/* MEMSET has no sources */
+		xaction->num_of_srcs = 0;
+		xaction->total_src_bufs = 0;
+
+		/* use bufs[1] and block[1] for destination buffers/blocks */
+		desc->bufs[1].addr = dest;
+		desc->bufs[1].len = cur_len;
+		desc->blocks[1].bufs = &desc->bufs[1];
+		desc->blocks[1].num = 1;
+		xaction->dsts_blocks = &desc->blocks[1];
+		xaction->num_of_dsts = 1;
+		xaction->total_dst_bufs = 1;
+
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: xaction->flags = %08x\n",
+			__func__,
+			xaction->flags);
+
+		/* send raid transaction to engine */
+		rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+			&desc->hal_xaction);
+		if (unlikely(rc)) {
+			dev_err(
+				chan->device->common.dev,
+				"%s: al_raid_dma_prepare failed!\n", __func__);
+			spin_unlock_bh(&chan->prep_lock);
+			return NULL;
+		}
+
+		chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+		/* advance to the next MAX_SIZE-sized chunk */
+		idx++;
+		sw_desc_num_req--;
+		len -= MAX_SIZE;
+		dest += MAX_SIZE;
+	}
+
+	AL_DMA_STATS_UPDATE(
+		chan,
+		chan->stats_prep.memset_num,
+		sw_desc_num_req_orig,
+		chan->stats_prep.memset_size,
+		len_orig);
+
+	al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+	return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_pq.c b/drivers/dma/al/al_dma_prep_pq.c
new file mode 100644
index 0000000..30066a8
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_pq.c
@@ -0,0 +1,242 @@
+/*
+ * Annapurna Labs DMA Linux driver - PQ preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_PQ
+
+/******************************************************************************
+ *****************************************************************************/
+/*
+ * al_dma_prep_pq_lock - prepare a DMA_PQ (RAID-6 P/Q parity) transaction.
+ *
+ * dst[0] receives P, dst[1] receives Q; DMA_PREP_PQ_DISABLE_P/Q select
+ * Q-only / P-only operation (both disabled is rejected with BUG()).
+ * Transactions longer than MAX_SIZE are split into chunks; src_off /
+ * dst_off track the running offset into each source/destination.  Only
+ * the final chunk carries the txd, the interrupt/fence flags and the
+ * unmap entries.  The prep_lock taken by al_dma_get_sw_desc_lock() is
+ * released on the error paths here or by
+ * al_dma_tx_submit_sw_cond_unlock().
+ *
+ * NOTE(review): dev_dbg prints a size_t with %d and pointers/addresses
+ * via (unsigned int) casts - debug-only format nit.
+ */
+struct dma_async_tx_descriptor *al_dma_prep_pq_lock(
+	struct dma_chan *c,
+	dma_addr_t *dst,
+	dma_addr_t *src,
+	unsigned int src_cnt,
+	const unsigned char *scf,
+	size_t len,
+	unsigned long flags)
+{
+	struct al_dma_chan *chan = to_al_dma_chan(c);
+	struct dma_async_tx_descriptor *txd = NULL;
+	int idx;
+	int32_t rc;
+	int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+	int sw_desc_num_req_orig = sw_desc_num_req;
+	size_t len_orig = len;
+	dma_addr_t src_off = 0;
+	dma_addr_t dst_off = 0;
+	int i;
+
+	dev_dbg(
+		chan->device->common.dev,
+		"%s: dest_p = %X, dest_q = %X, src = %X, cnt = %d, len = %d,"
+		" flags = %08x\n",
+		__func__,
+		(unsigned int)dst[0],
+		(unsigned int)dst[1],
+		(unsigned int)src,
+		src_cnt,
+		len,
+		(unsigned int)flags);
+
+	/* HW cannot take more sources than AL_DMA_MAX_XOR */
+	if (unlikely(src_cnt > AL_DMA_MAX_XOR)) {
+		BUG();
+		return NULL;
+	}
+
+	/* disabling both P and Q leaves nothing to compute */
+	if (unlikely(
+		(flags & DMA_PREP_PQ_DISABLE_P) &&
+		(flags & DMA_PREP_PQ_DISABLE_Q))) {
+		BUG();
+		return NULL;
+	}
+
+	if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+		idx = chan->head;
+	else {
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: al_dma_get_sw_desc_lock failed!\n",
+			__func__);
+
+		return NULL;
+	}
+
+	chan->sw_desc_num_locked = sw_desc_num_req;
+
+	if (unlikely(sw_desc_num_req > 1))
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: splitting transaction to %d sub-transactions\n\n",
+			__func__,
+			sw_desc_num_req);
+
+	while (sw_desc_num_req) {
+		int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+		struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+		struct al_raid_transaction *xaction;
+
+		/* only the final sub-transaction exposes a txd and unmap info */
+		if (1 == sw_desc_num_req)
+			txd = &desc->txd;
+
+		if (1 == sw_desc_num_req) {
+			int umap_ent_cnt = 0;
+
+			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+				for (i = 0; i < src_cnt; i++) {
+					al_dma_unmap_info_ent_set(
+						&desc->unmap_info[umap_ent_cnt++],
+						src[i],
+						len_orig,
+						PCI_DMA_TODEVICE,
+						(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+				}
+			}
+
+			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+				if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
+					al_dma_unmap_info_ent_set(
+						&desc->unmap_info[umap_ent_cnt++],
+						dst[0],
+						len_orig,
+						PCI_DMA_FROMDEVICE,
+						(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+				}
+				if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
+					al_dma_unmap_info_ent_set(
+						&desc->unmap_info[umap_ent_cnt++],
+						dst[1],
+						len_orig,
+						PCI_DMA_FROMDEVICE,
+						(flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+				}
+			}
+
+			desc->umap_ent_cnt = umap_ent_cnt;
+		} else
+			desc->umap_ent_cnt = 0;
+
+		desc->txd.flags = flags;
+		desc->len = cur_len;
+		/* prepare hal transaction */
+		xaction = &desc->hal_xaction;
+		memset(xaction, 0, sizeof(struct al_raid_transaction));
+		xaction->op = AL_RAID_OP_PQ_CALC;
+		if (flags & DMA_PREP_PQ_DISABLE_P)
+			xaction->op = AL_RAID_OP_Q_CALC;
+		if (flags & DMA_PREP_PQ_DISABLE_Q)
+			xaction->op = AL_RAID_OP_P_CALC;
+		if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_INTERRUPT;
+		if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+			xaction->flags |= AL_SSM_BARRIER;
+
+		/* use bufs[0] and block[i] for source buffers/blocks */
+		for (i = 0; i < src_cnt; i++) {
+			desc->bufs[i].addr = src[i] + src_off;
+			desc->bufs[i].len = cur_len;
+			desc->blocks[i].bufs = &desc->bufs[i];
+			desc->blocks[i].num = 1;
+		}
+
+		xaction->srcs_blocks = &desc->blocks[0];
+		xaction->num_of_srcs = src_cnt;
+		xaction->total_src_bufs = src_cnt;
+
+		/* use bufs[1] and block[1] for destination buffers/blocks */
+		if (flags & DMA_PREP_PQ_DISABLE_P) {
+			/* Q only: single destination is dst[1] */
+			desc->bufs[src_cnt].addr = dst[1] + dst_off;
+			desc->bufs[src_cnt].len = cur_len;
+			desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt];
+			desc->blocks[src_cnt].num = 1;
+
+			xaction->num_of_dsts = 1;
+			xaction->total_dst_bufs = 1;
+		} else if (flags & DMA_PREP_PQ_DISABLE_Q) {
+			/* P only: single destination is dst[0] */
+			desc->bufs[src_cnt].addr = dst[0] + dst_off;
+			desc->bufs[src_cnt].len = cur_len;
+			desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt];
+			desc->blocks[src_cnt].num = 1;
+
+			xaction->num_of_dsts = 1;
+			xaction->total_dst_bufs = 1;
+		} else {
+			/* P and Q: two destinations, dst[0] then dst[1] */
+			desc->bufs[src_cnt].addr = dst[0] + dst_off;
+			desc->bufs[src_cnt].len = cur_len;
+			desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt];
+			desc->blocks[src_cnt].num = 1;
+
+			desc->bufs[src_cnt + 1].addr = dst[1] + dst_off;
+			desc->bufs[src_cnt + 1].len = cur_len;
+			desc->blocks[src_cnt + 1].bufs =
+				&desc->bufs[src_cnt + 1];
+			desc->blocks[src_cnt + 1].num = 1;
+
+			xaction->num_of_dsts = 2;
+			xaction->total_dst_bufs = 2;
+		}
+
+		xaction->dsts_blocks = &desc->blocks[src_cnt];
+
+		xaction->coefs = (uint8_t *)scf;
+
+		dev_dbg(
+			chan->device->common.dev,
+			"%s: xaction->flags = %08x\n",
+			__func__,
+			xaction->flags);
+
+		/* send raid transaction to engine */
+		rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+			&desc->hal_xaction);
+		if (unlikely(rc)) {
+			dev_err(
+				chan->device->common.dev,
+				"%s: al_raid_dma_prepare failed!\n", __func__);
+			spin_unlock_bh(&chan->prep_lock);
+			return NULL;
+		}
+
+		chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+		BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR);
+
+		/* advance to the next MAX_SIZE-sized chunk */
+		idx++;
+		sw_desc_num_req--;
+		len -= MAX_SIZE;
+		src_off += MAX_SIZE;
+		dst_off += MAX_SIZE;
+	}
+
+	AL_DMA_STATS_UPDATE(
+		chan,
+		chan->stats_prep.pq_num,
+		sw_desc_num_req_orig,
+		chan->stats_prep.pq_size,
+		len_orig);
+
+	al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+	return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_pq_val.c b/drivers/dma/al/al_dma_prep_pq_val.c
new file mode 100644
index 0000000..179f9d6
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_pq_val.c
@@ -0,0 +1,239 @@
+/*
+ * Annapurna Labs DMA Linux driver - PQ validation preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_PQ_VAL
+
+/******************************************************************************
+ *****************************************************************************/
+struct dma_async_tx_descriptor *al_dma_prep_pq_val_lock(
+ struct dma_chan *c,
+ dma_addr_t *pq,
+ dma_addr_t *src,
+ unsigned int src_cnt,
+ const unsigned char *scf,
+ size_t len,
+ enum sum_check_flags *pqres,
+ unsigned long flags)
+{
+ struct al_dma_chan *chan = to_al_dma_chan(c);
+ struct dma_async_tx_descriptor *txd = NULL;
+ int idx;
+ int32_t rc;
+ int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+ int sw_desc_num_req_orig = sw_desc_num_req;
+ size_t len_orig = len;
+ dma_addr_t src_off = 0;
+ int i;
+
+ unsigned char q_coefs[AL_DMA_OP_MAX_BLOCKS];
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: p=%X, q=%X, src=%X, cnt=%d, len=%d, flags=%08x\n",
+ __func__,
+ (unsigned int)pq[0],
+ (unsigned int)pq[1],
+ (unsigned int)src,
+ src_cnt,
+ len,
+ (unsigned int)flags);
+
+ if (unlikely((src_cnt + 2) > AL_DMA_MAX_XOR)) {
+ BUG();
+ return NULL;
+ }
+
+ if (unlikely(
+ (flags & DMA_PREP_PQ_DISABLE_P) &&
+ (flags & DMA_PREP_PQ_DISABLE_Q))) {
+ BUG();
+ return NULL;
+ }
+
+ if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+ idx = chan->head;
+ else {
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: al_dma_get_sw_desc_lock failed!\n",
+ __func__);
+
+ return NULL;
+ }
+
+ chan->sw_desc_num_locked = sw_desc_num_req;
+
+ if (unlikely(sw_desc_num_req > 1))
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: splitting transaction to %d sub-transactions\n\n",
+ __func__,
+ sw_desc_num_req);
+
+ memcpy(q_coefs, scf, src_cnt);
+
+ while (sw_desc_num_req) {
+ int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+ struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+ struct al_raid_transaction *xaction;
+
+ if (1 == sw_desc_num_req)
+ txd = &desc->txd;
+
+ if (1 == sw_desc_num_req) {
+ int umap_ent_cnt = 0;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ for (i = 0; i < src_cnt; i++) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ src[i],
+ len_orig,
+ PCI_DMA_TODEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ pq[0],
+ len_orig,
+ PCI_DMA_TODEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ pq[1],
+ len_orig,
+ PCI_DMA_TODEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+ }
+
+ desc->umap_ent_cnt = umap_ent_cnt;
+ } else
+ desc->umap_ent_cnt = 0;
+
+ desc->txd.flags = flags;
+ desc->len = cur_len;
+ /* prepare hal transaction */
+ xaction = &desc->hal_xaction;
+ memset(xaction, 0, sizeof(struct al_raid_transaction));
+ xaction->op = AL_RAID_OP_PQ_VAL;
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ xaction->op = AL_RAID_OP_Q_VAL;
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ xaction->op = AL_RAID_OP_P_VAL;
+ if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_INTERRUPT;
+ if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_BARRIER;
+
+ /* use bufs[0] and block[i] for source buffers/blocks */
+ for (i = 0; i < src_cnt; i++) {
+ desc->bufs[i].addr = src[i] + src_off;
+ desc->bufs[i].len = cur_len;
+ desc->blocks[i].bufs = &desc->bufs[i];
+ desc->blocks[i].num = 1;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
+ desc->bufs[i].addr = pq[1] + src_off;
+ desc->bufs[i].len = cur_len;
+ desc->blocks[i].bufs = &desc->bufs[i];
+ desc->blocks[i].num = 1;
+
+ q_coefs[i] = 1; /* Count Q */
+
+ xaction->q_index = i;
+
+ i++;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
+ desc->bufs[i].addr = pq[0] + src_off;
+ desc->bufs[i].len = cur_len;
+ desc->blocks[i].bufs = &desc->bufs[i];
+ desc->blocks[i].num = 1;
+
+ q_coefs[i] = 0; /* Ignore P */
+
+ i++;
+ }
+
+ xaction->srcs_blocks = &desc->blocks[0];
+ xaction->num_of_srcs = i;
+ xaction->total_src_bufs = i;
+
+ xaction->num_of_dsts = 0;
+ xaction->total_dst_bufs = 0;
+
+ xaction->coefs = (uint8_t *)q_coefs;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: xaction->flags = %08x\n",
+ __func__,
+ xaction->flags);
+
+ /* send raid transaction to engine */
+ rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+ &desc->hal_xaction);
+ if (unlikely(rc)) {
+ dev_err(
+ chan->device->common.dev,
+ "%s: al_raid_dma_prepare failed!\n", __func__);
+ spin_unlock_bh(&chan->prep_lock);
+ return NULL;
+ }
+
+ chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+ BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR);
+
+ desc->last_is_pq_val = 1;
+ desc->pq_val_res = pqres;
+ *desc->pq_val_res = 0;
+
+ idx++;
+ sw_desc_num_req--;
+ len -= MAX_SIZE;
+ src_off += MAX_SIZE;
+ }
+
+ AL_DMA_STATS_UPDATE(
+ chan,
+ chan->stats_prep.pq_val_num,
+ sw_desc_num_req_orig,
+ chan->stats_prep.pq_val_size,
+ len_orig);
+
+ al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+ return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_sg.c b/drivers/dma/al/al_dma_prep_sg.c
new file mode 100644
index 0000000..6dcc095
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_sg.c
@@ -0,0 +1,177 @@
+/*
+ * Annapurna Labs DMA Linux driver - SG Memory copy preparation
+ * Copyright(c) 2013 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include "al_dma.h"
+
+
+/******************************************************************************
+ *****************************************************************************/
+struct dma_async_tx_descriptor *al_dma_prep_sg_lock(
+ struct dma_chan *c,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct al_dma_chan *chan = to_al_dma_chan(c);
+ struct dma_async_tx_descriptor *txd = NULL;
+ int idx;
+ int32_t rc;
+ int sw_desc_num_req = 1;
+ struct scatterlist *sg;
+ size_t total_src_len = 0;
+ size_t total_dst_len = 0;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: chan->idx = %d, dests = %u, srcs = %u, flags = %08lx\n",
+ __func__,
+ chan->idx,
+ dst_nents,
+ src_nents,
+ flags);
+
+ BUG_ON(src_nents > AL_SSM_MAX_SRC_DESCS);
+ BUG_ON(dst_nents > AL_SSM_MAX_DST_DESCS);
+
+ if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+ idx = chan->head;
+ else {
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: al_dma_get_sw_desc_lock failed!\n",
+ __func__);
+
+ return NULL;
+ }
+
+ chan->sw_desc_num_locked = sw_desc_num_req;
+
+ while (sw_desc_num_req) {
+ struct al_raid_transaction *xaction;
+ struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+ int umap_ent_cnt = 0;
+ int i;
+
+ if (1 == sw_desc_num_req)
+ txd = &desc->txd;
+
+ desc->txd.flags = flags;
+ /* prepare hal transaction */
+ xaction = &desc->hal_xaction;
+ xaction->op = AL_RAID_OP_MEM_CPY;
+ if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_INTERRUPT;
+ if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_BARRIER;
+
+ /* use bufs[0] and block[0] for source buffers/blocks */
+ for_each_sg(src_sg, sg, src_nents, i) {
+ desc->bufs[i].addr = sg_dma_address(sg);
+ desc->bufs[i].len = sg_dma_len(sg);
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ desc->bufs[i].addr,
+ desc->bufs[i].len,
+ PCI_DMA_TODEVICE,
+ AL_UNMAP_PAGE);
+
+ total_src_len += desc->bufs[i].len;
+ dev_dbg(chan->device->common.dev,
+ "%s: src[%d] = %llx len 0x%x\n",
+ __func__, i, (unsigned long long)desc->bufs[i].addr,
+ desc->bufs[i].len);
+ }
+ desc->blocks[0].bufs = &desc->bufs[0];
+ desc->blocks[0].num = src_nents;
+ xaction->srcs_blocks = &desc->blocks[0];
+ xaction->num_of_srcs = 1;
+ xaction->total_src_bufs = src_nents;
+
+ /* use next bufs and block for destination buffers/blocks */
+ for_each_sg(dst_sg, sg, dst_nents, i) {
+ desc->bufs[src_nents + i].addr = sg_dma_address(sg);
+ desc->bufs[src_nents + i].len = sg_dma_len(sg);
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ desc->bufs[src_nents + i].addr,
+ desc->bufs[src_nents + i].len,
+ PCI_DMA_FROMDEVICE,
+ AL_UNMAP_PAGE);
+
+ total_dst_len += desc->bufs[src_nents + i].len;
+ dev_dbg(chan->device->common.dev,
+ "%s: dst[%d] = %llx len 0x%x\n",
+ __func__, i,
+ (unsigned long long)desc->bufs[src_nents + i].addr,
+ desc->bufs[src_nents + i].len);
+ }
+
+ desc->umap_ent_cnt = umap_ent_cnt;
+
+ desc->blocks[1].bufs = &desc->bufs[src_nents];
+ desc->blocks[1].num = dst_nents;
+ xaction->dsts_blocks = &desc->blocks[1];
+ xaction->num_of_dsts = 1;
+ xaction->total_dst_bufs = dst_nents;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: xaction->flags = %08x\n",
+ __func__,
+ xaction->flags);
+
+ if (total_src_len != total_dst_len) {
+ dev_err(chan->device->common.dev,
+ "%s: src len 0x%x doesn't match dst len 0x%x!\n",
+ __func__, total_src_len, total_dst_len);
+ }
+
+ /* send raid transaction to engine */
+ rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+ &desc->hal_xaction);
+ if (unlikely(rc)) {
+ dev_err(chan->device->common.dev,
+ "%s: al_raid_dma_prepare failed!\n", __func__);
+ spin_unlock_bh(&chan->prep_lock);
+ return NULL;
+ }
+
+ chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+
+ idx++;
+ sw_desc_num_req--;
+ }
+ AL_DMA_STATS_UPDATE(
+ chan,
+ chan->stats_prep.sg_memcpy_num,
+ 1,
+ chan->stats_prep.sg_memcpy_size,
+ total_src_len);
+
+
+ al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+ return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_xor.c b/drivers/dma/al/al_dma_prep_xor.c
new file mode 100644
index 0000000..13466ef
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_xor.c
@@ -0,0 +1,204 @@
+/*
+ * Annapurna Labs DMA Linux driver - XOR preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_XOR
+
+/******************************************************************************
+ *****************************************************************************/
+struct dma_async_tx_descriptor *al_dma_prep_xor_lock(
+ struct dma_chan *c,
+ dma_addr_t dest,
+ dma_addr_t *src,
+ unsigned int src_cnt,
+ size_t len,
+ unsigned long flags)
+{
+ struct al_dma_chan *chan = to_al_dma_chan(c);
+ struct dma_async_tx_descriptor *txd = NULL;
+ int idx;
+ int32_t rc;
+ int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+ int sw_desc_num_req_orig = sw_desc_num_req;
+ size_t len_orig = len;
+ dma_addr_t src_off = 0;
+ dma_addr_t dest_orig = dest;
+ int i;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: chan->idx = %d, dest = %X, src = %X, cnt = %d, len = %d,"
+ " flags = %08x\n",
+ __func__,
+ chan->idx,
+ (unsigned int)dest,
+ (unsigned int)src,
+ src_cnt,
+ len,
+ (unsigned int)flags);
+
+ if (unlikely(src_cnt > AL_DMA_MAX_XOR)) {
+ BUG();
+ return NULL;
+ }
+
+ if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+ idx = chan->head;
+ else {
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: al_dma_get_sw_desc_lock failed!\n",
+ __func__);
+
+ return NULL;
+ }
+
+ chan->sw_desc_num_locked = sw_desc_num_req;
+
+ if (unlikely(sw_desc_num_req > 1))
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: splitting transaction to %d sub-transactions\n\n",
+ __func__,
+ sw_desc_num_req);
+
+ while (sw_desc_num_req) {
+ int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+ struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+ struct al_raid_transaction *xaction;
+
+ if (1 == sw_desc_num_req)
+ txd = &desc->txd;
+
+ if (1 == sw_desc_num_req) {
+ int umap_ent_cnt = 0;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ for (i = 0; i < src_cnt; i++) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ src[i],
+ len_orig,
+ PCI_DMA_TODEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ dest_orig,
+ len_orig,
+ PCI_DMA_FROMDEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+
+ desc->umap_ent_cnt = umap_ent_cnt;
+ } else
+ desc->umap_ent_cnt = 0;
+
+ desc->txd.flags = flags;
+ desc->len = cur_len;
+ /* prepare hal transaction */
+ xaction = &desc->hal_xaction;
+ memset(xaction, 0, sizeof(struct al_raid_transaction));
+ xaction->op = AL_RAID_OP_P_CALC;
+ if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_INTERRUPT;
+ if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_BARRIER;
+
+ /* use bufs[0] and block[i] for source buffers/blocks */
+ for (i = 0; i < src_cnt; i++) {
+ desc->bufs[i].addr = src[i] + src_off;
+ desc->bufs[i].len = cur_len;
+ desc->blocks[i].bufs = &desc->bufs[i];
+ desc->blocks[i].num = 1;
+ }
+
+ xaction->srcs_blocks = &desc->blocks[0];
+ xaction->num_of_srcs = src_cnt;
+ xaction->total_src_bufs = src_cnt;
+
+ /* use bufs[1] and block[1] for destination buffers/blocks */
+ desc->bufs[src_cnt].addr = dest;
+ desc->bufs[src_cnt].len = cur_len;
+ desc->blocks[src_cnt].bufs = &desc->bufs[src_cnt];
+ desc->blocks[src_cnt].num = 1;
+
+ xaction->dsts_blocks = &desc->blocks[src_cnt];
+ xaction->num_of_dsts = 1;
+ xaction->total_dst_bufs = 1;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: xaction->flags = %08x\n",
+ __func__,
+ xaction->flags);
+
+ /* send raid transaction to engine */
+ rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+ &desc->hal_xaction);
+ if (unlikely(rc)) {
+ dev_err(
+ chan->device->common.dev,
+ "%s: al_raid_dma_prepare failed!\n", __func__);
+ spin_unlock_bh(&chan->prep_lock);
+ return NULL;
+ }
+
+ chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+ BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR);
+
+ desc->last_is_xor = 1;
+
+#ifdef AL_DMA_XOR_VALIDATION
+ desc->xor_dest = phys_to_virt(dest);
+ desc->xor_len = cur_len;
+ desc->xor_src_cnt = src_cnt;
+
+ for (i = 0; i < src_cnt; i++)
+ desc->xor_src[i] = phys_to_virt(src[i] + src_off);
+#endif
+
+ idx++;
+ sw_desc_num_req--;
+ len -= MAX_SIZE;
+ dest += MAX_SIZE;
+ src_off += MAX_SIZE;
+ }
+
+ AL_DMA_STATS_UPDATE(
+ chan,
+ chan->stats_prep.xor_num,
+ sw_desc_num_req_orig,
+ chan->stats_prep.xor_size,
+ len_orig);
+
+ al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+ return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_prep_xor_val.c b/drivers/dma/al/al_dma_prep_xor_val.c
new file mode 100644
index 0000000..979a42f
--- /dev/null
+++ b/drivers/dma/al/al_dma_prep_xor_val.c
@@ -0,0 +1,183 @@
+/*
+ * Annapurna Labs DMA Linux driver - XOR validation preparation
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "al_dma.h"
+
+#define MAX_SIZE AL_DMA_MAX_SIZE_XOR_VAL
+
+/******************************************************************************
+ *****************************************************************************/
+struct dma_async_tx_descriptor *al_dma_prep_xor_val_lock(
+ struct dma_chan *c,
+ dma_addr_t *src,
+ unsigned int src_cnt,
+ size_t len,
+ enum sum_check_flags *result,
+ unsigned long flags)
+{
+ struct al_dma_chan *chan = to_al_dma_chan(c);
+ struct dma_async_tx_descriptor *txd = NULL;
+ int idx;
+ int32_t rc;
+ int sw_desc_num_req = ALIGN(len, MAX_SIZE) / MAX_SIZE;
+ int sw_desc_num_req_orig = sw_desc_num_req;
+ size_t len_orig = len;
+ dma_addr_t src_off = 0;
+ int i;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: src = %X, cnt = %d, len = %d, flags = %08x\n",
+ __func__,
+ (unsigned int)src,
+ src_cnt,
+ len,
+ (unsigned int)flags);
+
+ if (unlikely(src_cnt > AL_DMA_MAX_XOR)) {
+ BUG();
+ return NULL;
+ }
+
+ if (likely(al_dma_get_sw_desc_lock(chan, sw_desc_num_req) == 0))
+ idx = chan->head;
+ else {
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: al_dma_get_sw_desc_lock failed!\n",
+ __func__);
+
+ return NULL;
+ }
+
+ chan->sw_desc_num_locked = sw_desc_num_req;
+
+ if (unlikely(sw_desc_num_req > 1))
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: splitting transaction to %d sub-transactions\n\n",
+ __func__,
+ sw_desc_num_req);
+
+ while (sw_desc_num_req) {
+ int cur_len = (len > MAX_SIZE) ? MAX_SIZE : len;
+
+ struct al_dma_sw_desc *desc = al_dma_get_ring_ent(chan, idx);
+
+ struct al_raid_transaction *xaction;
+
+ if (1 == sw_desc_num_req)
+ txd = &desc->txd;
+
+ if (1 == sw_desc_num_req) {
+ int umap_ent_cnt = 0;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ for (i = 0; i < src_cnt; i++) {
+ al_dma_unmap_info_ent_set(
+ &desc->unmap_info[umap_ent_cnt++],
+ src[i],
+ len_orig,
+ PCI_DMA_TODEVICE,
+ (flags & DMA_COMPL_SRC_UNMAP_SINGLE) ? AL_UNMAP_SINGLE : AL_UNMAP_PAGE);
+ }
+ }
+
+ desc->umap_ent_cnt = umap_ent_cnt;
+ } else
+ desc->umap_ent_cnt = 0;
+
+ desc->txd.flags = flags;
+ desc->len = cur_len;
+ /* prepare hal transaction */
+ xaction = &desc->hal_xaction;
+ memset(xaction, 0, sizeof(struct al_raid_transaction));
+ xaction->op = AL_RAID_OP_P_VAL;
+ if ((flags & DMA_PREP_INTERRUPT) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_INTERRUPT;
+ if ((flags & DMA_PREP_FENCE) && (1 == sw_desc_num_req))
+ xaction->flags |= AL_SSM_BARRIER;
+
+ if (flags & (~(DMA_PREP_INTERRUPT | DMA_PREP_FENCE)))
+ dev_err(
+ chan->device->common.dev,
+ "%s: flags = %08x\n",
+ __func__,
+ (unsigned int)flags);
+
+ /* use bufs[0] and block[i] for source buffers/blocks */
+ for (i = 0; i < src_cnt; i++) {
+ desc->bufs[i].addr = src[i] + src_off;
+ desc->bufs[i].len = cur_len;
+ desc->blocks[i].bufs = &desc->bufs[i];
+ desc->blocks[i].num = 1;
+ }
+
+ xaction->srcs_blocks = &desc->blocks[0];
+ xaction->num_of_srcs = i;
+ xaction->total_src_bufs = i;
+
+ xaction->num_of_dsts = 0;
+ xaction->total_dst_bufs = 0;
+
+ dev_dbg(
+ chan->device->common.dev,
+ "%s: xaction->flags = %08x\n",
+ __func__,
+ xaction->flags);
+
+ /* send raid transaction to engine */
+ rc = al_raid_dma_prepare(chan->hal_raid, chan->idx,
+ &desc->hal_xaction);
+ if (unlikely(rc)) {
+ dev_err(
+ chan->device->common.dev,
+ "%s: al_raid_dma_prepare failed!\n", __func__);
+ spin_unlock_bh(&chan->prep_lock);
+ return NULL;
+ }
+
+ chan->tx_desc_produced += desc->hal_xaction.tx_descs_count;
+ BUG_ON(desc->hal_xaction.tx_descs_count > AL_DMA_MAX_XOR);
+
+ desc->last_is_xor_val = 1;
+ desc->xor_val_res = result;
+ *desc->xor_val_res = 0;
+
+ idx++;
+ sw_desc_num_req--;
+ len -= MAX_SIZE;
+ src_off += MAX_SIZE;
+ }
+
+ AL_DMA_STATS_UPDATE(
+ chan,
+ chan->stats_prep.xor_val_num,
+ sw_desc_num_req_orig,
+ chan->stats_prep.xor_val_size,
+ len_orig);
+
+ al_dma_tx_submit_sw_cond_unlock(chan, txd);
+
+ return txd;
+}
+
diff --git a/drivers/dma/al/al_dma_sysfs.c b/drivers/dma/al/al_dma_sysfs.c
new file mode 100644
index 0000000..62e2139
--- /dev/null
+++ b/drivers/dma/al/al_dma_sysfs.c
@@ -0,0 +1,443 @@
+/*
+ * Annapurna Labs DMA Linux driver - sysfs support
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "al_dma.h"
+#include "al_hal_udma_debug.h"
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+#define DEVICE_STATS_PREP_ATTR(_name) {\
+ __ATTR(stats_prep_##_name, S_IRUGO, rd_stats_prep, NULL),\
+ (void*)offsetof(struct al_dma_chan_stats_prep, _name) }
+
+#define DEVICE_STATS_COMP_ATTR(_name) {\
+ __ATTR(stats_comp_##_name, S_IRUGO, rd_stats_comp, NULL),\
+ (void*)offsetof(struct al_dma_chan_stats_comp, _name) }
+
+#define UDMA_DUMP_PREP_ATTR(_name, _type) {\
+	__ATTR(udma_dump_##_name, S_IRUGO|S_IWUSR, rd_udma_dump, wr_udma_dump),\
+	(void *)(uintptr_t)_type }
+
+enum udma_dump_type {
+ UDMA_DUMP_M2S_REGS,
+ UDMA_DUMP_M2S_Q_STRUCT,
+ UDMA_DUMP_M2S_Q_POINTERS,
+ UDMA_DUMP_S2M_REGS,
+ UDMA_DUMP_S2M_Q_STRUCT,
+ UDMA_DUMP_S2M_Q_POINTERS
+};
+
+#ifdef CONFIG_AL_DMA_STATS
+static ssize_t rd_stats_prep(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t rd_stats_comp(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t rd_stats_rst(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t wr_stats_rst(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count);
+
+static ssize_t rd_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t wr_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count);
+#endif
+
+#ifdef CONFIG_AL_DMA_STATS
+struct dev_ext_attribute dev_attr_stats_prep[] = {
+ DEVICE_STATS_PREP_ATTR(int_num),
+ DEVICE_STATS_PREP_ATTR(memcpy_num),
+ DEVICE_STATS_PREP_ATTR(memcpy_size),
+ DEVICE_STATS_PREP_ATTR(sg_memcpy_num),
+ DEVICE_STATS_PREP_ATTR(sg_memcpy_size),
+ DEVICE_STATS_PREP_ATTR(memset_num),
+ DEVICE_STATS_PREP_ATTR(memset_size),
+ DEVICE_STATS_PREP_ATTR(xor_num),
+ DEVICE_STATS_PREP_ATTR(xor_size),
+ DEVICE_STATS_PREP_ATTR(pq_num),
+ DEVICE_STATS_PREP_ATTR(pq_size),
+ DEVICE_STATS_PREP_ATTR(pq_val_num),
+ DEVICE_STATS_PREP_ATTR(pq_val_size),
+ DEVICE_STATS_PREP_ATTR(xor_val_num),
+ DEVICE_STATS_PREP_ATTR(xor_val_size),
+ DEVICE_STATS_PREP_ATTR(matching_cpu),
+ DEVICE_STATS_PREP_ATTR(mismatching_cpu),
+};
+
+struct dev_ext_attribute dev_attr_stats_comp[] = {
+ DEVICE_STATS_COMP_ATTR(redundant_int_cnt),
+ DEVICE_STATS_COMP_ATTR(matching_cpu),
+ DEVICE_STATS_COMP_ATTR(mismatching_cpu),
+};
+
+/* Device attrs - udma debug */
+static struct dev_ext_attribute dev_attr_udma_debug[] = {
+ UDMA_DUMP_PREP_ATTR(m2s_regs, UDMA_DUMP_M2S_REGS),
+ UDMA_DUMP_PREP_ATTR(m2s_q_struct, UDMA_DUMP_M2S_Q_STRUCT),
+ UDMA_DUMP_PREP_ATTR(m2s_q_pointers, UDMA_DUMP_M2S_Q_POINTERS),
+ UDMA_DUMP_PREP_ATTR(s2m_regs, UDMA_DUMP_S2M_REGS),
+ UDMA_DUMP_PREP_ATTR(s2m_q_struct, UDMA_DUMP_S2M_Q_STRUCT),
+ UDMA_DUMP_PREP_ATTR(s2m_q_pointers, UDMA_DUMP_S2M_Q_POINTERS)
+};
+
+static DEVICE_ATTR(stats_rst, S_IRUGO | S_IWUSR, rd_stats_rst, wr_stats_rst);
+#endif
+
+/******************************************************************************
+ *****************************************************************************/
+int al_dma_sysfs_init(
+ struct device *dev)
+{
+ int status = 0;
+
+#ifdef CONFIG_AL_DMA_STATS
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_stats_prep); i++) {
+ status = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_stats_prep[i].attr.attr);
+ if (status) {
+ dev_err(
+ dev,
+ "%s: sysfs_create_file(stats_prep %d) failed\n",
+ __func__,
+ i);
+ goto done;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_stats_comp); i++) {
+ status = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_stats_comp[i].attr.attr);
+ if (status) {
+ dev_err(
+ dev,
+ "%s: sysfs_create_file(stats_comp %d) failed\n",
+ __func__,
+ i);
+ goto done;
+ }
+ }
+
+ status = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_stats_rst.attr);
+ if (status) {
+ dev_err(
+ dev,
+ "%s: sysfs_create_file(stats_rst) failed\n",
+ __func__);
+ goto done;
+ }
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++) {
+ status = sysfs_create_file(
+ &dev->kobj,
+ &dev_attr_udma_debug[i].attr.attr);
+ if (status) {
+ dev_err(
+ dev,
+ "%s: sysfs_create_file(stats_udma %d) failed\n",
+ __func__,
+ i);
+ goto done;
+ }
+ }
+done:
+#endif
+
+ return status;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+void al_dma_sysfs_terminate(
+ struct device *dev)
+{
+#ifdef CONFIG_AL_DMA_STATS
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_stats_prep); i++)
+ sysfs_remove_file(
+ &dev->kobj,
+ &dev_attr_stats_prep[i].attr.attr);
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_stats_comp); i++)
+ sysfs_remove_file(
+ &dev->kobj,
+ &dev_attr_stats_comp[i].attr.attr);
+
+ sysfs_remove_file(&dev->kobj, &dev_attr_stats_rst.attr);
+
+ for (i = 0; i < ARRAY_SIZE(dev_attr_udma_debug); i++) {
+ sysfs_remove_file(
+ &dev->kobj,
+ &dev_attr_udma_debug[i].attr.attr);
+ }
+
+#endif
+}
+
+#ifdef CONFIG_AL_DMA_STATS
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_stats_prep(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct al_dma_device *device = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ uintptr_t offset = (uintptr_t)ea->var;
+ int i;
+ ssize_t size = 0;
+
+ size += sprintf(
+ &buf[size],
+ "statistics - %s:\n",
+ attr->attr.name);
+
+ for (i = 0; i < device->max_channels; i++) {
+ uint64_t val;
+
+ spin_lock_bh(&device->channels[i]->prep_lock);
+
+ val = *(uint64_t *)(((uint8_t*)&device->channels[i]
+ ->stats_prep) + offset);
+
+ spin_unlock_bh(&device->channels[i]->prep_lock);
+
+ size += sprintf(
+ &buf[size],
+ "chan[%d] %llu\n",
+ i,
+ val);
+ }
+
+ return size;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_stats_comp(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct al_dma_device *device = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ uintptr_t offset = (uintptr_t)ea->var;
+ int i;
+ ssize_t size = 0;
+
+ size += sprintf(
+ &buf[size],
+ "statistics - %s:\n",
+ attr->attr.name);
+
+ for (i = 0; i < device->max_channels; i++) {
+ uint64_t val;
+
+ spin_lock_bh(&device->channels[i]->cleanup_lock);
+
+ val = *(uint64_t *)(((uint8_t*)&device->channels[i]
+ ->stats_comp) + offset);
+
+ spin_unlock_bh(&device->channels[i]->cleanup_lock);
+
+ size += sprintf(
+ &buf[size],
+ "chan[%d] %llu\n",
+ i,
+ val);
+ }
+
+ return size;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_stats_rst(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(
+ buf,
+ "Write anything to clear all statistics\n");
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_stats_rst(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct al_dma_device *device = dev_get_drvdata(dev);
+
+ int i;
+
+ for (i = 0; i < device->max_channels; i++) {
+ spin_lock_bh(&device->channels[i]->prep_lock);
+ spin_lock_bh(&device->channels[i]->cleanup_lock);
+
+ memset(
+ &device->channels[i]->stats_prep,
+ 0,
+ sizeof(struct al_dma_chan_stats_prep));
+
+ memset(
+ &device->channels[i]->stats_comp,
+ 0,
+ sizeof(struct al_dma_chan_stats_comp));
+
+ spin_unlock_bh(&device->channels[i]->cleanup_lock);
+ spin_unlock_bh(&device->channels[i]->prep_lock);
+ }
+
+	return count;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t rd_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)(uintptr_t)ea->var;
+ ssize_t rc = 0;
+
+ switch (dump_type) {
+ case UDMA_DUMP_M2S_REGS:
+ case UDMA_DUMP_S2M_REGS:
+ rc = sprintf(
+ buf,
+ "Write mask to dump corresponding udma regs\n");
+ break;
+ case UDMA_DUMP_M2S_Q_STRUCT:
+ case UDMA_DUMP_S2M_Q_STRUCT:
+ rc = sprintf(
+ buf,
+ "Write q num to dump correspoding q struct\n");
+ break;
+ case UDMA_DUMP_M2S_Q_POINTERS:
+ case UDMA_DUMP_S2M_Q_POINTERS:
+ rc = sprintf(
+ buf,
+ "Write q num (in hex) and add 1 for submission ring,"
+ " for ex:\n"
+ "0 for completion ring of q 0\n"
+ "10 for submission ring of q 0\n");
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/******************************************************************************
+ *****************************************************************************/
+static ssize_t wr_udma_dump(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int err;
+ int q_id;
+ unsigned long val;
+ struct al_udma* dma;
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+	enum udma_dump_type dump_type = (enum udma_dump_type)(uintptr_t)ea->var;
+ enum al_udma_ring_type ring_type = AL_RING_COMPLETION;
+ struct al_dma_device *device = dev_get_drvdata(dev);
+
+ err = kstrtoul(buf, 16, &val);
+ if (err < 0)
+ return err;
+
+ switch (dump_type) {
+ case UDMA_DUMP_M2S_REGS:
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+ al_udma_regs_print(dma, val);
+ break;
+ case UDMA_DUMP_S2M_REGS:
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+ al_udma_regs_print(dma, val);
+ break;
+ case UDMA_DUMP_M2S_Q_STRUCT:
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+ al_udma_q_struct_print(dma, val);
+ break;
+ case UDMA_DUMP_S2M_Q_STRUCT:
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+ al_udma_q_struct_print(dma, val);
+ break;
+ case UDMA_DUMP_M2S_Q_POINTERS:
+ if (val & 0x10)
+ ring_type = AL_RING_SUBMISSION;
+ q_id = val & 0xf;
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_TX, &dma);
+ al_udma_ring_print(dma, q_id, ring_type);
+ break;
+ case UDMA_DUMP_S2M_Q_POINTERS:
+ if (val & 0x10)
+ ring_type = AL_RING_SUBMISSION;
+ q_id = val & 0xf;
+ al_ssm_dma_handle_get(&device->hal_raid, UDMA_RX, &dma);
+ al_udma_ring_print(dma, q_id, ring_type);
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+#endif
+
diff --git a/drivers/dma/al/al_dma_sysfs.h b/drivers/dma/al/al_dma_sysfs.h
new file mode 100644
index 0000000..f3733f2
--- /dev/null
+++ b/drivers/dma/al/al_dma_sysfs.h
@@ -0,0 +1,28 @@
+/*
+ * Annapurna Labs DMA Linux driver - sysfs support declarations
+ * Copyright(c) 2011 Annapurna Labs.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+int al_dma_sysfs_init(
+ struct device *dev);
+
+void al_dma_sysfs_terminate(
+ struct device *dev);
+
diff --git a/drivers/dma/al/al_hal_ssm_raid.c b/drivers/dma/al/al_hal_ssm_raid.c
new file mode 100644
index 0000000..dd1cca0d
--- /dev/null
+++ b/drivers/dma/al/al_hal_ssm_raid.c
@@ -0,0 +1,575 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @{
+ * @file al_hal_ssm_raid.c
+ *
+ */
+
+#include "al_hal_ssm.h"
+#include "al_hal_ssm_raid.h"
+#include "al_hal_ssm_raid_regs.h"
+
+#define RX_COMP_STATUS_MASK \
+ (AL_RAID_P_VAL_ERROR | AL_RAID_Q_VAL_ERROR |\
+ AL_RAID_BUS_PARITY_ERROR | AL_RAID_SOURCE_LEN_ERROR |\
+ AL_RAID_CMD_DECODE_ERROR | AL_RAID_INTERNAL_ERROR |\
+ AL_RAID_REDIRECTED_TRANSACTION |\
+ AL_RAID_REDIRECTED_SRC_UDMA |\
+ AL_RAID_REDIRECTED_SRC_QUEUE)
+
+/** operation attributes */
+struct al_op_attr {
+	uint32_t opcode; /* hw opcode */
+	uint32_t meta_descs; /* number of meta descs needed for the operation */
+	al_bool have_g_coef; /* op carries a per-source Q (Galois) coefficient */
+	al_bool have_p_coef; /* op sets the per-source P-enable attribute */
+};
+
+/* per-opcode attributes; indexed by enum al_raid_op */
+static const struct al_op_attr op_attr_table[] = {
+	/* opcode meta g coef p coef */
+	{AL_RAID_MEM_CPY_OPCODE, 0, AL_FALSE, AL_FALSE},
+	{AL_RAID_MEM_SET_OPCODE, 1, AL_FALSE, AL_FALSE},
+	{AL_RAID_MEM_SCRUB_OPCODE, 0, AL_FALSE, AL_FALSE},
+	{AL_RAID_MEM_CMP_OPCODE, 4, AL_FALSE, AL_FALSE},
+	{AL_RAID_NOP_OPCODE, 1, AL_FALSE, AL_FALSE},
+	{AL_RAID_P_CALC_OPCODE, 0, AL_FALSE, AL_TRUE},
+	{AL_RAID_Q_CALC_OPCODE, 0, AL_TRUE, AL_FALSE},
+	{AL_RAID_PQ_CALC_OPCODE, 0, AL_TRUE, AL_TRUE},
+	{AL_RAID_P_VAL_OPCODE, 0, AL_FALSE, AL_TRUE},
+	{AL_RAID_Q_VAL_OPCODE, 0, AL_TRUE, AL_FALSE},
+	{AL_RAID_PQ_VAL_OPCODE, 0, AL_TRUE, AL_TRUE},
+};
+
+#define GF_SIZE 256
+
+/* GF(2^8) logarithm table loaded into the hw by al_raid_init();
+ * entry 0 (log of 0 is undefined) holds 0xff */
+static const uint8_t gflog[GF_SIZE] = {
+	0xff, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6,
+	0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b,
+	0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81,
+	0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71,
+	0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21,
+	0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45,
+	0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9,
+	0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6,
+	0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd,
+	0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88,
+	0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd,
+	0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40,
+	0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e,
+	0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d,
+	0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b,
+	0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57,
+	0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d,
+	0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18,
+	0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c,
+	0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e,
+	0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd,
+	0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61,
+	0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e,
+	0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2,
+	0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76,
+	0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6,
+	0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa,
+	0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a,
+	0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51,
+	0x0b, 0xf5, 0x16, 0xeb, 0x7a, 0x75, 0x2c, 0xd7,
+	0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8,
+	0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf
+};
+
+/* GF(2^8) anti-log (exponent) table - inverse of gflog[]
+ * (presumably the standard RAID-6 polynomial 0x11d; verify vs hw spec) */
+static const uint8_t gfilog[GF_SIZE] = {
+	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+	0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26,
+	0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9,
+	0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0,
+	0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35,
+	0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23,
+	0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0,
+	0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1,
+	0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc,
+	0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0,
+	0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f,
+	0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2,
+	0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88,
+	0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce,
+	0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93,
+	0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc,
+	0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9,
+	0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54,
+	0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa,
+	0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73,
+	0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e,
+	0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff,
+	0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4,
+	0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41,
+	0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e,
+	0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6,
+	0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef,
+	0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09,
+	0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5,
+	0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16,
+	0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83,
+	0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x00
+};
+
+/**
+ * load a 256-entry GF(2^8) table into the hw table registers
+ *
+ * @param app_regs RAID accelerator application registers
+ * @param is_gflog non-zero: load into the GFLOG table registers,
+ *	zero: load into the GFILOG table registers
+ * @param table the 256-byte table to load; packed 4 entries per 32-bit
+ *	register, entry i in bits 8*(i%4)..8*(i%4)+7 of word i/4
+ */
+static void _al_raid_load_table(
+	struct raid_accelerator_regs __iomem *app_regs,
+	int is_gflog,
+	const uint8_t *table)
+{
+	uint32_t *base_reg;
+	int i;
+
+	/* parameter renamed from 'gflog' to 'is_gflog': the old name
+	 * shadowed the file-scope gflog[] table (-Wshadow) */
+	if (is_gflog)
+		base_reg = &app_regs->gflog_table[0].w0_raw;
+	else
+		base_reg = &app_regs->gfilog_table[0].w0_r;
+
+	for (i = 0; i < GF_SIZE/4; i++) {
+		int table_idx = i << 2; /* *4 */
+		uint32_t reg = (table[table_idx + 3] << 24) |
+			(table[table_idx + 2] << 16) |
+			(table[table_idx + 1] << 8) |
+			table[table_idx];
+
+		al_reg_write32(base_reg + i, reg);
+	}
+}
+
+/**
+ * get number of rx submission descriptors needed to the transaction
+ *
+ * we need rx descriptor for each destination buffer.
+ * if the transaction doesn't have destination buffers, then one
+ * descriptor is needed
+ *
+ * @param xaction transaction context
+ *
+ * @return number of rx submission descriptors
+ */
+INLINE uint32_t _al_raid_xaction_rx_descs_count(
+	struct al_raid_transaction *xaction)
+{
+	return xaction->total_dst_bufs ? xaction->total_dst_bufs : 1;
+}
+
+/**
+ * get number of tx submission descriptors needed to the transaction
+ *
+ * we need tx descriptor for each source buffer.
+ * MEM_SET needs 1 Meta descriptor, MEM_CMP needs 4
+ * and we need at least one descriptor for the opcode
+ *
+ * @param xaction transaction context
+ * @param meta number of meta descriptors
+ *
+ * @return number of tx submission descriptors (always >= 1)
+ */
+INLINE uint32_t _al_raid_xaction_tx_descs_count(
+	struct al_raid_transaction *xaction,
+	uint32_t meta)
+{
+	uint32_t count = xaction->total_src_bufs + meta;
+	/* at least one descriptor is needed to carry the opcode */
+	return count ? count : 1;
+}
+
+/**
+ * prepare the rx submission descriptors
+ * this function writes the contents of the rx submission descriptors
+ * @param rx_udma_q rx udma handle
+ * @param xaction transaction context
+ */
+static void _al_raid_set_rx_descs(
+	struct al_udma_q *rx_udma_q,
+	struct al_raid_transaction *xaction)
+{
+	uint32_t flags = 0;
+	union al_udma_desc *rx_desc;
+	struct al_block *block;
+	uint32_t blk_idx;
+
+	/* translate transaction flags to per-descriptor flags */
+	if (xaction->flags & AL_SSM_INTERRUPT)
+		flags = AL_M2S_DESC_INT_EN;
+	if (xaction->flags & AL_SSM_DEST_NO_SNOOP)
+		flags |= AL_M2S_DESC_NO_SNOOP_H;
+
+	/* if the xaction doesn't have destination buffers, allocate single
+	   Meta descriptor */
+	if (xaction->total_dst_bufs == 0) {
+		rx_desc = al_udma_desc_get(rx_udma_q);
+		flags |= al_udma_ring_id_get(rx_udma_q) <<
+			AL_M2S_DESC_RING_ID_SHIFT;
+		flags |= AL_RAID_RX_DESC_META;
+		/* write back flags */
+		rx_desc->rx.len_ctrl = swap32_to_le(flags);
+		return;
+	}
+
+	/* for each desc set buffer length, address */
+	block = xaction->dsts_blocks;
+	for (blk_idx = 0; blk_idx < xaction->num_of_dsts; blk_idx++) {
+		struct al_buf *buf = block->bufs;
+		unsigned int buf_idx = 0;
+		for (; buf_idx < block->num; buf_idx++) {
+			/* the VMID rides in the high bits of the buffer
+			 * pointer word */
+			uint64_t vmid = ((uint64_t)block->vmid) <<
+				AL_UDMA_DESC_VMID_SHIFT;
+			uint32_t flags_len = flags;
+			uint32_t ring_id;
+
+			rx_desc = al_udma_desc_get(rx_udma_q);
+			/* get ring id */
+			ring_id = al_udma_ring_id_get(rx_udma_q)
+				<< AL_M2S_DESC_RING_ID_SHIFT;
+
+			flags_len |= ring_id;
+
+			flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+			rx_desc->rx.len_ctrl = swap32_to_le(flags_len);
+			rx_desc->rx.buf1_ptr = swap64_to_le(buf->addr | vmid);
+			buf++;
+		}
+		block++;
+	}
+}
+
+/**
+ * calculate the total length of all destination (rx) buffers
+ *
+ * @param xaction transaction context
+ *
+ * @return sum of buffer lengths over all destination blocks
+ */
+static uint32_t _al_raid_rx_get_len(struct al_raid_transaction *xaction)
+{
+	uint32_t total_len = 0;
+	uint32_t blk_idx;
+	struct al_block *block = xaction->dsts_blocks;
+
+	for (blk_idx = 0; blk_idx < xaction->num_of_dsts; blk_idx++) {
+		struct al_buf *buf = block->bufs;
+		unsigned int buf_idx = 0;
+		for (; buf_idx < block->num; buf_idx++) {
+			total_len += buf->len;
+			buf++;
+		}
+		/* advance to the next destination block; without this
+		 * the first block's buffers were summed num_of_dsts
+		 * times (cf. _al_raid_set_rx_descs, which does block++) */
+		block++;
+	}
+	return total_len;
+}
+
+/**
+ * fill the tx submission descriptors
+ * this function writes the contents of the tx submission descriptors
+ * @param tx_udma_q tx udma handle
+ * @param xaction transaction context
+ * @param meta number of meta descriptors used by this xaction
+ * @param op_attr operation attributes
+ */
+static void _al_raid_set_tx_descs(
+	struct al_udma_q *tx_udma_q,
+	struct al_raid_transaction *xaction,
+	uint32_t meta,
+	const struct al_op_attr *op_attr)
+{
+	union al_udma_desc *tx_desc;
+	uint32_t flags = AL_M2S_DESC_FIRST;
+	struct al_block *block;
+	uint32_t blk_idx;
+
+
+	/* first pass: emit the meta descriptors (opcode + op-specific data) */
+	for (blk_idx = 0; blk_idx < meta; blk_idx++) {
+		uint32_t flags_len = flags;
+		uint32_t ring_id;
+
+		/* clear first flags */
+		flags = 0;
+
+		/* get next descriptor */
+		tx_desc = al_udma_desc_get(tx_udma_q);
+		/* get ring id */
+		ring_id = al_udma_ring_id_get(tx_udma_q) <<
+			AL_M2S_DESC_RING_ID_SHIFT;
+
+		flags_len |= ring_id;
+
+		/* write descriptor's flags */
+		flags_len |= AL_M2S_DESC_META_DATA;
+
+		/* set LAST flag only when no source descriptors follow */
+		if ((blk_idx == (meta - 1)) && (xaction->num_of_srcs == 0)) {
+			flags_len |= AL_M2S_DESC_LAST;
+			if (xaction->flags & AL_SSM_BARRIER)
+				flags_len |= AL_M2S_DESC_DMB;
+		}
+
+		if (blk_idx == 0) {
+			uint32_t attr = op_attr->opcode;
+
+			if (xaction->op == AL_RAID_OP_MEM_SET)
+				attr |= xaction->mem_set_flags;
+
+			/* write opcode in first descriptor */
+			tx_desc->tx.meta_ctrl = swap32_to_le(attr);
+		}
+		/* write meta data */
+		if (xaction->op == AL_RAID_OP_MEM_SET) {
+			/* memset needs length in meta desc */
+			uint32_t total_len = _al_raid_rx_get_len(xaction);
+			flags_len |= total_len & AL_M2S_DESC_LEN_MASK;
+			/* NOTE(review): meta1 takes data[1] and meta2 takes
+			 * data[0] - verify the intended word ordering */
+			tx_desc->tx_meta.meta1 = xaction->data[1];
+			tx_desc->tx_meta.meta2 = xaction->data[0];
+		} else if (xaction->op == AL_RAID_OP_MEM_CMP) {
+			tx_desc->tx_meta.meta1 = xaction->pattern_data[blk_idx];
+			tx_desc->tx_meta.meta2 = xaction->pattern_mask[blk_idx];
+		}
+		tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+	}
+
+	if (xaction->flags & AL_SSM_SRC_NO_SNOOP)
+		flags |= AL_M2S_DESC_NO_SNOOP_H;
+	if (xaction->flags & AL_SSM_BARRIER)
+		flags |= AL_M2S_DESC_DMB;
+
+	/* second pass: one descriptor per source buffer;
+	 * for each desc set buffer length, address */
+	block = xaction->srcs_blocks;
+	for (blk_idx = 0; blk_idx < xaction->num_of_srcs; blk_idx++) {
+		uint32_t attr = op_attr->opcode;
+		struct al_buf *buf = block->bufs;
+		unsigned int buf_idx = 0;
+
+		/* keep only the opcode bits (19:0 are per-source attrs) */
+		attr &= ~0xFFFFF;
+		if (op_attr->have_g_coef == AL_TRUE)
+			attr |= xaction->coefs[blk_idx] & 0xFF;
+		if (blk_idx == 0)
+			attr |= AL_RAID_TX_DESC_META_FIRST_SOURCE;
+		if (blk_idx == (xaction->num_of_srcs - 1))
+			attr |= AL_RAID_TX_DESC_META_LAST_SOURCE;
+		/* for PQ_VAL the q_index source is excluded from the P
+		 * (parity) calculation */
+		if (op_attr->have_p_coef == AL_TRUE)
+			if ((xaction->op != AL_RAID_OP_PQ_VAL) ||
+					(blk_idx != xaction->q_index))
+				attr |= AL_RAID_TX_DESC_META_P_ENABLE;
+
+		for (buf_idx = 0; buf_idx < block->num; buf_idx++) {
+			uint64_t vmid = ((uint64_t)block->vmid) <<
+				AL_UDMA_DESC_VMID_SHIFT;
+			uint32_t flags_len = flags;
+			uint32_t ring_id;
+
+			/* clear first and DMB flags, keep no snoop hint flag */
+			flags &= AL_M2S_DESC_NO_SNOOP_H;
+
+			tx_desc = al_udma_desc_get(tx_udma_q);
+			/* get ring id, and clear FIRST and Int flags */
+			ring_id = al_udma_ring_id_get(tx_udma_q) <<
+				AL_M2S_DESC_RING_ID_SHIFT;
+
+			flags_len |= ring_id;
+			/* set LAST flag if last descriptor */
+			if ((blk_idx == (xaction->num_of_srcs - 1)) &&
+					(buf_idx == (block->num - 1)))
+				flags_len |= AL_M2S_DESC_LAST;
+
+			flags_len |= buf->len & AL_M2S_DESC_LEN_MASK;
+
+			if (buf_idx == 0) {
+				/* write attributes for descriptors */
+				/* that start new source */
+				tx_desc->tx.meta_ctrl = swap32_to_le(attr);
+			} else {
+				/* continuation of the same source block */
+				flags_len |= AL_M2S_DESC_CONCAT;
+			}
+			tx_desc->tx.len_ctrl = swap32_to_le(flags_len);
+			tx_desc->tx.buf_ptr = swap64_to_le(buf->addr | vmid);
+			/* move to next buffer/descriptor */
+			buf++;
+		}
+		block++;
+	}
+}
+
+/****************************** API functions *********************************/
+/**
+ * initialize the RAID hw engine
+ *
+ * @param app_regs RAID accelerator application registers base address
+ */
+void al_raid_init(void __iomem *app_regs)
+{
+	/* initialize the GFLOG and GFILOG tables of the hw */
+	_al_raid_load_table(app_regs, 1, gflog);
+	_al_raid_load_table(app_regs, 0, gfilog);
+}
+
+/**
+ * prepare raid transaction
+ *
+ * builds the rx (S2M) and tx (M2S) submission descriptors; the rx
+ * descriptors are handed to the hw immediately, the tx descriptors are
+ * only handed over by a later call to al_raid_dma_action()
+ *
+ * @param raid_dma raid DMA handle
+ * @param qid queue index
+ * @param xaction transaction context
+ *
+ * @return 0 if no error found.
+ *	   -ENOSPC if no space available.
+ */
+int al_raid_dma_prepare(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	struct al_raid_transaction *xaction)
+{
+	uint32_t rx_descs;
+	uint32_t tx_descs, meta;
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	const struct al_op_attr *op_attr;
+	int rc;
+
+	/* assert valid opcode */
+	al_assert(xaction->op <
+			(sizeof(op_attr_table)/sizeof(op_attr_table[0])));
+
+	op_attr = &(op_attr_table[xaction->op]);
+
+	/* calc rx (S2M) descriptors */
+	rx_descs = _al_raid_xaction_rx_descs_count(xaction);
+	al_assert(rx_descs <= AL_SSM_MAX_DST_DESCS);
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+
+	al_assert(rc == 0); /* assert valid rx q handle */
+
+	if (unlikely(al_udma_available_get(rx_udma_q) < rx_descs)) {
+		al_dbg("raid [%s]: rx q has no enough free descriptor",
+			raid_dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* calc tx (M2S) descriptors */
+	meta = op_attr->meta_descs;
+	tx_descs = _al_raid_xaction_tx_descs_count(xaction, meta);
+	al_assert(tx_descs <= AL_SSM_MAX_SRC_DESCS);
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(rc == 0); /* assert valid tx q handle */
+	if (unlikely(al_udma_available_get(tx_udma_q) < tx_descs)) {
+		al_dbg("raid [%s]: tx q has no enough free descriptor",
+			raid_dma->m2m_udma.name);
+		return -ENOSPC;
+	}
+
+	/* prepare rx descs */
+	_al_raid_set_rx_descs(rx_udma_q, xaction);
+	/* add rx descriptors */
+	al_udma_desc_action_add(rx_udma_q, rx_descs);
+
+	/* prepare tx descriptors */
+	_al_raid_set_tx_descs(tx_udma_q, xaction, meta, op_attr);
+	/* add tx descriptors */
+	xaction->tx_descs_count = tx_descs;
+	/* the tx descriptors are deliberately NOT added here; the caller
+	 * hands them to the hw via al_raid_dma_action() */
+
+	return 0;
+}
+
+/**
+ * add previously prepared transaction to hw engine
+ *
+ * @param raid_dma raid DMA handle
+ * @param qid queue index
+ * @param tx_descs number of tx descriptors to be processed by the engine
+ *	(as returned in xaction->tx_descs_count by al_raid_dma_prepare)
+ *
+ * @return always 0 (failure to get the q handle only trips al_assert)
+ */
+int al_raid_dma_action(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	uint32_t tx_descs)
+{
+	struct al_udma_q *tx_udma_q;
+	int rc;
+
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(rc == 0); /* assert valid tx q handle */
+
+	al_udma_desc_action_add(tx_udma_q, tx_descs);
+
+	return 0;
+}
+
+/**
+ * check and cleanup completed transaction
+ *
+ * @param raid_dma raid DMA handle
+ * @param qid queue index
+ * @param comp_status status reported by rx completion descriptor
+ *	(masked with RX_COMP_STATUS_MASK)
+ *
+ * @return 1 if a transaction was completed. 0 otherwise
+ */
+int al_raid_dma_completion(
+	struct al_ssm_dma *raid_dma,
+	uint32_t qid,
+	uint32_t *comp_status)
+{
+	struct al_udma_q *rx_udma_q;
+	struct al_udma_q *tx_udma_q;
+	volatile union al_udma_cdesc *cdesc;
+	int rc;
+	uint32_t cdesc_count;
+
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.rx_udma, qid, &rx_udma_q);
+	al_assert(rc == 0); /* assert valid rx q handle */
+
+	cdesc_count = al_udma_cdesc_packet_get(rx_udma_q, &cdesc);
+	if (cdesc_count == 0)
+		return 0;
+
+	/* if we have multiple completion descriptors, then last one will have
+	   the valid status */
+	if (unlikely(cdesc_count > 1))
+		cdesc = al_cdesc_next(rx_udma_q, cdesc, cdesc_count - 1);
+
+	*comp_status = swap32_from_le(cdesc->al_desc_comp_rx.ctrl_meta) &
+		RX_COMP_STATUS_MASK;
+
+	al_dbg(
+		"raid [%s %d]: packet completed. "
+		"count %d status desc %p meta %x\n",
+		raid_dma->m2m_udma.name, qid, cdesc_count, cdesc,
+		cdesc->al_desc_comp_rx.ctrl_meta);
+
+	al_udma_cdesc_ack(rx_udma_q, cdesc_count);
+
+	/* cleanup tx completion queue; tx completions carry no status,
+	 * they are just acknowledged */
+	rc = al_udma_q_handle_get(&raid_dma->m2m_udma.tx_udma, qid, &tx_udma_q);
+	al_assert(rc == 0); /* assert valid tx q handle */
+
+	cdesc_count = al_udma_cdesc_get_all(tx_udma_q, NULL);
+	if (cdesc_count != 0)
+		al_udma_cdesc_ack(tx_udma_q, cdesc_count);
+
+	return 1;
+}
+/** @} end of RAID group */
diff --git a/drivers/dma/al/al_hal_ssm_raid.h b/drivers/dma/al/al_hal_ssm_raid.h
new file mode 100644
index 0000000..31151c3
--- /dev/null
+++ b/drivers/dma/al/al_hal_ssm_raid.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/**
+ * @defgroup group_raid_api API
+ * @ingroup group_raid
+ * RAID API
+ * @{
+ * @file al_hal_ssm_raid.h
+ *
+ * @brief Header file for RAID acceleration unit HAL driver
+ *
+ */
+
+#ifndef __AL_HAL_RAID_H__
+#define __AL_HAL_RAID_H__
+
+#include "al_hal_common.h"
+#include "al_hal_udma.h"
+#include "al_hal_m2m_udma.h"
+#include "al_hal_ssm.h"
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/* PCI Adapter Device/Revision ID */
+#define AL_RAID_DEV_ID 0x0021
+#define AL_RAID_REV_ID_0 0
+#define AL_RAID_REV_ID_1 1
+
+/* RAID/memory operation type; also serves as the index into the
+ * driver's internal per-opcode attributes table */
+enum al_raid_op {
+	AL_RAID_OP_MEM_CPY = 0, /* memory copy */
+	AL_RAID_OP_MEM_SET, /* memory set with 64b data */
+	AL_RAID_OP_MEM_SCRUB, /* memory read */
+	AL_RAID_OP_MEM_CMP, /* compare block with 128 bit pattern */
+	AL_RAID_OP_NOP, /* no-operation */
+	AL_RAID_OP_P_CALC, /* RAID5/6 Parity (xor) calculation */
+	AL_RAID_OP_Q_CALC, /* RAID6 Q calculation */
+	AL_RAID_OP_PQ_CALC, /* P and Q calculation */
+	AL_RAID_OP_P_VAL, /* Parity validation */
+	AL_RAID_OP_Q_VAL, /* Q validation */
+	AL_RAID_OP_PQ_VAL, /* P and Q validation */
+};
+
+
+#define AL_RAID_RX_DESC_META (1<<30) /* Meta data */
+
+/* tx descriptor meta_ctrl layout: opcode in bits 24:20, per-source
+ * attributes in bits 19:17 */
+#define AL_RAID_TX_DESC_META_OPCODE_MASK (0x1f<<20) /* RAID_op & type combined */
+#define AL_RAID_TX_DESC_META_OPCODE_SHIFT (20)
+#define AL_RAID_TX_DESC_META_FIRST_SOURCE (1<<19) /* TODO: beginning of 1st block */
+#define AL_RAID_TX_DESC_META_LAST_SOURCE (1<<18) /* beginning of last block */
+#define AL_RAID_TX_DESC_META_P_ENABLE (1<<17) /* P calculation Coef */
+
+/* define the HW opcode with the needed shift, also the code combines the */
+/* op class (mem or raid) and type */
+#define AL_RAID_OPCODE(x) ((x) << AL_RAID_TX_DESC_META_OPCODE_SHIFT)
+#define AL_RAID_MEM_CPY_OPCODE AL_RAID_OPCODE(0x0)
+#define AL_RAID_MEM_SET_OPCODE AL_RAID_OPCODE(0x1)
+#define AL_RAID_MEM_SCRUB_OPCODE AL_RAID_OPCODE(0x2)
+#define AL_RAID_MEM_CMP_OPCODE AL_RAID_OPCODE(0x3)
+#define AL_RAID_NOP_OPCODE AL_RAID_OPCODE(0x8)
+#define AL_RAID_P_CALC_OPCODE AL_RAID_OPCODE(0x9)
+#define AL_RAID_Q_CALC_OPCODE AL_RAID_OPCODE(0xa)
+#define AL_RAID_PQ_CALC_OPCODE AL_RAID_OPCODE(0xb)
+#define AL_RAID_P_VAL_OPCODE AL_RAID_OPCODE(0xd)
+#define AL_RAID_Q_VAL_OPCODE AL_RAID_OPCODE(0xe)
+#define AL_RAID_PQ_VAL_OPCODE AL_RAID_OPCODE(0xf)
+#define AL_RAID_PARALLEL_MEM_CPY_OPCODE AL_RAID_OPCODE(0x10)
+
+/* transaction completion status (reported via al_raid_dma_completion) */
+#define AL_RAID_P_VAL_ERROR AL_BIT(0)
+#define AL_RAID_Q_VAL_ERROR AL_BIT(1)
+#define AL_RAID_BUS_PARITY_ERROR AL_BIT(2)
+#define AL_RAID_SOURCE_LEN_ERROR AL_BIT(3)
+#define AL_RAID_CMD_DECODE_ERROR AL_BIT(4)
+#define AL_RAID_INTERNAL_ERROR AL_BIT(5)
+
+#define AL_RAID_REDIRECTED_TRANSACTION AL_BIT(16)
+#define AL_RAID_REDIRECTED_SRC_UDMA (AL_BIT(13) | AL_BIT(12))
+#define AL_RAID_REDIRECTED_SRC_QUEUE (AL_BIT(9) | AL_BIT(8))
+
+struct al_raid_transaction {
+	enum al_raid_op op; /* operation type */
+	enum al_ssm_op_flags flags; /* interrupt/barrier/no-snoop flags */
+	struct al_block *srcs_blocks; /* array of source blocks */
+	uint32_t num_of_srcs;
+	uint32_t total_src_bufs; /* total number of buffers of all source blocks */
+	struct al_block *dsts_blocks; /* array of destination blocks */
+	uint32_t num_of_dsts;
+	uint32_t total_dst_bufs; /* total number of buffers of all destination blocks */
+	uint32_t tx_descs_count; /* number of tx descriptors created for this */
+	/* transaction, this field set by the hal */
+
+	/* the following fields are operation specific */
+	uint8_t *coefs; /* RAID6 Q coefficients of source blocks */
+	uint8_t q_index; /* RAID6 PQ_VAL: index of q src block, the parity */
+	/* calculation will ignore that buffer */
+	uint32_t data[2]; /* MEM SET data */
+
+	uint32_t mem_set_flags; /* MEM SET special flags, should be zero */
+	uint32_t pattern_data[4]; /* MEM CMP pattern data */
+	uint32_t pattern_mask[4]; /* MEM CMP pattern mask */
+};
+
+/* Init RAID GFLOG and GFILOG tables in the hw engine */
+void al_raid_init(void __iomem *app_regs);
+
+/* prepare raid transaction; returns 0, or -ENOSPC when the queue is full */
+int al_raid_dma_prepare(struct al_ssm_dma *raid_dma, uint32_t qid,
+		struct al_raid_transaction *xaction);
+
+/* add previously prepared transaction to engine */
+int al_raid_dma_action(struct al_ssm_dma *raid_dma, uint32_t qid,
+		uint32_t tx_descs);
+
+/* get new completed transaction; returns 1 if one completed, 0 otherwise */
+int al_raid_dma_completion(struct al_ssm_dma *raid_dma, uint32_t qid,
+		uint32_t *comp_status);
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+}
+#endif
+/* *INDENT-ON* */
+/** @} end of RAID group */
+#endif /* __AL_HAL_RAID_H__ */
diff --git a/drivers/dma/al/al_hal_ssm_raid_regs.h b/drivers/dma/al/al_hal_ssm_raid_regs.h
new file mode 100644
index 0000000..c3cd18b
--- /dev/null
+++ b/drivers/dma/al/al_hal_ssm_raid_regs.h
@@ -0,0 +1,239 @@
+/*******************************************************************************
+Copyright (C) 2013 Annapurna Labs Ltd.
+
+This file may be licensed under the terms of the Annapurna Labs Commercial
+License Agreement.
+
+Alternatively, this file can be distributed under the terms of the GNU General
+Public License V2 as published by the Free Software Foundation and can be
+found at http://www.gnu.org/licenses/gpl-2.0.html
+
+Alternatively, redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __AL_HAL_RAID_ACCELERATOR_REGS_H
+#define __AL_HAL_RAID_ACCELERATOR_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+* Unit Registers
+*/
+
+struct raid_accelerator_configuration {
+	uint32_t unit_conf; /* Unit configuration register */
+	uint32_t rsrvd[3];
+};
+
+struct raid_accelerator_error {
+	uint32_t unit_status; /* Error status register; see mask_fatal_error register ... */
+	uint32_t mask_fatal_error; /* Interrupt error configuration register: A bit in th ... */
+	uint32_t rsrvd[2];
+};
+
+struct raid_accelerator_gflog_table {
+	uint32_t w0_raw; /* GFLOG Table Word0 */
+	uint32_t w1_raw; /* GFLOG Table Word1; R_n refers to row n in the table */
+	uint32_t w2_raw; /* GFLOG Table Word2; R_n refers to row n in the table */
+	uint32_t w3_raw; /* GFLOG Table Word3; R_n refers to row n in the table */
+};
+
+struct raid_accelerator_log {
+	uint32_t desc_word0; /* Descriptor word 0 */
+	uint32_t desc_word1; /* Descriptor word 1 */
+	uint32_t trans_info_1; /* Transaction Information of the command that trigger ... */
+	uint32_t trans_info_2; /* Transaction Information of the command that trigger ... */
+	uint32_t rsrvd[4];
+};
+
+struct raid_accelerator_gfilog_table {
+	uint32_t w0_r; /* GFILOG Table Word0 */
+	uint32_t w1_r; /* GFILOG Table Word1; R_n refers to row n in the table */
+	uint32_t w2_r; /* GFILOG Table Word2; R_n refers to row n in the table */
+	uint32_t w3_r; /* GFILOG Table Word3; R_n refers to row n in the table */
+};
+
+struct raid_accelerator_raid_status {
+	uint32_t rsrvd[1];
+	uint32_t status; /* RAID status (original comment said "Performance
+			  * counter control" - presumably a copy/paste; verify) */
+};
+
+struct raid_accelerator_raid_perf_counter {
+	uint32_t exec_cnt; /* The execution cycle counter: Measures number of cycle ... */
+	uint32_t m2s_active_cnt; /* M2S active cycles counter: Measures number of cycles M ... */
+	uint32_t m2s_idle_cnt; /* M2S idle cycles counter: Measures number of idle cycle ... */
+	uint32_t m2s_backp_cnt; /* M2S back pressure cycles counter: Measures number of ... */
+	uint32_t s2m_active_cnt; /* S2M active cycles counter: Measures number of cycles r ... */
+	uint32_t s2m_idle_cnt; /* S2M idle cycles counter: Measures number of idle cycle ... */
+	uint32_t s2m_backp_cnt; /* S2M backpressure counter: S2M backpressure cycles cou ... */
+	uint32_t cmd_dn_cnt; /* RAID Command Done Counter: Total Number of RAID comma ... */
+	uint32_t src_blocks_cnt; /* RAID Source Blocks Counter: Total Number of Source Bl ... */
+	uint32_t dst_blocks_cnt; /* RAID Destination Blocks Counter: Total Number of Dest ... */
+	uint32_t mem_cmd_dn_cnt; /* Memory Command Done Counter: Total Number of Non-RAID ... */
+	uint32_t recover_err_cnt; /* Recoverable Errors counter: Total Number of recoverab ... */
+	uint32_t src_data_beats; /* Count the number of the data beats enter to RAID */
+	uint32_t dst_data_beats; /* Count the number of the data beats get out from RAI ... */
+	uint32_t rsrvd[6];
+};
+
+struct raid_accelerator_perfm_cnt_cntl {
+	uint32_t conf; /* Performance counter control */
+	uint32_t rsrvd[27];
+
+};
+
+
+/* full application register file of the RAID accelerator unit */
+struct raid_accelerator_regs {
+	struct raid_accelerator_configuration configuration;
+	struct raid_accelerator_error error;
+	struct raid_accelerator_log log;
+	struct raid_accelerator_raid_perf_counter raid_perf_counter;
+	struct raid_accelerator_perfm_cnt_cntl perfm_cnt_cntl;
+	struct raid_accelerator_gflog_table gflog_table[16];
+	struct raid_accelerator_gfilog_table gfilog_table[16];
+	struct raid_accelerator_raid_status raid_status;
+};
+
+
+/*
+* Registers Fields
+*/
+
+
+/**** unit_conf register ****/
+/* When this bit is set to 1, the raid engine accept n ... */
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_MUL_CMD_EN (1 << 0)
+/* When this bit is set to 1, when error occure the pi ... */
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_HOLD_PIPE_WHEN_ERROR (1 << 1)
+/* When this bit is set to 1, Reset the ack fifo. */
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_FIFO_ACK_ENABLE_MASK 0x0000007C
+#define RAID_ACCELERATOR_CONFIGURATION_UNIT_CONF_FIFO_ACK_ENABLE_SHIFT 2
+
+/**** unit_status register ****/
+/* Timeout on S2M */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_S2M_TOUT (1 << 0)
+/* Timeout on M2S */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_M2S_TOUT (1 << 1)
+/* Wrong/Unknown Command */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_CMD_DECODE_ERR (1 << 2)
+/* Multiple Source-Blocks that are not equal in size */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_BLOCK_SIZE_ERR (1 << 3)
+/* Wrong and illegal software configuration of the des ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ILLEGAL_CONF (1 << 4)
+/* source length is bigger from 16Kbytes for p_only or ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOURCE_ABOVE_16K (1 << 5)
+/* source length is bigger from 8Kbytes for p&q operat ... */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOURCE_ABOVE_8K (1 << 6)
+/* Data read from internal memory has parity error */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_INTERNAL_PARITY_ERR (1 << 7)
+/* Error received from M2S interface */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_M2S_ERR (1 << 8)
+/* Completion acknowledge Fifo overrun */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ACK_FIFO_OVR_MASK 0x00003E00
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_ACK_FIFO_OVR_SHIFT 9
+/* Data FIFO (used in Q operation) overrun */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_Q_FIFO_OVR (1 << 14)
+/* EOP without SOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_EOP_WO_SOP (1 << 15)
+/* SOP without EOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOP_WO_EOP (1 << 16)
+/* SOP and EOP in the same cycle */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_SOP_EOP_SAME_CYCLE (1 << 17)
+/* Request from stream without SOP */
+#define RAID_ACCELERATOR_ERROR_UNIT_STATUS_REQ_VALID_WO_SOP (1 << 18)
+
+/**** mask_fatal_error register ****/
+/* Timeout on S2M */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_S2M_TOUT (1 << 0)
+/* Timeout on M2S */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_M2S_TOUT (1 << 1)
+/* Wrong/Unknown Command */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_CMD_DECODE_ERR (1 << 2)
+/* Multiple Source-Blocks that are not equal in size */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_BLOCK_SIZE_ERR (1 << 3)
+/* Wrong and illegal software configuration of the des ... */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ILLEGAL_CONF (1 << 4)
+/* source length is bigger from 16Kbytes for p_only or ... */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOURCE_ABOVE_16K (1 << 5)
+/* source length is bigger from 8Kbytes for p&q operat ... */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOURCE_ABOVE_8K (1 << 6)
+/* Data read from internal memory has parity error */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_INTERNAL_PARITY_ERR (1 << 7)
+/* Error received from M2S interface */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_M2S_ERR (1 << 8)
+/* Completion acknowledge Fifo overrun */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ACK_FIFO_OVR_MASK 0x00003E00
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_ACK_FIFO_OVR_SHIFT 9
+/* Data FIFO (used in Q operation) overrun */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_Q_FIFO_OVR (1 << 14)
+/* EOP without SOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_EOP_WO_SOP (1 << 15)
+/* SOP without EOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOP_WO_EOP (1 << 16)
+/* SOP and EOP in the same cycle */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_SOP_EOP_SAME_CYCLE (1 << 17)
+/* Request from strem without SOP */
+#define RAID_ACCELERATOR_ERROR_MASK_FATAL_ERROR_REQ_VALID_WO_SOP (1 << 18)
+
+/**** trans_info_1 register ****/
+/* Transaction length in bytes */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_TRANS_LEN_MASK 0x000FFFFF
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_TRANS_LEN_SHIFT 0
+/* Number of descriptors in the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_NUM_OF_DESC_MASK 0x00F00000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_NUM_OF_DESC_SHIFT 20
+/* Reserved */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_RESERVED_MASK 0xFF000000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_1_RESERVED_SHIFT 24
+
+/**** trans_info_2 register ****/
+/* Queue Number of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_Q_NUM_MASK 0x00000FFF
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_Q_NUM_SHIFT 0
+/* UDMA ID of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_DMA_ID_MASK 0x0000F000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_DMA_ID_SHIFT 12
+/* Internal Serial Number of the transaction */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_SERIAL_NUM_MASK 0x03FF0000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_SERIAL_NUM_SHIFT 16
+/* Reserved */
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_RESERVED_MASK 0xFC000000
+#define RAID_ACCELERATOR_LOG_TRANS_INFO_2_RESERVED_SHIFT 26
+
+/**** conf register ****/
+/* Not effect the recover_err_cnt 0: clear pe ... */
+#define RAID_ACCELERATOR_PERFM_CNT_CNTL_CONF_CONT_PERFORM_MASK 0x00000003
+#define RAID_ACCELERATOR_PERFM_CNT_CNTL_CONF_CONT_PERFORM_SHIFT 0
+
+/**** status register ****/
+/* indicate when RAID is empty. */
+#define RAID_ACCELERATOR_RAID_STATUS_STATUS_RAID_EMPTY (1 << 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __AL_HAL_RAID_ACCELERATOR_REG_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992..06915b4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -347,7 +347,7 @@ EXPORT_SYMBOL(dma_find_channel);
*/
struct dma_chan *net_dma_find_channel(void)
{
- struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+ struct dma_chan *chan = dma_find_channel(DMA_SG);
if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
return NULL;
@@ -1019,6 +1019,60 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+/**
+ * dma_async_memcpy_sg_to_sg - offloaded copy from sg to sg
+ * @chan: DMA channel to offload copy to
+ * @dst_sg: destination scatterlist
+ * @dst_nents: number of entries in the destination scatterlist
+ * @src_sg: source scatterlist
+ * @src_nents: number of entries in the source scatterlist
+ * Returns a dma cookie on success, -ENOMEM on failure
+ */
+dma_cookie_t
+dma_async_memcpy_sg_to_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents)
+{
+ struct dma_device *dev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int src_sglen;
+ int dst_sglen;
+
+ /* Map DMA buffers */
+ src_sglen = dma_map_sg(chan->device->dev, src_sg,
+ src_nents, DMA_TO_DEVICE);
+ BUG_ON(!src_sglen);
+
+ dst_sglen = dma_map_sg(chan->device->dev, dst_sg,
+ dst_nents, DMA_FROM_DEVICE);
+ BUG_ON(!dst_sglen);
+
+ flags = DMA_CTRL_ACK;
+
+ tx = dev->device_prep_dma_sg(chan, dst_sg, dst_sglen,
+ src_sg, src_sglen, flags);
+ if (!tx) {
+ dma_unmap_sg(chan->device->dev, src_sg,
+ src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(chan->device->dev, dst_sg,
+ dst_nents, DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+
+ tx->callback = NULL;
+ cookie = tx->tx_submit(tx);
+
+ preempt_disable();
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
+
+ return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_sg_to_sg);
+
+
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index bb48a57..ba41dca 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -39,15 +39,17 @@ static int num_pages_spanned(struct iovec *iov)
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
}
+#define NETDMA_MAX_NR_IOVECS UIO_MAXIOV
+#define NETDMA_MAX_NR_PAGES NETDMA_MAX_NR_IOVECS
/*
* Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
+ * return 0 on success
*
* We are allocating a single chunk of memory, and then carving it up into
* 3 sections, the latter 2 whose size depends on the number of iovecs and the
* total number of pages, respectively.
*/
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
+int dma_pin_iovec_pages(struct tcp_sock *tp, struct iovec *iov, size_t len)
{
struct dma_pinned_list *local_list;
struct page **pages;
@@ -57,10 +59,40 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
int iovec_len_used = 0;
int iovec_pages_used = 0;
- /* don't pin down non-user-based iovecs */
- if (segment_eq(get_fs(), KERNEL_DS))
- return NULL;
+ if (!tp->ucopy.pinned_list) {
+ /* single kmalloc for pinned list, page_list[], and the page arrays */
+ local_list = kmalloc(sizeof(*local_list)
+ + (NETDMA_MAX_NR_IOVECS * sizeof (struct dma_page_list))
+ + (NETDMA_MAX_NR_PAGES * sizeof (struct page*)), GFP_KERNEL);
+
+ /* handle malloc failure */
+ if (!local_list)
+ return -1;
+
+ /* alloc sgt tables*/
+ local_list->sgts = kmalloc(2 * sizeof(struct sg_table), GFP_KERNEL);
+ if (!local_list->sgts)
+ goto sgts_fail;
+ ret = sg_alloc_table(local_list->sgts, NETDMA_MAX_NR_PAGES, GFP_KERNEL);
+ if (ret)
+ goto dst_sg_fail;
+ ret = sg_alloc_table(local_list->sgts + 1, NETDMA_MAX_NR_IOVECS, GFP_KERNEL);
+ if (ret)
+ goto src_sg_fail;
+
+ tp->ucopy.pinned_list = local_list;
+ goto alloc_ok;
+src_sg_fail:
+ sg_free_table(local_list->sgts);
+dst_sg_fail:
+ kfree(local_list->sgts);
+sgts_fail:
+ kfree(local_list);
+ return -1;
+ }
+alloc_ok:
+ local_list = tp->ucopy.pinned_list;
/* determine how many iovecs/pages there are, up front */
do {
iovec_len_used += iov[nr_iovecs].iov_len;
@@ -68,57 +100,80 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
nr_iovecs++;
} while (iovec_len_used < len);
- /* single kmalloc for pinned list, page_list[], and the page arrays */
- local_list = kmalloc(sizeof(*local_list)
- + (nr_iovecs * sizeof (struct dma_page_list))
- + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
- if (!local_list)
- goto out;
+ /* return error so DMA won't be used if the buffer is too large */
+ if (unlikely((nr_iovecs > NETDMA_MAX_NR_IOVECS) || (iovec_pages_used > NETDMA_MAX_NR_PAGES)))
+ return -1;
+
+ local_list->nr_pages = iovec_pages_used;
+ if (segment_eq(get_fs(), KERNEL_DS))
+ local_list->kernel = 1;
+ else
+ local_list->kernel = 0;
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
local_list->nr_iovecs = 0;
- for (i = 0; i < nr_iovecs; i++) {
- struct dma_page_list *page_list = &local_list->page_list[i];
-
- len -= iov[i].iov_len;
-
- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
- goto unpin;
-
- page_list->nr_pages = num_pages_spanned(&iov[i]);
- page_list->base_address = iov[i].iov_base;
-
- page_list->pages = pages;
- pages += page_list->nr_pages;
-
- /* pin pages down */
- down_read(¤t->mm->mmap_sem);
- ret = get_user_pages(
- current,
- current->mm,
- (unsigned long) iov[i].iov_base,
- page_list->nr_pages,
- 1, /* write */
- 0, /* force */
- page_list->pages,
- NULL);
- up_read(¤t->mm->mmap_sem);
-
- if (ret != page_list->nr_pages)
- goto unpin;
+ if (local_list->kernel) {
+ for (i = 0; i < nr_iovecs; i++) {
+ struct dma_page_list *page_list = &local_list->page_list[i];
+ page_list->base_address = iov[i].iov_base;
+ page_list->nr_pages = num_pages_spanned(&iov[i]);
+ BUG_ON(num_pages_spanned(&iov[i]) != 1);
+ *pages = kmap_to_page(iov[i].iov_base);
+ page_list->pages = pages;
+ pages++;
+ local_list->nr_iovecs = i + 1;
+ }
+ pr_debug("%s %d: added kernel %d pages (%d vecs)\n", __func__, __LINE__,
+ local_list->nr_pages, local_list->nr_iovecs);
+
+ } else {
+ for (i = 0; i < nr_iovecs; i++) {
+ struct dma_page_list *page_list = &local_list->page_list[i];
+
+ len -= iov[i].iov_len;
+
+ if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
+ goto unpin;
+
+ page_list->nr_pages = num_pages_spanned(&iov[i]);
+ page_list->base_address = iov[i].iov_base;
+
+ page_list->pages = pages;
+ pages += page_list->nr_pages;
+
+ /* pin pages down */
+ down_read(¤t->mm->mmap_sem);
+ ret = get_user_pages(
+ current,
+ current->mm,
+ (unsigned long) iov[i].iov_base,
+ page_list->nr_pages,
+ 1, /* write */
+ 0, /* force */
+ page_list->pages,
+ NULL);
+ up_read(¤t->mm->mmap_sem);
+
+ local_list->nr_iovecs = i + 1;
+ if (ret != page_list->nr_pages) {
+ pr_debug("%s %d: get_user_pages didn't succeed to pin all requested pages!!\n", __func__, __LINE__);
+ /* set the nr_pages that really allocated so the unpin will release it*/
+ page_list->nr_pages = ret > 0 ? ret : 0;
+ goto unpin;
+ }
- local_list->nr_iovecs = i + 1;
+ }
+ pr_debug("%s %d: added user %d pages (%d vecs)\n", __func__, __LINE__,
+ local_list->nr_pages, local_list->nr_iovecs);
}
-
- return local_list;
+ return 0;
unpin:
dma_unpin_iovec_pages(local_list);
-out:
- return NULL;
+ return -1;
}
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
@@ -128,18 +183,97 @@ void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
if (!pinned_list)
return;
- for (i = 0; i < pinned_list->nr_iovecs; i++) {
- struct dma_page_list *page_list = &pinned_list->page_list[i];
- for (j = 0; j < page_list->nr_pages; j++) {
- set_page_dirty_lock(page_list->pages[j]);
- page_cache_release(page_list->pages[j]);
+ if (!pinned_list->kernel) {
+ for (i = 0; i < pinned_list->nr_iovecs; i++) {
+ struct dma_page_list *page_list = &pinned_list->page_list[i];
+ for (j = 0; j < page_list->nr_pages; j++) {
+ set_page_dirty_lock(page_list->pages[j]);
+ page_cache_release(page_list->pages[j]);
+ }
}
}
+}
- kfree(pinned_list);
+void dma_free_iovec_data(struct tcp_sock *tp)
+{
+ struct dma_pinned_list *local_list = tp->ucopy.pinned_list;
+
+ if (local_list) {
+ sg_free_table(local_list->sgts);
+ sg_free_table(local_list->sgts + 1);
+ kfree(local_list->sgts);
+ kfree(tp->ucopy.pinned_list);
+ tp->ucopy.pinned_list = NULL;
+ }
}
+/* return number of sg elements created on success, and negative of failure */
+int dma_memcpy_fill_sg_from_iovec(struct dma_chan *chan, struct iovec *iov,
+ struct dma_pinned_list *pinned_list, struct scatterlist *dst_sg,
+ unsigned int offset, size_t len)
+{
+ int iov_byte_offset;
+ int copy;
+ int iovec_idx;
+ int page_idx;
+ int sg_nents = 0;
+
+ pr_debug("%s %d: nr iovecs %d. len 0x%x\n",
+ __func__, __LINE__,
+ pinned_list->nr_iovecs, len);
+
+
+ iovec_idx = 0;
+ while (iovec_idx < pinned_list->nr_iovecs) {
+ struct dma_page_list *page_list;
+
+ /* skip already used-up iovecs */
+ while (!iov[iovec_idx].iov_len)
+ iovec_idx++;
+
+ page_list = &pinned_list->page_list[iovec_idx];
+
+ iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
+ page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
+ - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
+
+
+ pr_debug("%s %d: iov idx %d. len 0x%x\n",
+ __func__, __LINE__,
+ iovec_idx, iov[iovec_idx].iov_len);
+ /* break up copies to not cross page boundary */
+ while (iov[iovec_idx].iov_len) {
+ copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
+ copy = min_t(int, copy, iov[iovec_idx].iov_len);
+
+ pr_debug("%s %d: add dst buf page %p. len 0x%x offset 0x%x\n",
+ __func__, __LINE__,
+ page_list->pages[page_idx], copy, iov_byte_offset);
+ sg_set_page(dst_sg, page_list->pages[page_idx],
+ copy, iov_byte_offset);
+
+ dst_sg = sg_next(dst_sg);
+ sg_nents++;
+
+ len -= copy;
+ iov[iovec_idx].iov_len -= copy;
+ iov[iovec_idx].iov_base += copy;
+
+ page_idx++;
+ if (!len)
+ return sg_nents;
+
+ offset += copy;
+ iov_byte_offset = 0;
+ }
+ iovec_idx++;
+ }
+
+ /* really bad if we ever run out of iovecs */
+ BUG();
+ return -EFAULT;
+}
/*
* We have already pinned down the pages we will be using in the iovecs.
* Each entry in iov array has corresponding entry in pinned_list->page_list.
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e443f2c1..c1fc42f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -244,6 +244,13 @@ config EDAC_I7300
Support for error detection and correction the Intel
Clarksboro MCH (Intel 7300 chipset).
+config EDAC_AL_MC
+ tristate "Annapurna Labs Memory Controller"
+ depends on EDAC_MM_EDAC && ARCH_ALPINE
+ help
+ Support for error detection and correction for Annapurna
+ Labs Alpine chipset
+
config EDAC_SBRIDGE
tristate "Intel Sandy-Bridge Integrated MC"
depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 4154ed6..3e5a415 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -45,6 +45,8 @@ amd64_edac_mod-y := amd64_edac.o
amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+obj-$(CONFIG_EDAC_AL_MC) += al_mc_edac.o
+
obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/al_mc_edac.c b/drivers/edac/al_mc_edac.c
new file mode 100644
index 0000000..2fe66f8
--- /dev/null
+++ b/drivers/edac/al_mc_edac.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright 2013 Annapurna Labs Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+
+#define DEVICE_ATTR_FLAGS S_IRUGO | S_IWUSR
+
+struct inject_addr {
+ int col;
+ int rank;
+ int row;
+ int bank;
+};
+
+struct al_mc_drvdata {
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ struct inject_addr inject;
+ int inject_enabled;
+ struct delayed_work handle_corr_err_work;
+ struct delayed_work handle_uncorr_err_work;
+};
+
+/* Memory Controller error handler */
+static irqreturn_t al_mc_corr_err_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+ schedule_delayed_work(&drvdata->handle_corr_err_work,
+ msecs_to_jiffies(1));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t al_mc_uncorr_err_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+ schedule_delayed_work(&drvdata->handle_uncorr_err_work,
+ msecs_to_jiffies(1));
+
+ return IRQ_HANDLED;
+}
+
+static void al_mc_corr_err_work(struct work_struct *work)
+{
+ struct al_mc_drvdata *drvdata = container_of(work, struct al_mc_drvdata,
+ handle_corr_err_work.work);
+ struct mem_ctl_info *mci = drvdata->mci;
+ struct al_ddr_ecc_status ecc_corr_status;
+ al_phys_addr_t err_addr;
+
+ /* get the ecc status */
+ al_ddr_ecc_status_get(drvdata->vbase,
+ &ecc_corr_status, NULL);
+
+ al_ddr_ecc_corr_count_clear(drvdata->vbase);
+
+ al_ddr_address_translate_dram2sys(drvdata->vbase, &err_addr,
+ ecc_corr_status.rank, ecc_corr_status.bank,
+ ecc_corr_status.col, ecc_corr_status.row);
+
+ /* log the error */
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ ecc_corr_status.err_cnt,
+ err_addr >> PAGE_SHIFT, err_addr & PAGE_MASK,
+ ecc_corr_status.syndromes_31_0,
+ ecc_corr_status.rank, -1, -1, mci->ctl_name, "");
+
+ al_ddr_ecc_corr_int_clear(NULL, drvdata->vbase);
+}
+
+static void al_mc_uncorr_err_work(struct work_struct *work)
+{
+ struct al_mc_drvdata *drvdata = container_of(work, struct al_mc_drvdata,
+ handle_uncorr_err_work.work);
+ struct mem_ctl_info *mci = drvdata->mci;
+ struct al_ddr_ecc_status ecc_uncorr_status;
+ al_phys_addr_t err_addr;
+
+ /* get the ecc status */
+ al_ddr_ecc_status_get(drvdata->vbase,
+ NULL, &ecc_uncorr_status);
+
+ al_ddr_ecc_uncorr_count_clear(drvdata->vbase);
+
+ al_ddr_address_translate_dram2sys(drvdata->vbase, &err_addr,
+ ecc_uncorr_status.rank, ecc_uncorr_status.bank,
+ ecc_uncorr_status.col, ecc_uncorr_status.row);
+
+ /* log the error */
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ ecc_uncorr_status.err_cnt,
+ err_addr >> PAGE_SHIFT, err_addr & PAGE_MASK, 0,
+ ecc_uncorr_status.rank, -1, -1, mci->ctl_name, "");
+
+ al_ddr_ecc_uncorr_int_clear(NULL, drvdata->vbase);
+}
+
+
+/*
+ * The following functions implement the sysfs behavior
+ */
+
+/* Data injection physical address configuration */
+static ssize_t al_inject_phys_addr_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(dev);
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+ unsigned long data_val;
+ al_phys_addr_t addr;
+ int rc;
+
+ rc = strict_strtoul(data, 16, &data_val);
+ if (rc < 0)
+ return -EIO;
+
+ addr = (al_phys_addr_t)data_val;
+ rc = al_ddr_address_translate_sys2dram(drvdata->vbase, addr,
+ &drvdata->inject.rank, &drvdata->inject.bank,
+ &drvdata->inject.col, &drvdata->inject.row);
+ if (rc < 0)
+ return -EIO;
+
+ if (drvdata->inject_enabled == 1) {
+ rc = al_ddr_ecc_data_poison_enable(
+ drvdata->vbase,
+ drvdata->inject.rank,
+ drvdata->inject.bank,
+ drvdata->inject.col,
+ drvdata->inject.row);
+ if (rc < 0)
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t al_inject_phys_addr_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(dev);
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+ al_phys_addr_t addr;
+
+ al_ddr_address_translate_dram2sys(drvdata->vbase, &addr,
+ drvdata->inject.rank, drvdata->inject.bank,
+ drvdata->inject.col, drvdata->inject.row);
+
+ return sprintf(data, "0x%llx\n", (unsigned long long)addr);
+}
+
+
+/* Data injection en/disable interface */
+static ssize_t al_inject_enable_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(dev);
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+ unsigned long data_val;
+ int rc;
+
+ rc = strict_strtoul(data, 10, &data_val);
+ if (rc < 0)
+ return -EIO;
+
+ if (data_val == 1)
+ rc = al_ddr_ecc_data_poison_enable(
+ drvdata->vbase,
+ drvdata->inject.rank,
+ drvdata->inject.bank,
+ drvdata->inject.col,
+ drvdata->inject.row);
+ else if (data_val == 0)
+ rc = al_ddr_ecc_data_poison_disable(drvdata->vbase);
+ else
+ return -EIO;
+
+ drvdata->inject_enabled = data_val;
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+
+static ssize_t al_inject_enable_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(dev);
+ struct al_mc_drvdata *drvdata = mci->pvt_info;
+
+ return sprintf(data, "%d\n", drvdata->inject_enabled);
+}
+
+/* Data injection mechanism DDR-addressing configuration
+ * the store/show functions share the same implementation, defined below
+ */
+#define al_inject_store_impl(_field) \
+ do { \
+ struct mem_ctl_info *mci = dev_get_drvdata(dev); \
+ struct al_mc_drvdata *drvdata = mci->pvt_info; \
+ unsigned long data_val; \
+ int rc; \
+ \
+ rc = strict_strtoul(data, 10, &data_val); \
+ if (rc < 0) \
+ return -EIO; \
+ drvdata->inject._field = data_val; \
+ /* user must issue enable cmd after changing addr */ \
+ drvdata->inject_enabled = 0; \
+ rc = al_ddr_ecc_data_poison_disable(drvdata->vbase); \
+ if (rc < 0) \
+ return -EIO; \
+ } while(0)
+
+#define al_inject_show_impl(_field) \
+ do { \
+ struct mem_ctl_info *mci = dev_get_drvdata(dev); \
+ struct al_mc_drvdata *drvdata = mci->pvt_info; \
+ \
+ return sprintf(data, "%d\n", drvdata->inject._field); \
+ } while (0)
+
+static ssize_t al_inject_col_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ al_inject_store_impl(col);
+
+ return count;
+}
+
+static ssize_t al_inject_rank_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ al_inject_store_impl(rank);
+
+ return count;
+}
+
+static ssize_t al_inject_row_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ al_inject_store_impl(row);
+
+ return count;
+}
+
+static ssize_t al_inject_bank_store(
+ struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ al_inject_store_impl(bank);
+
+ return count;
+}
+
+static ssize_t al_inject_col_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ al_inject_show_impl(col);
+}
+
+static ssize_t al_inject_rank_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ al_inject_show_impl(rank);
+}
+
+static ssize_t al_inject_row_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ al_inject_show_impl(row);
+}
+
+static ssize_t al_inject_bank_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ al_inject_show_impl(bank);
+}
+
+/* show a short help for the sysfs attributes */
+static ssize_t al_inject_help_show(
+ struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ return sprintf(data,
+ "inject help\n"
+ "-----------\n"
+ "All of the following attributes use the sysfs interface for\n"
+ "setting/showing values:\n"
+ "echo VALUE > ATTRIBUTE - set ATTRIBUTE to VALUE\n"
+ "cat ATTRIBUTE - show the current value of ATTRIBUTE\n"
+ "\nAvailable commands:\n"
+ "- inject_phys_addr\n"
+ "\tset/show physical address for UC error injection\n"
+ "- inject_col/rank/row/bank\n"
+ "\tset/show ddr col/rank/row/bank value for UC error injection\n"
+ "- inject_enable\n"
+ "\tenable/disable the device (by setting to 1/0), or print\n"
+ "\tcurrent state\n"
+ "\t(*)when changing an address configuration, you need to\n"
+ "\t re-enable the interface i.o. to apply your changes\n"
+ );
+}
+
+/* define and bind all of the store/show implementations with their
+ * corresponding sysfs attributes */
+static DEVICE_ATTR(inject_phys_addr, DEVICE_ATTR_FLAGS,
+ al_inject_phys_addr_show, al_inject_phys_addr_store);
+static DEVICE_ATTR(inject_enable, DEVICE_ATTR_FLAGS,
+ al_inject_enable_show, al_inject_enable_store);
+static DEVICE_ATTR(inject_col,DEVICE_ATTR_FLAGS,
+ al_inject_col_show, al_inject_col_store);
+static DEVICE_ATTR(inject_rank,DEVICE_ATTR_FLAGS,
+ al_inject_rank_show, al_inject_rank_store);
+static DEVICE_ATTR(inject_row,DEVICE_ATTR_FLAGS,
+ al_inject_row_show, al_inject_row_store);
+static DEVICE_ATTR(inject_bank,DEVICE_ATTR_FLAGS,
+ al_inject_bank_show, al_inject_bank_store);
+static DEVICE_ATTR(inject_help, S_IRUGO,
+ al_inject_help_show, NULL);
+
+static void al_delete_sysfs_devices(struct mem_ctl_info *mci)
+{
+ device_remove_file(&mci->dev, &dev_attr_inject_phys_addr);
+ device_remove_file(&mci->dev, &dev_attr_inject_enable);
+ device_remove_file(&mci->dev, &dev_attr_inject_col);
+ device_remove_file(&mci->dev, &dev_attr_inject_rank);
+ device_remove_file(&mci->dev, &dev_attr_inject_row);
+ device_remove_file(&mci->dev, &dev_attr_inject_bank);
+ device_remove_file(&mci->dev, &dev_attr_inject_help);
+}
+
+static int al_create_sysfs_devices(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject_phys_addr);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_enable);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_col);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_rank);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_row);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_bank);
+ if (rc < 0)
+ goto err;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_help);
+ if (rc < 0)
+ goto err;
+
+ return rc;
+err:
+ al_delete_sysfs_devices(mci);
+ return rc;
+}
+
+/*
+ * end of sysfs section
+ */
+
+
+static int al_mc_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci = NULL;
+ struct al_mc_drvdata *drvdata;
+ struct dimm_info *dimm;
+ struct resource *r;
+ struct al_ddr_ecc_cfg ecc_cfg;
+ void __iomem *vbase;
+ int ecc_corr_irq, ecc_uncorr_irq;
+ unsigned int active_ranks, rank_addr_bits;
+ int i, res = 0;
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* initialize the controller private database */
+ /* set controller register base address */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "Unable to get mem resource\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ resource_size(r), dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "Error while requesting mem region\n");
+ res = -EBUSY;
+ goto err;
+ }
+
+ vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map regs\n");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ al_ddr_ecc_cfg_get(vbase, &ecc_cfg);
+ if (!ecc_cfg.ecc_enabled) {
+ dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ active_ranks = al_ddr_active_ranks_get(vbase);
+ if (!active_ranks) {
+ dev_err(&pdev->dev, "Failed to detect active ranks\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = active_ranks;
+ layers[0].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct al_mc_drvdata));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ drvdata = mci->pvt_info;
+ drvdata->mci = mci;
+ drvdata->vbase = vbase;
+ platform_set_drvdata(pdev, mci);
+
+ /* set default address for inject mechanism */
+ drvdata->inject.col = 0;
+ drvdata->inject.rank = 0;
+ drvdata->inject.row = 0;
+ drvdata->inject.bank = 0;
+ drvdata->inject_enabled = 0;
+ INIT_DELAYED_WORK(&drvdata->handle_corr_err_work, al_mc_corr_err_work);
+ INIT_DELAYED_WORK(&drvdata->handle_uncorr_err_work, al_mc_uncorr_err_work);
+
+ ecc_corr_irq =
+ al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_MCTL_ECC_CORR_ERR);
+ ecc_uncorr_irq =
+ al_fabric_get_cause_irq(0, AL_FABRIC_IRQ_MCTL_ECC_UNCORR_ERR);
+
+ /*
+ * Configure the Memory Controller Info struct, according to the
+ * following:
+ * - Use DDR3 type memory
+ * - Single-bit Error Correction, Double-bit Error Detection (SECDED)
+ * - Scrub status is set according to the controller's configuration
+ */
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = dev_name(&pdev->dev);
+ mci->mod_ver = "1";
+ mci->ctl_name = dev_name(&pdev->dev);
+ mci->scrub_mode = ecc_cfg.ecc_enabled ?
+ (ecc_cfg.scrub_enabled ? SCRUB_HW_SRC : SCRUB_NONE) :
+ SCRUB_UNKNOWN;
+
+ rank_addr_bits = al_ddr_bits_per_rank_get(drvdata->vbase);
+ /*
+ * Set dimm attributes
+ * - Use DDR3 type memory
+ * - Single-bit Error Correction, Double-bit Error Detection (SECDED)
+ * - Number of pages can be calculated using rank size and page shift
+ * - Granularity of reported errors (in bytes) according to data width
+ */
+ for (i = 0 ; i < active_ranks ; i++) {
+ dimm = mci->dimms[i];
+ dimm->nr_pages = (1ULL << rank_addr_bits) >> PAGE_SHIFT;
+ dimm->grain = (al_ddr_data_width_get(drvdata->vbase) ==
+ AL_DDR_DATA_WIDTH_64_BITS) ? 8 : 4;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->mtype = MEM_DDR3;
+ dimm->edac_mode = EDAC_SECDED;
+ }
+
+ res = edac_mc_add_mc(mci);
+ if (res < 0)
+ goto err;
+
+ res = devm_request_irq(&pdev->dev, ecc_corr_irq,
+ al_mc_corr_err_handler, 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ dev_err(&pdev->dev, "IRQ request failed (ecc corr irq) %d\n",
+ ecc_corr_irq);
+ goto err;
+ }
+
+ res = devm_request_irq(&pdev->dev, ecc_uncorr_irq,
+ al_mc_uncorr_err_handler, 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ dev_err(&pdev->dev, "IRQ request failed (ecc uncorr irq) %d\n",
+ ecc_uncorr_irq);
+ goto err;
+ }
+
+ res = al_create_sysfs_devices(mci);
+ if (res < 0)
+ goto err;
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
+err:
+ devres_release_group(&pdev->dev, NULL);
+ if (mci)
+ edac_mc_free(mci);
+ return res;
+}
+
+static int al_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ al_delete_sysfs_devices(mci);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static const struct of_device_id al_mc_of_match[] = {
+ { .compatible = "annapurna-labs,al-mc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, al_mc_of_match);
+
+static struct platform_driver al_mc_edac_driver = {
+ .probe = al_mc_probe,
+ .remove = al_mc_remove,
+ .driver = {
+ .name = "al_mc_edac",
+ .of_match_table = al_mc_of_match,
+ },
+};
+
+module_platform_driver(al_mc_edac_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Annapurna Labs Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Annapurna Labs MC");
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 6a4bd0d..962ee10 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -261,7 +261,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
struct device *dev = &adev->dev;
struct pl061_platform_data *pdata = dev->platform_data;
struct pl061_gpio *chip;
- int ret, irq, i, irq_base;
+ int ret, irq, i, irq_base = 0;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -272,9 +272,16 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
irq_base = pdata->irq_base;
if (irq_base <= 0)
return -ENODEV;
+ } else if (adev->dev.of_node) {
+ const void *ptr;
+ unsigned int baseidx = -1; /* GPIO dynamic allocation */
+
+ ptr = of_get_property(adev->dev.of_node, "baseidx", NULL);
+ if (ptr)
+ baseidx = be32_to_cpup(ptr);
+ chip->gc.base = baseidx;
} else {
chip->gc.base = -1;
- irq_base = 0;
}
if (!devm_request_mem_region(dev, adev->res.start,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 19ceaa6..ad495b1 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -94,7 +94,7 @@ struct irq_chip gic_arch_extn = {
};
#ifndef MAX_GIC_NR
-#define MAX_GIC_NR 1
+#define MAX_GIC_NR 2
#endif
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
@@ -588,6 +588,23 @@ static void gic_cpu_restore(unsigned int gic_nr)
writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
+static void gic_cpu_mask(unsigned int gic_nr)
+{
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!cpu_base)
+ return;
+
+ /* do not raise any interrupt from cpu interface.
+ * do not bypass to legacy_irq and legacy_fiq legs*/
+ writel_relaxed(0 | (3<<5), cpu_base + GIC_CPU_CTRL);
+}
+
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
int i;
@@ -616,6 +633,10 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
}
}
+ /*do not accept interrupt from main gic*/
+ if (cmd == CPU_PM_ENTER)
+ gic_cpu_mask(0);
+
return NOTIFY_OK;
}
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 49794b4..247f8aa 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -92,6 +92,13 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on NET && LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
config LEDS_TRIGGER_TRANSIENT
tristate "LED Transient Trigger"
depends on LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 1abf48d..0a2dde8 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
new file mode 100644
index 0000000..9fb9013
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -0,0 +1,451 @@
+/*
+ * LED Kernel Netdev Trigger
+ *
+ * Toggles the LED to reflect the link and traffic state of a named net device
+ *
+ * Copyright 2007 Oliver Jowett
+ *
+ * Derived from ledtrig-timer.c which is:
+ * Copyright 2005-2006 Openedhand Ltd.
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <net/net_namespace.h>
+#endif
+
+#include "../leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ *
+ * interval - duration of LED blink, in milliseconds
+ *
+ * mode - either "none" (LED is off) or a space separated list of one or more of:
+ * link: LED's normal state reflects whether the link is up (has carrier) or not
+ * tx: LED blinks on transmitted data
+ * rx: LED blinks on receive data
+ *
+ * Some suggestions:
+ *
+ * Simple link status LED:
+ * $ echo netdev >someled/trigger
+ * $ echo eth0 >someled/device_name
+ * $ echo link >someled/mode
+ *
+ * Ethernet-style link/activity LED:
+ * $ echo netdev >someled/trigger
+ * $ echo eth0 >someled/device_name
+ * $ echo "link tx rx" >someled/mode
+ *
+ * Modem-style tx/rx LEDs:
+ * $ echo netdev >led1/trigger
+ * $ echo ppp0 >led1/device_name
+ * $ echo tx >led1/mode
+ * $ echo netdev >led2/trigger
+ * $ echo ppp0 >led2/device_name
+ * $ echo rx >led2/mode
+ *
+ */
+
+#define MODE_LINK 1
+#define MODE_TX 2
+#define MODE_RX 4
+
+struct led_netdev_data {
+ rwlock_t lock;
+
+ struct timer_list timer;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ unsigned interval;
+ unsigned mode;
+ unsigned link_up;
+ unsigned last_activity;
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ if ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up)
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ else
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+
+ if ((trigger_data->mode & (MODE_TX | MODE_RX)) != 0 && trigger_data->link_up)
+ mod_timer(&trigger_data->timer, jiffies + trigger_data->interval);
+ else
+ del_timer(&trigger_data->timer);
+}
+
+static ssize_t led_device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+ sprintf(buf, "%s\n", trigger_data->device_name);
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf) + 1;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+extern struct net init_net;
+#endif
+
+static ssize_t led_device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (size < 0 || size >= IFNAMSIZ)
+ return -EINVAL;
+
+ write_lock(&trigger_data->lock);
+
+ strcpy(trigger_data->device_name, buf);
+ if (size > 0 && trigger_data->device_name[size-1] == '\n')
+ trigger_data->device_name[size-1] = 0;
+
+ if (trigger_data->device_name[0] != 0) {
+ /* check for existing device to update from */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ trigger_data->net_dev = dev_get_by_name(&init_net, trigger_data->device_name);
+#else
+ trigger_data->net_dev = dev_get_by_name(trigger_data->device_name);
+#endif
+ if (trigger_data->net_dev != NULL)
+ trigger_data->link_up = (dev_get_flags(trigger_data->net_dev) & IFF_LOWER_UP) != 0;
+ set_baseline_state(trigger_data); /* updates LEDs, may start timers */
+ }
+
+ write_unlock(&trigger_data->lock);
+ return size;
+}
+
+static DEVICE_ATTR(device_name, 0644, led_device_name_show, led_device_name_store);
+
+static ssize_t led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+
+ if (trigger_data->mode == 0) {
+ strcpy(buf, "none\n");
+ } else {
+ if (trigger_data->mode & MODE_LINK)
+ strcat(buf, "link ");
+ if (trigger_data->mode & MODE_TX)
+ strcat(buf, "tx ");
+ if (trigger_data->mode & MODE_RX)
+ strcat(buf, "rx ");
+ strcat(buf, "\n");
+ }
+
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf)+1;
+}
+
+static ssize_t led_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ char copybuf[32];
+ int new_mode = -1;
+ char *p, *token;
+
+ /* take a copy since we don't want to trash the inbound buffer when using strsep */
+ strncpy(copybuf, buf, sizeof(copybuf));
+ copybuf[31] = 0;
+ p = copybuf;
+
+ while ((token = strsep(&p, " \t\n")) != NULL) {
+ if (!*token)
+ continue;
+
+ if (new_mode == -1)
+ new_mode = 0;
+
+ if (!strcmp(token, "none"))
+ new_mode = 0;
+ else if (!strcmp(token, "tx"))
+ new_mode |= MODE_TX;
+ else if (!strcmp(token, "rx"))
+ new_mode |= MODE_RX;
+ else if (!strcmp(token, "link"))
+ new_mode |= MODE_LINK;
+ else
+ return -EINVAL;
+ }
+
+ if (new_mode == -1)
+ return -EINVAL;
+
+ write_lock(&trigger_data->lock);
+ trigger_data->mode = new_mode;
+ set_baseline_state(trigger_data);
+ write_unlock(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(mode, 0644, led_mode_show, led_mode_store);
+
+static ssize_t led_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ read_lock(&trigger_data->lock);
+ sprintf(buf, "%u\n", jiffies_to_msecs(trigger_data->interval));
+ read_unlock(&trigger_data->lock);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long value = simple_strtoul(buf, &after, 10);
+ size_t count = after - buf;
+
+ if (*after && isspace(*after))
+ count++;
+
+ /* impose some basic bounds on the timer interval */
+ if (count == size && value >= 5 && value <= 10000) {
+ write_lock(&trigger_data->lock);
+ trigger_data->interval = msecs_to_jiffies(value);
+ set_baseline_state(trigger_data); // resets timer
+ write_unlock(&trigger_data->lock);
+ ret = count;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR(interval, 0644, led_interval_show, led_interval_store);
+
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt,
+ void *dv)
+{
+ struct net_device *dev = dv;
+ struct led_netdev_data *trigger_data = container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ write_lock(&trigger_data->lock);
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ goto done;
+
+ if (evt == NETDEV_REGISTER) {
+ if (trigger_data->net_dev != NULL)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ trigger_data->link_up = 0;
+ goto done;
+ }
+
+ if (evt == NETDEV_UNREGISTER && trigger_data->net_dev != NULL) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ goto done;
+ }
+
+ /* UP / DOWN / CHANGE */
+
+ trigger_data->link_up = (evt != NETDEV_DOWN && netif_carrier_ok(dev));
+ set_baseline_state(trigger_data);
+
+done:
+ write_unlock(&trigger_data->lock);
+ return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void netdev_trig_timer(unsigned long arg)
+{
+ struct led_netdev_data *trigger_data = (struct led_netdev_data *)arg;
+ struct rtnl_link_stats64 *dev_stats;
+ unsigned new_activity;
+ struct rtnl_link_stats64 temp;
+
+ write_lock(&trigger_data->lock);
+
+ if (!trigger_data->link_up || !trigger_data->net_dev || (trigger_data->mode & (MODE_TX | MODE_RX)) == 0) {
+ /* we don't need to do timer work, just reflect link state. */
+ led_set_brightness(trigger_data->led_cdev, ((trigger_data->mode & MODE_LINK) != 0 && trigger_data->link_up) ? LED_FULL : LED_OFF);
+ goto no_restart;
+ }
+
+ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ ((trigger_data->mode & MODE_TX) ? dev_stats->tx_packets : 0) +
+ ((trigger_data->mode & MODE_RX) ? dev_stats->rx_packets : 0);
+
+ if (trigger_data->mode & MODE_LINK) {
+ /* base state is ON (link present) */
+ /* if there's no link, we don't get this far and the LED is off */
+
+ /* OFF -> ON always */
+ /* ON -> OFF on activity */
+ if (trigger_data->led_cdev->brightness == LED_OFF) {
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ } else if (trigger_data->last_activity != new_activity) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ }
+ } else {
+ /* base state is OFF */
+ /* ON -> OFF always */
+ /* OFF -> ON on activity */
+ if (trigger_data->led_cdev->brightness == LED_FULL) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ } else if (trigger_data->last_activity != new_activity) {
+ led_set_brightness(trigger_data->led_cdev, LED_FULL);
+ }
+ }
+
+ trigger_data->last_activity = new_activity;
+ mod_timer(&trigger_data->timer, jiffies + trigger_data->interval);
+
+no_restart:
+ write_unlock(&trigger_data->lock);
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ rwlock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ setup_timer(&trigger_data->timer, netdev_trig_timer, (unsigned long) trigger_data);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ trigger_data->interval = msecs_to_jiffies(50);
+ trigger_data->link_up = 0;
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_mode);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_mode;
+
+ register_netdevice_notifier(&trigger_data->notifier);
+ return;
+
+err_out_mode:
+ device_remove_file(led_cdev->dev, &dev_attr_mode);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_mode);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ write_lock(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+ write_unlock(&trigger_data->lock);
+
+ del_timer_sync(&trigger_data->timer);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Oliver Jowett