summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristian Pointner <equinox@helsinki.at>2024-05-10 18:26:46 (GMT)
committerChristian Pointner <equinox@helsinki.at>2024-05-10 18:26:46 (GMT)
commit627f7d488817e308d6f3a92fd9a877723ac7ae1d (patch)
tree554a3c53c90b20da5bd0da0c8da67a9b169bd10f
import snd-alpx V3.4.3
-rw-r--r--snd-alpx/.gitignore10
-rw-r--r--snd-alpx/Makefile115
-rw-r--r--snd-alpx/alpx.h268
-rw-r--r--snd-alpx/alpx_axcmem.c129
-rw-r--r--snd-alpx/alpx_axcmem.h115
-rw-r--r--snd-alpx/alpx_cards.c545
-rw-r--r--snd-alpx/alpx_cards.h27
-rw-r--r--snd-alpx/alpx_controls.c2096
-rw-r--r--snd-alpx/alpx_controls.h194
-rw-r--r--snd-alpx/alpx_core.c901
-rw-r--r--snd-alpx/alpx_gpio.c127
-rw-r--r--snd-alpx/alpx_gpio.h25
-rw-r--r--snd-alpx/alpx_led.h23
-rw-r--r--snd-alpx/alpx_mtd.c318
-rw-r--r--snd-alpx/alpx_mtd.h48
-rw-r--r--snd-alpx/alpx_proc.c116
-rw-r--r--snd-alpx/alpx_proc.h26
-rw-r--r--snd-alpx/alpx_reg.h619
-rw-r--r--snd-alpx/alpx_streams.c730
-rw-r--r--snd-alpx/alpx_streams.h62
-rw-r--r--snd-alpx/alpx_variants_882_apps_preFW240.h827
-rw-r--r--snd-alpx/alpx_variants_common.h97
-rw-r--r--snd-alpx/alpx_variants_dante.h227
-rw-r--r--snd-alpx/alpx_variants_dead.h45
-rw-r--r--snd-alpx/alpx_variants_madi.h89
-rw-r--r--snd-alpx/alpx_variants_mc.h1475
-rw-r--r--snd-alpx/alpx_variants_stereo.h709
-rw-r--r--snd-alpx/alpx_variants_stereo_apps_preFW283.h737
-rw-r--r--snd-alpx/alpx_version.h9
-rw-r--r--snd-alpx/alpx_xdma.c103
-rw-r--r--snd-alpx/alpx_xdma.h21
-rw-r--r--snd-alpx/alsa_conf/asound-stereo_alp882.conf154
-rw-r--r--snd-alpx/cdev_sgdma.h72
-rwxr-xr-xsnd-alpx/core/RedHat/4.18/dmaengine_pcm.h180
-rwxr-xr-xsnd-alpx/core/RedHat/4.18/pcm_dmaengine.c473
-rwxr-xr-xsnd-alpx/core/RedHat/5.14/dmaengine_pcm.h180
-rwxr-xr-xsnd-alpx/core/RedHat/5.14/pcm_dmaengine.c473
-rwxr-xr-xsnd-alpx/core/generic/4.19/dmaengine_pcm.h163
-rw-r--r--snd-alpx/core/generic/4.19/internal.h297
-rwxr-xr-xsnd-alpx/core/generic/4.19/pcm_dmaengine.c383
-rw-r--r--snd-alpx/core/generic/4.19/regmap-mmio.c399
-rw-r--r--snd-alpx/core/generic/5.12/dmaengine_pcm.h180
-rw-r--r--snd-alpx/core/generic/5.12/pcm_dmaengine.c462
-rw-r--r--snd-alpx/core/generic/5.14/internal.h340
-rw-r--r--snd-alpx/core/generic/5.14/regmap-mmio.c636
-rw-r--r--snd-alpx/core/generic/5.17/dmaengine_pcm.h178
-rw-r--r--snd-alpx/core/generic/5.17/pcm_dmaengine.c461
-rw-r--r--snd-alpx/core/generic/5.2/dmaengine_pcm.h159
-rw-r--r--snd-alpx/core/generic/5.2/pcm_dmaengine.c372
-rw-r--r--snd-alpx/core/generic/5.5/dmaengine_pcm.h164
-rw-r--r--snd-alpx/core/generic/5.5/pcm_dmaengine.c455
-rw-r--r--snd-alpx/core/generic/5.7/dmaengine_pcm.h164
-rw-r--r--snd-alpx/core/generic/5.7/pcm_dmaengine.c459
-rw-r--r--snd-alpx/core/generic/6.2/amd_xdma.h34
-rw-r--r--snd-alpx/core/generic/6.2/dmaengine.c1652
-rw-r--r--snd-alpx/core/generic/6.2/dmaengine.h201
-rw-r--r--snd-alpx/core/generic/6.2/virt-dma.c142
-rw-r--r--snd-alpx/core/generic/6.2/virt-dma.h227
-rw-r--r--snd-alpx/core/generic/6.2/xilinx/xdma-regs.h169
-rw-r--r--snd-alpx/core/generic/6.2/xilinx/xdma.c1437
-rw-r--r--snd-alpx/core/generic/6.3/amd_xdma.h34
-rw-r--r--snd-alpx/core/generic/6.3/dmaengine.h201
-rw-r--r--snd-alpx/core/generic/6.3/virt-dma.c142
-rw-r--r--snd-alpx/core/generic/6.3/virt-dma.h227
-rw-r--r--snd-alpx/core/generic/6.3/xilinx/xdma-regs.h169
-rw-r--r--snd-alpx/core/generic/6.3/xilinx/xdma.c1403
-rw-r--r--snd-alpx/dkms.conf10
-rw-r--r--snd-alpx/include/4.16/dmaengine.h174
-rw-r--r--snd-alpx/include/4.16/virt-dma.h221
-rw-r--r--snd-alpx/include/5.10/regmap.h1765
-rw-r--r--snd-alpx/include/5.14/regmap.h2041
-rw-r--r--snd-alpx/include/5.3/dmaengine.h174
-rw-r--r--snd-alpx/include/5.3/virt-dma.h222
-rw-r--r--snd-alpx/include/5.6/dmaengine.h185
-rw-r--r--snd-alpx/include/5.6/virt-dma.h227
-rw-r--r--snd-alpx/include/6.2/amd_xdma.h16
-rw-r--r--snd-alpx/include/6.2/dmaengine.h1637
-rw-r--r--snd-alpx/include/6.3/amd_xdma.h16
-rw-r--r--snd-alpx/include/6.3/dmaengine.h1637
-rw-r--r--snd-alpx/regmap-mmio.c9
-rw-r--r--snd-alpx/snd_alpx_xdma.c49
-rw-r--r--snd-alpx/snd_alpx_xdma.h24
-rwxr-xr-xsnd-alpx/tools/audio_card_update_firmware.sh99
-rwxr-xr-xsnd-alpx/tools/build-load-script.sh111
-rwxr-xr-xsnd-alpx/tools/build_driver_pkg.sh28
-rwxr-xr-xsnd-alpx/tools/build_virtual_board_alsa_conf.sh79
-rw-r--r--snd-alpx/udev/88-pulseaudio-no-alp.rules1
87 files changed, 32400 insertions, 0 deletions
diff --git a/snd-alpx/.gitignore b/snd-alpx/.gitignore
new file mode 100644
index 0000000..8696913
--- /dev/null
+++ b/snd-alpx/.gitignore
@@ -0,0 +1,10 @@
+*.o
+*.o.d
+*.ko
+*.cmd
+*.mod
+*.mod.c
+Module.symvers
+modules.order
+load_alp_driver.sh
+*.orig
diff --git a/snd-alpx/Makefile b/snd-alpx/Makefile
new file mode 100644
index 0000000..fbfafbd
--- /dev/null
+++ b/snd-alpx/Makefile
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Support for Digigram AlpX PCI-e boards
+#
+# Copyright (c) 2024 Digigram Digital (info@digigram.com)
+#
+
+#Get OS family out of /etc/os-release file, need "grep" command
+OS_FAMILY := $(strip $(filter-out ID, $(subst =, ,$(shell grep ^ID= /etc/os-release))))
+
+# *** GENERIC ***
+#UBUNTU
+ifneq ( ,$(findstring ubuntu, $(OS_FAMILY)))
+ KERNEL_FLAVOUR ?= KERNEL_GENERIC
+else ifneq ( ,$(findstring debian, $(OS_FAMILY)))
+ # Debian => Generic kernel
+ KERNEL_FLAVOUR ?= KERNEL_GENERIC
+else ifneq ( ,$(findstring rhel, $(OS_FAMILY)))
+# *** RedHat ***
+ KERNEL_FLAVOUR ?= KERNEL_REDHAT
+else ifneq ( ,$(findstring centos, $(OS_FAMILY)))
+# CentOs => RedHat
+ KERNEL_FLAVOUR ?= KERNEL_REDHAT
+else ifneq ( ,$(findstring fedora, $(OS_FAMILY)))
+ # fedora => RedHat
+ KERNEL_FLAVOUR ?= KERNEL_REDHAT
+else ifneq ( ,$(findstring rocky, $(OS_FAMILY)))
+ # Rocky => RedHat
+ KERNEL_FLAVOUR ?= KERNEL_REDHAT
+else
+# GENERIC kernel by default
+ KERNEL_FLAVOUR ?= KERNEL_GENERIC
+endif
+
+
+
+SND_ALPX_CCFLAGS += -DCONFIG_${KERNEL_FLAVOUR}
+
+
+KERNEL_VERSION ?= $(shell uname -r)
+KERNEL_PATH ?= /lib/modules/$(KERNEL_VERSION)/build
+
+LOAD_DRIVER_SCRIPT_NAME:="load_alp_driver.sh"
+BUILD_LOADER_SCRIPT_PATH:=./tools/build-load-script.sh
+
+#Special config, MUST check the consistency !
+ifeq ( ,$(findstring VARIANT_MADI, $(SND_ALPX_CCFLAGS)))
+SND_ALPX_CCFLAGS += -DCONFIG_ALP_MADI_PASSTHROUGH
+endif
+
+#Activate the MCU logs dump (at driver level)
+#SND_ALPX_CCFLAGS += -DCONFIG_ALPX_WITH_PROC_LOG
+
+#Activate CONTROLS Logs
+#SND_ALPX_CCFLAGS += -DCONFIG_ALPX_LOG_CONTROLS
+
+
+#The libXDMA Debug messages activation
+# SND_ALPX_CCFLAGS += -D__LIBXDMA_DEBUG__
+
+#Dump the DMA buffers(size in bytes, if 0 : all buffer).
+#SND_ALPX_CCFLAGS += -DCONFIG_ALPX_DUMP_DMA_BUFFER
+SND_ALPX_CCFLAGS += -DCONFIG_ALPX_DUMP_DMA_BUFFER_SIZE=128
+
+#Module revision log, need to disable the date-time warning
+SND_ALPX_CCFLAGS += -Wno-error=date-time -Wno-date-time
+
+#Enable the GPIO support in the card
+ifeq (true,$(CONFIG_WITHOUT_GPIO))
+SND_ALPX_CCFLAGS += -DCONFIG_WITHOUT_GPIO
+else
+OPTIONNAL_UNIT += alpx_gpio.o
+endif
+
+
+#Activate The REG Debug feature
+#SND_ALPX_CCFLAGS += -DWITH_REG_DEBUG
+
+ccflags-y += ${SND_ALPX_CCFLAGS}
+
+snd-alpx-xdma-y := snd_alpx_xdma.o
+snd-alpx-y := $(OPTIONNAL_UNIT) alpx_axcmem.o alpx_core.o alpx_proc.o alpx_mtd.o alpx_controls.o alpx_streams.o alpx_cards.o alpx_xdma.o
+
+#ALPX module in first
+obj-m := snd-alpx.o
+
+# XDMA Module creation
+obj-m += snd-alpx-xdma.o
+
+#Embed regmap-mmio in XDMA module if not enabled, use the 4.19 version as this is the base required for XDMA support
+ifndef CONFIG_REGMAP_MMIO
+snd-alpx-xdma-y += core/generic/4.19/regmap-mmio.o
+endif
+
+all: load-script
+ $(MAKE) -C $(KERNEL_PATH) M=$(CURDIR) modules
+
+clean:
+ $(MAKE) -C $(KERNEL_PATH) M=$(CURDIR) clean
+ rm -f ${LOAD_DRIVER_SCRIPT_NAME}
+
+.PHONY: clean dump_env
+
+#Add chmod to handle the exec right to the script.
+load-script:
+ chmod 744 ${BUILD_LOADER_SCRIPT_PATH} && exec ${BUILD_LOADER_SCRIPT_PATH} ${LOAD_DRIVER_SCRIPT_NAME}
+
+dump_env:
+ @echo "SND_ALPX_CCFLAGS: $(SND_ALPX_CCFLAGS)"
+ @echo "obj-m: $(obj-m)"
+ @echo "snd-alpx obj: $(snd-alpx-y)"
+ @echo "config regmap: $(CONFIG_REGMAP_MMIO)"
+ @echo "GCC :" CONFIG_CC_VERSION_TEXT
+ @echo "GCC :" $(CONFIG_CC_VERSION_TEXT)
+ @echo "CONFIG_WITHOUT_GPIO: $(CONFIG_WITHOUT_GPIO)"
diff --git a/snd-alpx/alpx.h b/snd-alpx/alpx.h
new file mode 100644
index 0000000..2cad9b9
--- /dev/null
+++ b/snd-alpx/alpx.h
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_H_
+#define _ALPX_H_
+
+
+#include "alpx_reg.h"
+#include "alpx_streams.h"
+
+#include <linux/version.h>
+
+#if !defined (CONFIG_WITHOUT_GPIO)
+#define ALPX_WITH_GPIO
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <sound/pcm.h>
+
+/* Values */
+
+#define CARD_NAME "Digigram AlpX"
+
+#define ALPX_VARIANT_FEATURE_GPIOS BIT(0)
+
+/* Structures */
+
+enum alpx_variant_model {
+ ALPX_VARIANT_DEAD,
+ ALPX_VARIANT_MODEL_MADI,
+ ALPX_VARIANT_MODEL_MADI_LOOPBACK,
+ ALPX_VARIANT_MODEL_ALP222,
+ ALPX_VARIANT_MODEL_ALP222_MIC,
+ ALPX_VARIANT_MODEL_ALP882,
+ ALPX_VARIANT_MODEL_ALP882_MIC,
+ ALPX_VARIANT_MODEL_ALP442,
+ ALPX_VARIANT_MODEL_ALP442_MIC,
+ ALPX_VARIANT_MODEL_ALPDANTE,
+};
+
+/* Flash partitions IDs */
+enum ALPX_FLASH_PARTITION_ID {
+ ALPX_FLASH_PARTITION_GOLDEN_ID = 0,
+ ALPX_FLASH_PARTITION_FW_ID,
+ ALPX_FLASH_PARTITION_INIT_CFG_ID,
+ ALPX_FLASH_PARTITION_USER_CFG_ID,
+ ALPX_FLASH_PARTITION_PRODUCTION_ID,
+ ALPX_FLASH_PARTITION_QTY
+};
+
+/* Flash partitions IDs in DEAD mode ! only production */
+enum ALPX_DEAD_FLASH_PARTITION_ID {
+ ALPX_DEAD_FLASH_PARTITION_GOLDEN_PROD_ID = 0,
+ ALPX_DEAD_FLASH_PARTITION_PRODUCTION_ID,
+ ALPX_DEAD_FLASH_PARTITION_QTY
+};
+
+/* Structures */
+
+
+struct alpx_variant_gpios {
+ unsigned int base;
+ unsigned int inputs_reg_offset;
+ unsigned int inputs_qty;
+ unsigned int outputs_reg_offset;
+ unsigned int outputs_qty;
+};
+
+struct alpx_flash_partitions {
+ struct mtd_partition* partitions;
+ uint32_t qty;
+ uint32_t qty_for_fw_update;
+};
+
+struct alpx_variant {
+ enum alpx_variant_model model;
+ const char *shortname;
+ const char *longname;
+ const char *mixername;
+ u64 features;
+
+ struct alpx_control_descriptor *control_descriptors;
+ unsigned int control_descriptors_count;
+
+ struct snd_pcm_hardware* playback_hw;
+ struct snd_pcm_hardware* capture_hw;
+ struct alpx_variant_gpios gpios;
+ uint32_t flash_golden_production_base;
+ struct alpx_flash_partitions flash_partitions;
+};
+
+
+struct alpx_config {
+ spinlock_t lock;
+ unsigned int users;
+
+ unsigned int rate;
+};
+
+struct alpx_identity {
+ uint16_t sub_system_id; /* PCIe sub-system Id as read out of Production area*/
+ uint32_t ver_fpga; /* FPGA version */
+ uint32_t ver_mcu; /* MCU version.revision !! */
+ uint64_t serial_number; /* Card full identifier tag */
+};
+
+struct alpx_device {
+ struct device *dev;
+ struct pci_dev *pci_dev;
+#if defined(ALPX_WITH_GPIO)
+ struct gpio_chip gpio_chip;
+#endif
+ void *base;
+
+ struct platform_device *xdma_pdev;
+ struct alpx_config config;
+
+ struct alpx_control *controls;
+ unsigned int controls_index;
+ unsigned int controls_count;
+
+ struct alpx_pipe capture;
+ struct alpx_pipe playback;
+
+ const struct alpx_variant *variant;
+
+ struct mutex proc_mutex;
+
+ struct mtd_info mtd_info;
+#ifdef WITH_REG_DEBUG
+ uint32_t dbg_reg_offset;
+#endif
+ struct alpx_identity identity;
+};
+
+/* Constants */
+
+// The flash chip size (in bytes).
+#define ALPX_FLASH_CHIP_SIZE 0x800000
+
+// Sector size conversions
+#define ALPX_FLASH_SECTOR_SHIFT 12
+#define ALPX_FLASH_SECTOR_SIZE (1 << ALPX_FLASH_SECTOR_SHIFT)
+
+/* Amplifiers gains */
+#define ALP_AMPLIFIER_GAIN_MIN_cdB -9010
+#define ALP_AMPLIFIER_GAIN_MAX_cdB 1200
+
+#define ALP_AMPLIFIER_GAIN_MIN_REG 0
+#define ALP_AMPLIFIER_GAIN_MAX_REG 1021
+
+/* MIC Gains range in dB */
+#define ALP222_MIC_GAINS_MIN_REG_VAL 10
+#define ALP222_MIC_GAINS_MAX_REG_VAL 65
+#define ALP222_MIC_REG_GAIN_SHIFT 9
+#define ALP222_MIC_GAIN_MIN_cdB 1000
+#define ALP222_MIC_GAIN_MAX_cdB 6500
+
+
+/* ALP222 ANALOG Equalization amplifiers */
+#define ALP222_ANALOG_EQ_GAIN_MIN_cdB -8800
+#define ALP222_ANALOG_EQ_GAIN_MAX_cdB 3900
+
+#define ALP222_ANALOG_EQ_GAIN_MIN_REG 0
+#define ALP222_ANALOG_EQ_GAIN_MAX_REG 255
+
+/* Registers offset range : from 0x60000 up to 0x7C200*/
+#define ALP222_MIN_REG_OFFSET 0x60000
+#define ALP222_MAX_REG_OFFSET 0x7C200
+
+/* Special revisions */
+/* -The design version is 1.5- */
+#define ALP222_1_5_DESIGN_VERSION ((1<<16)|(5))
+
+/* BUILD version register appears in version :... */
+#define ALP222_DESIGN_WITH_BUILD_REGISTER_VERSION ALP222_1_5_DESIGN_VERSION
+
+
+/* Range of the LINE/MIC gains (in register domain)*/
+#define ALPMC_LINE_ANALOG_GAIN_MIN_REG 0
+#define ALPMC_LINE_ANALOG_GAIN_MIN_cdB -2400
+
+#define ALPMC_MIC_ANALOG_GAIN_MIN_REG ALPMC_LINE_ANALOG_GAIN_MIN_REG
+#define ALPMC_MIC_ANALOG_GAIN_MIN_cdB ALPMC_LINE_ANALOG_GAIN_MIN_cdB
+
+
+/* Gain formula : G = (0.5*(N-1)-24), N : register value */
+/* 161 => +56dB */
+#define ALPMC_MIC_ANALOG_GAIN_MAX_REG 162
+#define ALPMC_MIC_ANALOG_GAIN_MAX_cdB 5600
+
+/* 82 => +16dB */
+#define ALPMC_LINE_ANALOG_GAIN_MAX_REG 82
+#define ALPMC_LINE_ANALOG_GAIN_MAX_cdB 1600
+
+static bool inline alpx_is_madi(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_MADI ||
+ alpx_dev->variant->model == ALPX_VARIANT_MODEL_MADI_LOOPBACK;
+}
+
+static bool inline alpx_is_222_line(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP222;
+}
+
+static bool inline alpx_is_222_mic(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP222_MIC;
+}
+
+static bool inline alpx_is_222(struct alpx_device *alpx_dev)
+{
+ return alpx_is_222_line(alpx_dev) || alpx_is_222_mic(alpx_dev);
+}
+
+static bool inline alpx_is_882_line(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP882;
+}
+
+static bool inline alpx_is_882_mic(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP882_MIC;
+}
+
+static bool inline alpx_is_882(struct alpx_device *alpx_dev)
+{
+	return alpx_is_882_line(alpx_dev) || alpx_is_882_mic(alpx_dev);
+}
+
+static bool inline alpx_is_442_line(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP442;
+}
+
+static bool inline alpx_is_442_mic(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP442_MIC;
+}
+
+static bool inline alpx_is_442(struct alpx_device *alpx_dev)
+{
+	return alpx_is_442_line(alpx_dev) || alpx_is_442_mic(alpx_dev);
+}
+
+static bool inline alpx_is_multichannel(struct alpx_device *alpx_dev)
+{
+ return alpx_is_882(alpx_dev) || alpx_is_442(alpx_dev);
+}
+
+static bool inline alpx_is_dante (struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALPDANTE;
+}
+
+unsigned int
+alpx_get_samples_counter(struct alpx_device *alpx_dev);
+
+#endif
diff --git a/snd-alpx/alpx_axcmem.c b/snd-alpx/alpx_axcmem.c
new file mode 100644
index 0000000..4966b10
--- /dev/null
+++ b/snd-alpx/alpx_axcmem.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include "alpx_axcmem.h"
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <asm/byteorder.h>
+
+/* Items sizes in bytes*/
+static const unsigned int AXCMEM_PAGE_SIZE = 32;
+static const unsigned int AXCMEM_LINE_SIZE = 4;
+
+/* AxCMem area offset */
+static const uint32_t AXCMEM_AREA_OFFSET = 0x72000;
+
+
+void* alpx_axcmem_getRegAddr(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* loc)
+{
+ dev_dbg(alp->dev," BAR:%p, loc{%d:%d:%d}\n",
+ alp->base,
+ loc->page, loc->line, loc->row);
+
+ return (uint8_t*)alp->base +
+ AXCMEM_AREA_OFFSET +
+ loc->page * AXCMEM_PAGE_SIZE +
+ loc->line * AXCMEM_LINE_SIZE +
+ loc->row;
+}
+
+void* alpx_axcmem_getPointedRegAddrByRefLoc(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* ref_loc,
+ const struct alpx_axcmem_loc* loc)
+{
+ void* const ref_reg_addr = alpx_axcmem_getRegAddr(alp, ref_loc);
+
+ dev_dbg(alp->dev, "BAR:%p, base:%p=>%d, loc{%d:%d:%d}\n",
+ alp->base, ref_reg_addr, alpx_axcmem_getRegU8Value(ref_reg_addr),
+ loc->page, loc->line, loc->row);
+
+ return (uint8_t*)alp->base + AXCMEM_AREA_OFFSET + alpx_axcmem_getRegU8Value(ref_reg_addr) * AXCMEM_PAGE_SIZE +
+ loc->page * AXCMEM_PAGE_SIZE +
+ loc->line * AXCMEM_LINE_SIZE +
+ loc->row;
+}
+
+int alpx_acxmem_getByteArrayByRefLoc(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* ref_loc,
+ const struct alpx_axcmem_loc* loc,
+ unsigned char* dst,
+ unsigned int length)
+{
+ unsigned int idx = 0;
+ const unsigned char* src = NULL;
+
+ if ((alp == NULL) ||
+ (ref_loc == NULL) ||
+ (loc == NULL) ||
+ (dst == NULL)) {
+ return -EINVAL;
+ }
+
+ if (length == 0)
+ return 0;
+
+ src = (unsigned char*) alpx_axcmem_getPointedRegAddrByRefLoc(alp, ref_loc, loc);
+
+ for (idx = 0 ; idx < length ; ++idx) {
+ dst[idx] = src[idx];
+ dev_dbg(alp->dev, " src[%d]: 0x%02x => dst[%d]: 0x%02x\n", idx, src[idx], idx, dst[idx]);
+ }
+
+ return 0;
+}
+
+uint32_t alpx_axcmem_getRegU8Value_ptr(void* addr)
+{
+ return alpx_axcmem_getRegU8Value(addr);
+}
+
+void alpx_axcmem_setRegU8Value_ptr(void* addr, uint32_t value)
+{
+ alpx_axcmem_setRegU8Value(addr, value);
+}
+
+uint32_t alpx_axcmem_getRegU16Value_ptr(void* addr)
+{
+ return alpx_axcmem_getRegU16Value(addr);
+}
+
+void alpx_axcmem_setRegU16Value_ptr(void* addr, uint32_t value)
+{
+ alpx_axcmem_setRegU16Value(addr, value);
+}
+
+uint32_t alpx_axcmem_getRegBEU16Value_ptr(void* addr)
+{
+ return alpx_axcmem_getRegBEU16Value(addr);
+}
+
+void alpx_axcmem_setRegBEU16Value_ptr(void* addr, uint32_t value)
+{
+ alpx_axcmem_setRegBEU16Value(addr, value);
+}
+
+uint32_t alpx_axcmem_getRegU32Value_ptr(void* addr)
+{
+ return alpx_axcmem_getRegU32Value(addr);
+}
+
+void alpx_axcmem_setRegU32Value_ptr(void* addr, uint32_t value)
+{
+ alpx_axcmem_setRegU32Value(addr, value);
+}
+
+uint32_t alpx_axcmem_getRegBEU32Value_ptr(void* addr)
+{
+ return alpx_axcmem_getRegBEU32Value(addr);
+}
+
+void alpx_axcmem_setRegBEU32Value_ptr(void* addr, uint32_t value)
+{
+ alpx_axcmem_setRegBEU32Value(addr, value);
+}
diff --git a/snd-alpx/alpx_axcmem.h b/snd-alpx/alpx_axcmem.h
new file mode 100644
index 0000000..20b35a0
--- /dev/null
+++ b/snd-alpx/alpx_axcmem.h
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_AXCMEM_H_
+#define _ALPX_AXCMEM_H_
+
+#include "alpx.h"
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+
+/** Types **/
+
+/* Register's location in AXCMEM area */
+struct alpx_axcmem_loc
+{
+ uint32_t page; /* page index, absolute or relative to a page group */
+ uint32_t line; /* line index */
+ uint32_t row; /* Row index */
+};
+
+/** Services **/
+/* Navigation */
+
+/* return the register's address given the alp device and register's location */
+void* alpx_axcmem_getRegAddr(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* loc);
+
+/* Return the register's address in a pointed page through the given reference register's location, base is the address of the used PCI BAR */
+void* alpx_axcmem_getPointedRegAddrByRefLoc(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* ref_loc,
+ const struct alpx_axcmem_loc* loc);
+
+
+
+/* Values, these functions check the value alignment ! Use assertion !*/
+/* Values Little Endian by default, "BE" for big endian */
+static inline uint8_t alpx_axcmem_getRegU8Value(void* addr)
+{
+ return readb(addr);
+}
+
+static inline void alpx_axcmem_setRegU8Value(void* addr, uint8_t value)
+{
+ writeb(value, addr);
+}
+
+static inline uint16_t alpx_axcmem_getRegU16Value(void* addr)
+{
+ return le16_to_cpu(readw(addr));
+}
+
+static inline void alpx_axcmem_setRegU16Value(void* addr, uint16_t value)
+{
+ writew(cpu_to_le16(value), addr);
+}
+
+static inline uint16_t alpx_axcmem_getRegBEU16Value(void* addr)
+{
+ return be16_to_cpu(alpx_axcmem_getRegU16Value(addr));
+
+}
+
+static inline void alpx_axcmem_setRegBEU16Value(void* addr, uint16_t value)
+{
+ writew(cpu_to_be16(value), addr);
+}
+
+
+static inline uint32_t alpx_axcmem_getRegU32Value(void* addr)
+{
+ return le32_to_cpu(readl(addr));
+}
+
+static inline void alpx_axcmem_setRegU32Value(void* addr, uint32_t value)
+{
+
+ writel(cpu_to_le32(value), addr);
+}
+
+static inline uint32_t alpx_axcmem_getRegBEU32Value(void* addr)
+{
+ return be32_to_cpu(readl(addr));
+}
+
+static inline void alpx_axcmem_setRegBEU32Value(void* addr, uint32_t value)
+{
+
+ writel(cpu_to_be32(value), addr);
+}
+/* Same but can be used as ptr : not inlined*/
+uint32_t alpx_axcmem_getRegU8Value_ptr(void* addr);
+void alpx_axcmem_setRegU8Value_ptr(void* addr, uint32_t value);
+uint32_t alpx_axcmem_getRegU16Value_ptr(void* addr);
+void alpx_axcmem_setRegU16Value_ptr(void* addr, uint32_t value);
+uint32_t alpx_axcmem_getRegBEU16Value_ptr(void* addr);
+void alpx_axcmem_setRegBEU16Value_ptr(void* addr, uint32_t value);
+uint32_t alpx_axcmem_getRegU32Value_ptr(void* addr);
+void alpx_axcmem_setRegU32Value_ptr(void* addr, uint32_t value);
+uint32_t alpx_axcmem_getRegBEU32Value_ptr(void* addr);
+void alpx_axcmem_setRegBEU32Value_ptr(void* addr, uint32_t value);
+
+
+int alpx_acxmem_getByteArrayByRefLoc(struct alpx_device* alp,
+ const struct alpx_axcmem_loc* ref_loc,
+ const struct alpx_axcmem_loc* loc,
+ unsigned char* dst,
+ unsigned int length);
+
+#endif /*_ALPX_AXCMEM_H_*/
diff --git a/snd-alpx/alpx_cards.c b/snd-alpx/alpx_cards.c
new file mode 100644
index 0000000..7f9ec3a
--- /dev/null
+++ b/snd-alpx/alpx_cards.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include <linux/pci.h>
+
+#include "alpx_cards.h"
+#include "alpx_reg.h"
+#include "alpx_mtd.h"
+#include "alpx_axcmem.h"
+#include "alpx_variants_dante.h"
+
+#include <linux/delay.h>
+
+void alpstereo_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label)
+{
+ const unsigned int design_revision = readl(ALPX_REG(alpx_dev, ALP, CONTROL, DESIGN_VERSION));
+
+ //Add Build information, only for design >= 1.8
+ if (design_revision >= ALP222_DESIGN_WITH_BUILD_REGISTER_VERSION) {
+ const unsigned int build_version = alpx_dev->identity.ver_fpga;
+ const unsigned int ctrl_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, CONTROL, VERSION));
+ const unsigned int mxr_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, MIXER, VERSION));
+ const unsigned int ampli_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, AMPLI_IN, VERSION));
+ const unsigned int ampli_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, AMPLI_OUT, VERSION));
+ const unsigned int proc_revision = alpx_dev->identity.ver_mcu;
+
+ dev_info( alpx_dev->dev,\
+ "\n************ \n"
+ "%s Alp Stereo %s soundcard[%d]{#%llu} : %s - %s (%s - 0x%04x), \n\t== Revisions ==\n\t* Build: \t\t%u\n\t* Design: \t\t%u.%lu\n\t* Control: \t\t%u.%lu\n\t* Ampli In:\t\t%u.%lu\n\t* Mixer: \t\t%u.%lu\n"
+ "\t* Ampli Out:\t\t%u.%lu\n\t* MCU:\t\t\t%u.%lu\n"
+ "************\n",
+ label,
+ alpx_is_222_mic(alpx_dev) ? "MIC":"",
+ card->number,
+ alpx_dev->identity.serial_number,
+ card->id,
+ card->shortname,
+ card->longname,
+ alpx_dev->identity.sub_system_id,
+ build_version,
+ ALPX_COMMON_VERSION_VERSION(design_revision),
+ ALPX_COMMON_VERSION_REVISION(design_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ctrl_revision),
+ ALPX_COMMON_VERSION_REVISION(ctrl_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_in_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_in_revision),
+
+ ALPX_COMMON_VERSION_VERSION(mxr_revision),
+ ALPX_COMMON_VERSION_REVISION(mxr_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_out_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(proc_revision),
+ ALPX_COMMON_VERSION_REVISION(proc_revision)
+ );
+ }
+ else {
+ const unsigned int ctrl_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, CONTROL, VERSION));
+ const unsigned int mxr_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, MIXER, VERSION));
+ const unsigned int ampli_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, AMPLI_IN, VERSION));
+ const unsigned int ampli_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALP222, AMPLI_OUT, VERSION));
+ const unsigned int proc_revision = readl(ALPX_REG(alpx_dev, ALP, PROC, VERSION));
+dev_info(alpx_dev->dev, "Serial# : %llu\n", alpx_dev->identity.serial_number);
+ dev_info( alpx_dev->dev,\
+ "\n************ \n"
+ "%s Alp222 soundcard[%d] {#%llu} : %s - %s (%s - 0x%04x), \n\t== Revisions ==\n\t* Design: \t\t%u.%lu\n\t* Control: \t\t%u.%lu\n\t* Ampli In:\t\t%u.%lu\n\t* Mixer: \t\t%u.%lu\n"
+ "\t* Ampli Out:\t\t%u.%lu\n\t* MCU:\t\t\t%u.%lu\n"
+ "************\n",
+ label,
+ card->number,
+ alpx_dev->identity.serial_number,
+ card->id,
+ card->shortname,
+ card->longname,
+ alpx_dev->identity.sub_system_id,
+ ALPX_COMMON_VERSION_VERSION(design_revision),
+ ALPX_COMMON_VERSION_REVISION(design_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ctrl_revision),
+ ALPX_COMMON_VERSION_REVISION(ctrl_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_in_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_in_revision),
+
+ ALPX_COMMON_VERSION_VERSION(mxr_revision),
+ ALPX_COMMON_VERSION_REVISION(mxr_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_out_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(proc_revision),
+ ALPX_COMMON_VERSION_REVISION(proc_revision)
+ );
+ }
+}
+
+void alpmadi_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label)
+{
+
+ const unsigned int ctrl_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, CONTROL, VERSION));
+ const unsigned int mxr_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, MIXER, VERSION));
+ const unsigned int meas_daw_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, MEAS_DAW_IN, VERSION));
+ const unsigned int meas_daw_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, MEAS_DAW_OUT, VERSION));
+ const unsigned int meas_madi_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, MEAS_MADI_IN, VERSION));
+ const unsigned int meas_madi_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, MEAS_MADI_OUT, VERSION));
+ const unsigned int gain_madi_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, GAIN_MADI_IN, VERSION));
+ const unsigned int gain_madi_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, GAIN_MADI_OUT, VERSION));
+ const unsigned int gain_daw_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, GAIN_DAW_IN, VERSION));
+ const unsigned int gain_daw_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, GAIN_DAW_OUT, VERSION));
+ const unsigned int router_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMADI, ROUTER_IN, VERSION));
+
+ dev_info( alpx_dev->dev,\
+ "************ \n"
+ "%s AlpMadi soundcard[%d]{#%llu} : %s - %s (%s - 0x%04x), \n\t== Revisions ==\n\t * Control: \t\t%u.%lu\n\t * Mixer: \t\t%u.%lu\n\t * Gain Daw In:\t%u.%lu\n\t * Gain Daw Out:\t%u.%lu\n\t * Gain MADI In:\t\t%u.%lu\n\t * Gain MADI Out:\t%u.%lu\n\t * Meas DAW In:\t%u.%lu\n\t * Meas Daw Out:\t%u.%lu\n\t * Meas MADI In:\t\t%u.%lu\n\t * Meas MADI Out:\t%u.%lu\n\t * Router:\t\t%u.%lu\n"
+ "************\n",
+ label,
+ card->number,
+ alpx_dev->identity.serial_number,
+ card->id,
+ card->shortname,
+ card->longname,
+ alpx_dev->identity.sub_system_id,
+ ALPX_COMMON_VERSION_VERSION(ctrl_revision),
+ ALPX_COMMON_VERSION_REVISION(ctrl_revision),
+
+ ALPX_COMMON_VERSION_VERSION(mxr_revision),
+ ALPX_COMMON_VERSION_REVISION(mxr_revision),
+
+ ALPX_COMMON_VERSION_VERSION(gain_daw_in_revision),
+ ALPX_COMMON_VERSION_REVISION(gain_daw_in_revision),
+ ALPX_COMMON_VERSION_VERSION(gain_daw_out_revision),
+ ALPX_COMMON_VERSION_REVISION(gain_daw_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(gain_madi_in_revision),
+ ALPX_COMMON_VERSION_REVISION(gain_madi_in_revision),
+ ALPX_COMMON_VERSION_VERSION(gain_madi_out_revision),
+ ALPX_COMMON_VERSION_REVISION(gain_madi_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(meas_daw_in_revision),
+ ALPX_COMMON_VERSION_REVISION(meas_daw_in_revision),
+ ALPX_COMMON_VERSION_VERSION(meas_daw_out_revision),
+ ALPX_COMMON_VERSION_REVISION(meas_daw_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(meas_madi_in_revision),
+ ALPX_COMMON_VERSION_REVISION(meas_madi_in_revision),
+ ALPX_COMMON_VERSION_VERSION(meas_madi_out_revision),
+ ALPX_COMMON_VERSION_REVISION(meas_madi_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(router_in_revision),
+ ALPX_COMMON_VERSION_REVISION(router_in_revision));
+}
+
+void alpmultichan_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label)
+{
+ const unsigned int design_revision = readl(ALPX_REG(alpx_dev, ALP, CONTROL, DESIGN_VERSION));
+ const unsigned int build_version = alpx_dev->identity.ver_fpga;
+ const unsigned int ctrl_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMC, CONTROL, VERSION));
+ const unsigned int mxr_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMC, MIXER, VERSION));
+ const unsigned int ampli_in_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMC, AMPLI_IN, VERSION));
+ const unsigned int ampli_out_revision = readl(ALPX_COMMON_REG(alpx_dev, ALPMC, AMPLI_OUT, VERSION));
+ const unsigned int proc_revision = alpx_dev->identity.ver_mcu;
+
+ dev_info( alpx_dev->dev,\
+ "\n************ \n"
+ "%s Alp multi-channels soundcard[%d]{#%llu} : %s - %s (%s - 0x%04x) , \n\t== Revisions ==\n\t* Build: \t\t%u\n\t* Design: \t\t%u.%lu\n\t* Control: \t\t%u.%lu\n\t* Ampli In:\t\t%u.%lu\n\t* Mixer: \t\t%u.%lu\n"
+ "\t* Ampli Out:\t\t%u.%lu\n\t* MCU:\t\t\t%u.%lu\n"
+ "************\n",
+ label,
+ card->number,
+ alpx_dev->identity.serial_number,
+ card->id,
+ card->shortname,
+ card->longname,
+ alpx_dev->identity.sub_system_id,
+ build_version,
+ ALPX_COMMON_VERSION_VERSION(design_revision),
+ ALPX_COMMON_VERSION_REVISION(design_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ctrl_revision),
+ ALPX_COMMON_VERSION_REVISION(ctrl_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_in_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_in_revision),
+
+ ALPX_COMMON_VERSION_VERSION(mxr_revision),
+ ALPX_COMMON_VERSION_REVISION(mxr_revision),
+
+ ALPX_COMMON_VERSION_VERSION(ampli_out_revision),
+ ALPX_COMMON_VERSION_REVISION(ampli_out_revision),
+
+ ALPX_COMMON_VERSION_VERSION(proc_revision),
+ ALPX_COMMON_VERSION_REVISION(proc_revision));
+}
+
+static int alpx_card_read_serial_number(struct alpx_device* alpx_dev)
+{
+ unsigned char raw_serial[9];
+ int ret = alpx_mtd_read_shared(alpx_dev, ALPX_SERIAL_OFFSET_IN_PRODUCTION,
+ raw_serial,
+ sizeof(raw_serial) - 1);
+
+ raw_serial[sizeof(raw_serial) - 1] = 0;
+
+ if (ret){
+ dev_err(alpx_dev->dev, "Error %d when reading Shared Area\n", ret);
+ /* DEFAULT VALUES */
+ alpx_dev->identity.serial_number = (~ 0);
+ return -EIO;
+ }
+
+ dev_dbg(alpx_dev->dev, " %s() : BEFORE shift %llu // 0x%llx\n", __func__, (uint64_t)raw_serial, (uint64_t)raw_serial);
+
+ //Conversion RAW => Little endian (not use be64_to_cpu() ?? Should be tested later)
+ alpx_dev->identity.serial_number = ((uint64_t)raw_serial[0])<<32 | ((uint64_t)raw_serial[1]) << 24 |
+ ((uint64_t)raw_serial[2]) << 16 |
+ ((uint64_t)raw_serial[3]) << 8 |
+ ((uint64_t)raw_serial[4]);
+
+ return 0;
+}
+
+int alpxxx_finalize_identity(struct alpx_device* alpx_dev)
+{
+ alpx_dev->identity.ver_mcu = readl(ALPX_REG(alpx_dev, ALP, PROC, VERSION));
+ alpx_dev->identity.ver_fpga = readl(ALPX_REG(alpx_dev, ALP, CONTROL, BUILD_VERSION)) & ALP_CONTROL_BUILD_VERSION_MASK;
+ return 0;
+}
+
+int alpstereo_finalize_identity(struct alpx_device* alpx_dev)
+{
+
+	int ret = alpxxx_finalize_identity(alpx_dev);
+	if (ret){
+		dev_err(alpx_dev->dev, "Error %d when finalizing base identity\n", ret);
+		return ret;
+	}
+
+	ret = alpx_mtd_load_shared_from(alpx_dev,
+					alpx_dev->variant->flash_partitions.partitions[ALPX_FLASH_PARTITION_PRODUCTION_ID].offset);
+
+	/* DEFAULT VALUES */
+	alpx_dev->identity.sub_system_id = (~ 0);
+	alpx_dev->identity.serial_number = (~ 0);
+
+	if (ret){
+		dev_err(alpx_dev->dev, "Error %d when extracting Production Area\n", ret);
+		return -EIO;
+	}
+
+	alpx_card_read_serial_number(alpx_dev);
+	alpx_dev->identity.sub_system_id = alpx_dev->pci_dev->subsystem_device;
+
+	return 0;
+}
+
+/* Finalize a multichannel card's identity: base MCU/FPGA versions, then the
+ * serial number from the flash production area.
+ * Returns 0 on success, -EIO if the production area cannot be read. */
+int alpmultichan_finalize_identity(struct alpx_device* alpx_dev)
+{
+	int ret = alpxxx_finalize_identity(alpx_dev);
+	if (ret){
+		dev_err(alpx_dev->dev, "Error %d when finalizing base identity\n", ret);
+		return ret;
+	}
+
+	ret = alpx_mtd_load_shared_from(alpx_dev,
+					alpx_dev->variant->flash_partitions.partitions[ALPX_FLASH_PARTITION_PRODUCTION_ID].offset);
+
+	/* DEFAULT VALUES */
+	/* Subsystem id comes from PCI config space even if the flash read
+	 * below failed (unlike the stereo variant, which defaults it to ~0). */
+	alpx_dev->identity.sub_system_id = alpx_dev->pci_dev->subsystem_device;
+
+	if (ret){
+		dev_err(alpx_dev->dev, "Error %d when extracting Production Area\n", ret);
+		return -EIO;
+	}
+
+	/* Return value deliberately ignored: the helper installs a default
+	 * serial number on failure. NOTE(review): confirm best-effort intent. */
+	alpx_card_read_serial_number(alpx_dev);
+
+	return 0;
+}
+
+/* Finalize a Dante card's identity. All values come from AXCMem registers
+ * (FIR, CSPPR and the registers the CSPPR block points to), not from flash.
+ * Always returns 0. */
+int alpdante_finalize_identity(struct alpx_device* alpx_dev)
+{
+	void* fir_reg = NULL;
+	void* csppr_reg = NULL;
+	void* ublaze_version_reg = NULL;
+	void* design_bloc_version_reg = NULL;
+	void* low_serial_reg = NULL;
+	void* high_serial_reg = NULL;
+
+	uint32_t low_serial_raw = 0;
+	uint32_t high_serial_raw = 0;
+
+	dev_info(alpx_dev->dev, "DANTE card detected, finalizing identity\n");
+	/* All this is extracted from AXcMEM registers */
+
+	fir_reg = alpx_axcmem_getRegAddr(alpx_dev, &ALPDANTE_FIR_LOC);
+
+	csppr_reg = alpx_axcmem_getRegAddr(alpx_dev, &ALPDANTE_CSPPR_LOC);
+
+	/* The remaining registers are reached indirectly through CSPPR. */
+	ublaze_version_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_UBLAZE_VERSION_LOC);
+
+	design_bloc_version_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_DESIGN_BLOC_VERSION_LOC);
+
+	low_serial_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_LOW_SERIAL_NUM_LOC);
+
+	high_serial_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_HIGH_SERIAL_NUM_LOC);
+
+
+	dev_dbg(alpx_dev->dev, "FIR[0x%lx]=>%d\n", (uint8_t*)fir_reg - (uint8_t*)alpx_dev->base, alpx_axcmem_getRegBEU16Value(fir_reg));
+
+	dev_dbg(alpx_dev->dev, "CSPPR[0x%lx]=> %d\n", (uint8_t*)csppr_reg - (uint8_t*)alpx_dev->base, alpx_axcmem_getRegU8Value(csppr_reg));
+
+	dev_dbg(alpx_dev->dev, "uBlaze Version[0x%lx]=> %d\n", (uint8_t*)ublaze_version_reg - (uint8_t*)alpx_dev->base, alpx_axcmem_getRegU16Value(ublaze_version_reg));
+
+	dev_dbg(alpx_dev->dev, "Design bloc Version[0x%lx]=> %d\n", (uint8_t*)design_bloc_version_reg - (uint8_t*)alpx_dev->base, alpx_axcmem_getRegU16Value(design_bloc_version_reg));
+
+	/* Note: the FPGA version is the FIR register read big-endian. */
+	alpx_dev->identity.ver_mcu = alpx_axcmem_getRegU16Value(ublaze_version_reg);
+	alpx_dev->identity.ver_fpga = alpx_axcmem_getRegBEU16Value(fir_reg);
+
+	low_serial_raw = alpx_axcmem_getRegU32Value(low_serial_reg);
+	high_serial_raw = alpx_axcmem_getRegU32Value(high_serial_reg);
+
+	dev_dbg(alpx_dev->dev, "Low serial number[0x%lx]=> 0x%x\n", (uint8_t*)low_serial_reg - (uint8_t*)alpx_dev->base, low_serial_raw);
+
+	dev_dbg(alpx_dev->dev, "High serial number[0x%lx]=> 0x%x\n", (uint8_t*)high_serial_reg - (uint8_t*)alpx_dev->base, high_serial_raw);
+
+	/* Serial = high word shifted left 8 | top byte of the low word —
+	 * presumably only 40 bits are significant; TODO confirm layout. */
+	alpx_dev->identity.serial_number = ((uint64_t)high_serial_raw)<<8 | (((uint64_t)low_serial_raw) >> 24);
+
+	/* DEFAULT VALUES */
+	alpx_dev->identity.sub_system_id = alpx_dev->pci_dev->subsystem_device;
+
+	return 0;
+}
+
+/* Print a Dante card's identity banner (serial, network name, ALSA ids,
+ * design/MCU revisions) to the kernel log at info level. */
+void alpdante_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label)
+{
+	/* Fixed: the initializer was a duplicated self-assignment
+	 * ("reg = reg = ..."), which read the variable before it was set. */
+	void* design_bloc_version_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_DESIGN_BLOC_VERSION_LOC);
+
+	const unsigned int design_bloc_version = alpx_axcmem_getRegU16Value(design_bloc_version_reg);
+	char dante_name[ALPDANTE_NETWORK_NAME_LENGTH];
+
+	/* Pre-terminate so the buffer is printable even if the helper fails. */
+	dante_name[ALPDANTE_NETWORK_NAME_LENGTH - 1] = 0;
+	alpdante_get_dante_name(alpx_dev, dante_name, ALPDANTE_NETWORK_NAME_LENGTH);
+
+	dev_info( alpx_dev->dev,
+		"\n************ \n"
+		"%s Alp DANTE soundcard[%d]{#%llu / %s }: %s - %s (%s - 0x%04x) , \n\t== Revisions ==\n\t* Build: \t\t%u\n\t* Design: \t\t%u.%lu\n\t* MCU:\t\t\t%u.%lu\n"
+		"************\n",
+		label,
+		card->number,
+		alpx_dev->identity.serial_number,
+		dante_name,
+		card->id,
+		card->shortname,
+		card->longname,
+		alpx_dev->identity.sub_system_id,
+		alpx_dev->identity.ver_fpga,
+		ALPX_DANTE_VERSION_VERSION(design_bloc_version),
+		ALPX_DANTE_VERSION_REVISION(design_bloc_version),
+		ALPX_DANTE_VERSION_VERSION(alpx_dev->identity.ver_mcu),
+		ALPX_DANTE_VERSION_REVISION(alpx_dev->identity.ver_mcu));
+}
+
+/* Copy the card's Dante network name into 'name' (read from AXCMem via the
+ * CNAMPR pointer register).
+ * 'length' must be at least ALPDANTE_NETWORK_NAME_LENGTH bytes.
+ * Returns -EINVAL on a short buffer, otherwise the helper's status. */
+int alpdante_get_dante_name(struct alpx_device* alpx_dev, char* name, unsigned int length)
+{
+	if (length < ALPDANTE_NETWORK_NAME_LENGTH) {
+		dev_warn(alpx_dev->dev, "name parameter must be at least %d char long !\n", ALPDANTE_NETWORK_NAME_LENGTH);
+		return -EINVAL;
+	}
+
+	return alpx_acxmem_getByteArrayByRefLoc(alpx_dev, &ALPDANTE_CNAMPR_LOC,
+						&ALPDANTE_NAME_LOC,
+						name,
+						length);
+}
+
+/* Persist the current Dante configuration to flash.
+ * Sequence: take the SMALAR semaphore, issue the "save registers" command
+ * through the FPPR block, poll for completion, check the command status,
+ * release the semaphore.
+ * Returns 0 on success, -EBUSY if the flash semaphore never becomes
+ * available, -EIO on command timeout or a non-zero command status. */
+int alpdante_store_config(struct alpx_device* alpx_dev)
+{
+	void* fsdr_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_FPPR_LOC,
+							&ALPDANTE_FSDR_LOC);
+
+	void* fcr_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_FPPR_LOC,
+							&ALPDANTE_FCR_LOC);
+
+	void* fcar_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_FPPR_LOC,
+							&ALPDANTE_FCAR_LOC);
+
+	void* smalar_reg = alpx_axcmem_getRegAddr(alpx_dev, &ALPDANTE_SMALAR_LOC);
+
+
+	unsigned int waitLoopQty = ALPDANTE_FLASH_LOCK_ACCESS_TRY_QTY;
+	u32 cmd_status = 0;
+	int result = 0;
+
+	dev_info(alpx_dev->dev, "Storing configuration ...\n");
+
+	//Wait for availability
+	while ((alpx_axcmem_getRegU8Value(smalar_reg) != ALPDANTE_SMALAR_AVAILABLE) && (--waitLoopQty)) {
+		dev_dbg(alpx_dev->dev," FCR:0x%x : Not yet available, wait a bit...\n", alpx_axcmem_getRegU8Value(smalar_reg));
+		mdelay(ALPDANTE_FLASH_WAIT_STEP_TIME);
+	}
+
+	/* Re-check the register itself, not the retry counter: the counter can
+	 * reach 0 on the very iteration that saw the semaphore become free,
+	 * which the old counter test misreported as busy. */
+	if (alpx_axcmem_getRegU8Value(smalar_reg) == ALPDANTE_SMALAR_AVAILABLE) {
+		alpx_axcmem_setRegU8Value(smalar_reg, ALPDANTE_SMALAR_IN_USE);
+		dev_dbg(alpx_dev->dev," Semaphore taken\n");
+	}
+	else {
+		dev_err(alpx_dev->dev, "!! Flash memory unavailable !!\n ");
+		return -EBUSY;
+	}
+
+	//Write sequence
+	alpx_axcmem_setRegU8Value(fsdr_reg, 1);
+	alpx_axcmem_setRegU8Value(fcr_reg, ALPDANTE_FPPR_SAVE_REGISTERS_CMD_ID);
+
+	//Wait for completion
+	waitLoopQty = ALPDANTE_FPPR_SAVE_REGS_CMD_COMPLETION_WAIT_STEPS_QTY;
+	while ((alpx_axcmem_getRegU8Value(fcr_reg) != ALPDANTE_FPPR_NOPE_CMD_ID) && (--waitLoopQty)) {
+		dev_dbg(alpx_dev->dev,"FCR: 0x%x : Cmd in progress, wait a bit...\n", alpx_axcmem_getRegU8Value(fcr_reg));
+		mdelay(ALPDANTE_FLASH_WAIT_STEP_TIME);
+	}
+
+	/* Same fix as above: trust the command register, not the counter. */
+	if (alpx_axcmem_getRegU8Value(fcr_reg) != ALPDANTE_FPPR_NOPE_CMD_ID) {
+		dev_err(alpx_dev->dev, "!! Flash Command TIMEOUT!!\n");
+		result = -EIO;
+		goto EXIT;
+	}
+
+	cmd_status = ALPDANTE_FPPR_CMD_STATUS(alpx_axcmem_getRegU32Value(fcar_reg));
+
+	dev_info(alpx_dev->dev, "Configuration %s (0x%x)\n", cmd_status ? "NOT STORED !" : "STORED", cmd_status);
+	result = (cmd_status == 0) ? 0 : -EIO;
+
+EXIT:
+	alpx_axcmem_setRegU8Value(smalar_reg, ALPDANTE_SMALAR_AVAILABLE);
+	dev_dbg(alpx_dev->dev," Semaphore released\n");
+	return result;
+}
+
+/* Configure a Dante card for the requested sample rate and loopback mode,
+ * then persist the configuration to flash.
+ * On an unsupported rate the hardware description is left unchanged and the
+ * default rate is kept (an error is logged, not returned).
+ * Returns the status of alpdante_store_config(). */
+int alpdante_card_setup(struct alpx_device *alpx_dev, struct snd_card *card, unsigned int configured_fs, bool is_loopback_enabled)
+{
+	unsigned int sr_config_value = 0;
+	void* sr_config_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CLKPR_LOC,
+							&ALPDANTE_SRConfig_LOC);
+
+	unsigned int fppr_prod_test_value = 0;
+	void* fppr_prod_test_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_ERRTST_LOC,
+							&ALPDANTE_PROD_TEST_LOC);
+
+	unsigned int rate_bit = 0;	/* SNDRV_PCM_RATE_* flag for configured_fs */
+	unsigned int channels = 0;	/* channel count at that rate */
+
+	dev_dbg(alpx_dev->dev, "Setting AlpDANTE card {S/N: %llu} ...\n", alpx_dev->identity.serial_number);
+
+	/* Map the rate to its clock-manager value, ALSA rate flag and channel
+	 * count; the channel count halves each time the rate doubles. The
+	 * six previously duplicated assignment blocks are factored below. */
+	switch (configured_fs) {
+	case 44100:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_44_1K;
+		rate_bit = SNDRV_PCM_RATE_44100;
+		channels = 64;
+		break;
+	case 48000:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_48K;
+		rate_bit = SNDRV_PCM_RATE_48000;
+		channels = 64;
+		break;
+	case 88200:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_88_2K;
+		rate_bit = SNDRV_PCM_RATE_88200;
+		channels = 32;
+		break;
+	case 96000:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_96K;
+		rate_bit = SNDRV_PCM_RATE_96000;
+		channels = 32;
+		break;
+	case 176400:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_176_4K;
+		rate_bit = SNDRV_PCM_RATE_176400;
+		channels = 16;
+		break;
+	case 192000:
+		sr_config_value = ALPDANTE_CLK_MANAGER_CLK_VALUE_192K;
+		rate_bit = SNDRV_PCM_RATE_192000;
+		channels = 16;
+		break;
+	default:
+		dev_err(alpx_dev->dev, "AlpDANTE card {S/N: %llu}; %dHz : unsupported FS, keep default %dHz !\n", alpx_dev->identity.serial_number, configured_fs, alpx_dev->variant->capture_hw->rate_min);
+	}
+
+	if (rate_bit != 0) {
+		alpx_dev->variant->capture_hw->rates = rate_bit;
+		alpx_dev->variant->capture_hw->channels_min = channels;
+		alpx_dev->variant->capture_hw->channels_max = channels;
+		alpx_dev->variant->capture_hw->rate_min = configured_fs;
+		alpx_dev->variant->capture_hw->rate_max = configured_fs;
+	}
+
+	dev_dbg(alpx_dev->dev, "Setting AlpDANTE card {S/N: %llu}: FS:%dHz => reg=0x%x\n", alpx_dev->identity.serial_number,
+		alpx_dev->variant->capture_hw->rate_min, sr_config_value);
+
+	/* NOTE(review): on an unsupported rate this still writes 0, matching
+	 * the original behavior. */
+	alpx_axcmem_setRegU8Value(sr_config_reg, sr_config_value);
+
+
+	dev_info(alpx_dev->dev, "AlpDANTE card {S/N: %llu}: loopback mode %s.\n",
+		alpx_dev->identity.serial_number,
+		is_loopback_enabled ? "ENABLED" : "DISABLED");
+
+	/* Read-modify-write the production-test register's loopback bit. */
+	fppr_prod_test_value = alpx_axcmem_getRegU8Value(fppr_prod_test_reg);
+	fppr_prod_test_value = is_loopback_enabled ?
+				(ALPDANTE_PROD_TEST_LOOPBACK | fppr_prod_test_value) :
+				(fppr_prod_test_value & ~ALPDANTE_PROD_TEST_LOOPBACK);
+	alpx_axcmem_setRegU8Value(fppr_prod_test_reg, fppr_prod_test_value);
+
+	dev_dbg(alpx_dev->dev, "fppr_prod_test_value = 0x%x, reg: 0x%x\n", fppr_prod_test_value,
+		alpx_axcmem_getRegU8Value(fppr_prod_test_reg));
+
+	return alpdante_store_config(alpx_dev);
+}
+
+/* Helpers */
+/* Return the running samples counter. Dante cards expose it through an
+ * AXCMem pointed register; every other variant reads the ALP clock-manager
+ * register directly. */
+unsigned int
+alpx_get_samples_counter(struct alpx_device *alpx_dev)
+{
+	if (alpx_dev->variant->model != ALPX_VARIANT_MODEL_ALPDANTE) {
+		return readl(ALPX_REG(alpx_dev, ALP, CLK_MANAGER, SAMPLES_COUNT));
+	}
+	else {
+		void* sample_count_reg = alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev, &ALPDANTE_CSPPR_LOC, &ALPDANTE_SAMPLE_COUNT_LOC);
+		return alpx_axcmem_getRegU32Value(sample_count_reg);
+	}
+	/* Unreachable trailing "return 0;" removed: both branches return. */
+}
diff --git a/snd-alpx/alpx_cards.h b/snd-alpx/alpx_cards.h
new file mode 100644
index 0000000..9e2aab6
--- /dev/null
+++ b/snd-alpx/alpx_cards.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+
+/* This file contains the card centered services */
+#ifndef _ALPX_CARDS_H
+#define _ALPX_CARDS_H
+
+#include "alpx.h"
+#include <sound/core.h>
+
+/* Per-variant identity banners, printed at probe time. */
+void alpstereo_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label);
+void alpmadi_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label);
+void alpmultichan_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label);
+void alpdante_print_identity(struct alpx_device *alpx_dev, struct snd_card *card, const unsigned char* label);
+/* Dante rate/loopback configuration (persists settings to flash). */
+int alpdante_card_setup(struct alpx_device *alpx_dev, struct snd_card *card, unsigned int configured_fs, bool is_loopback_enabled);
+/* Per-variant identity finalizers (versions, serial number, subsystem id). */
+int alpstereo_finalize_identity(struct alpx_device* alpx_dev);
+int alpmultichan_finalize_identity(struct alpx_device* alpx_dev);
+int alpdante_finalize_identity(struct alpx_device* alpx_dev);
+int alpdante_store_config(struct alpx_device* alpx_dev);
+int alpdante_get_dante_name(struct alpx_device* alpx_dev, char* name, unsigned int length);
+
+#endif
diff --git a/snd-alpx/alpx_controls.c b/snd-alpx/alpx_controls.c
new file mode 100644
index 0000000..397fe99
--- /dev/null
+++ b/snd-alpx/alpx_controls.c
@@ -0,0 +1,2096 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include "alpx_controls.h"
+#include "alpx_variants_stereo.h"
+#include "alpx_variants_mc.h"
+#include "alpx_variants_dead.h"
+#include "alpx_variants_madi.h"
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+#include <linux/slab.h>
+
+/* Amplifiers */
+
+/* ALSA info() callback: describe one amplifier line as a single-channel
+ * integer control whose bounds come from the amplifier descriptor. */
+static int alpx_control_amplifier_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const unsigned long ctrl_idx = kcontrol->private_value;
+	const struct alpx_control *ctrl = &alpx_dev->controls[ctrl_idx];
+	const struct alpx_control_descriptor_amplifier *ampli_descr =
+		&ctrl->descriptor->data.ampli;
+
+	dev_dbg( alpx_dev->dev," for volume[%u] (controls[%lu]){%p}.\n",
+		ctrl->data.ampli.idx,
+		ctrl_idx,
+		ctrl);
+
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 1; //Single channel amplifier
+	info->value.integer.min = ampli_descr->reg_gain_min;
+	info->value.integer.max = ampli_descr->reg_gain_max;
+
+	return 0;
+}
+
+/* ALSA get() callback: read one amplifier line's gain from MMIO.
+ * Register offset = descriptor base + ALP_INDEX_TO_REG_OFFSET(line index). */
+static int alpx_control_amplifier_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	const struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	const struct alpx_control* control = &alpx_dev->controls[control_index];
+
+	const struct alpx_control_amplifier* ampli = &control->data.ampli;
+
+	const u32 offset = control->descriptor->base + ALP_INDEX_TO_REG_OFFSET(ampli->idx);
+
+	u32 value;
+
+	value = readl(alpx_dev->base + offset);
+
+	dev_dbg( alpx_dev->dev," volume[%u](controls[%u]){%p} : 0x%x <= [0x%p:%x:%x]\n",
+		control->data.ampli.idx,
+		control_index,
+		control,
+		value,
+		alpx_dev->base,
+		control->descriptor->base,
+		ALP_INDEX_TO_REG_OFFSET(ampli->idx));
+
+	elem_value->value.integer.value[0] = value;
+
+	return 0;
+}
+
+/* ALSA put() callback: write one amplifier line's gain to MMIO.
+ * Always reports 1 ("changed") without comparing to the current value. */
+static int alpx_control_amplifier_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	const struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	const struct alpx_control* control = &alpx_dev->controls[control_index];
+	const struct alpx_control_amplifier* ampli = &control->data.ampli;
+
+	const u32 offset = control->descriptor->base + ALP_INDEX_TO_REG_OFFSET(ampli->idx);
+	const u32 value = elem_value->value.integer.value[0];
+
+	dev_dbg( alpx_dev->dev," volume[%u](controls[%u]){%p} : 0x%x => [0x%p:%x:%x]\n",
+		control->data.ampli.idx,
+		control_index,
+		control,
+		value,
+		alpx_dev->base,
+		control->descriptor->base,
+		ALP_INDEX_TO_REG_OFFSET(ampli->idx));
+
+	writel(value, alpx_dev->base + offset);
+
+	return 1; /* Value changed */
+}
+
+
+/* Mutable control template: name/index/access/tlv/private_value are filled
+ * in by alpx_control_amplifier_register() just before snd_ctl_new1(), so
+ * control registration must not run concurrently. */
+static struct snd_kcontrol_new alpx_control_amplifier = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+	.info = alpx_control_amplifier_info,
+	.get = alpx_control_amplifier_get,
+	.put = alpx_control_amplifier_put,
+};
+
+/* Register one amplifier volume control for line 'idx'.
+ * Binds the next alpx_dev->controls[] slot to the descriptor, fills the
+ * shared template and adds the control to the card.
+ * Returns 0 on success or the snd_ctl_add() error. */
+static int alpx_control_amplifier_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor,
+					unsigned int idx)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	char name[64] = { 0 };
+	const unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	control->descriptor = descriptor;
+	control->data.ampli.idx = idx;
+
+	snprintf(name, sizeof(name), "%s Volume", /* Just add FUNCTION, see alsa driver guide */
+		control->descriptor->prefix);
+
+	dev_dbg( alpx_dev->dev,"creating amplifier %s[%u] in controls[%u] linked to [0x%x:0x%x].\n",
+		name,
+		control->data.ampli.idx,
+		control_index,
+		control->descriptor->base,
+		ALP_INDEX_TO_REG_OFFSET(control->data.ampli.idx));
+
+	/* The static template is patched in place; snd_ctl_new1() copies it
+	 * (including the name) into the new kcontrol. */
+	alpx_control_amplifier.name = name;
+	alpx_control_amplifier.private_value = control_index;
+	alpx_control_amplifier.index = control->data.ampli.idx;
+	alpx_control_amplifier.access = descriptor->access;
+	alpx_control_amplifier.tlv.p = descriptor->data.ampli.gains_scale;
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_amplifier, alpx_dev));
+	if (ret)
+		return ret;
+
+	/* Slot consumed only on success. */
+	alpx_dev->controls_index++;
+
+	return 0;
+}
+
+/* Register one volume control per amplifier line of the descriptor.
+ * Returns 0 on success (including zero lines) or the first failure. */
+static int alpx_controls_amplifier_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor)
+{
+	const unsigned int items_count = descriptor->data.ampli.lines_count;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < items_count; i++) {
+		ret = alpx_control_amplifier_register(card, descriptor, i);
+		if (ret)
+			return ret;
+	}
+
+	/* Fixed: 'ret' was returned uninitialized when lines_count == 0. */
+	return 0;
+}
+
+/* Codec : analog equalization*/
+
+/* ALSA info() callback: describe one codec line as a single integer
+ * control; bounds come from the codec descriptor. */
+static int alpx_control_codec_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	const struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const struct alpx_control *ctrl =
+		&alpx_dev->controls[kcontrol->private_value];
+	const struct alpx_control_descriptor_codec *codec_descr =
+		&ctrl->descriptor->data.codec;
+
+	info->value.integer.max = codec_descr->reg_gain_max;
+	info->value.integer.min = codec_descr->reg_gain_min;
+	info->count = 1; /* single line */
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+	return 0;
+}
+
+/* ALSA get() callback: read one codec line's gain from MMIO.
+ * Offset = descriptor base + codec block offset + gain register of 'idx'. */
+static int alpx_control_codec_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	const struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const unsigned int control_index = kcontrol->private_value;
+	const struct alpx_control *control = &alpx_dev->controls[control_index];
+	const unsigned int idx = control->data.codec.idx;
+
+	const u32 offset = control->descriptor->base + control->descriptor->data.codec.offset +
+			ALPX_CODEC_CTRL_GAIN_REG(idx);
+
+	elem_value->value.integer.value[0] = readl(alpx_dev->base + offset);
+
+
+	dev_dbg( alpx_dev->dev,"0x%lx <= [0x%x:%x]\n",
+		elem_value->value.integer.value[0],
+		control->descriptor->base,
+		control->descriptor->data.codec.offset + ALPX_CODEC_CTRL_GAIN_REG(idx));
+
+	return 0;
+}
+
+/* ALSA put() callback: write one codec line's gain to MMIO.
+ * Always reports 1 ("changed") without comparing to the current value. */
+static int alpx_control_codec_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	const struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const unsigned int control_index = kcontrol->private_value;
+	const struct alpx_control *control = &alpx_dev->controls[control_index];
+	const unsigned int idx = control->data.codec.idx;
+
+	const u32 offset = control->descriptor->base + control->descriptor->data.codec.offset +
+			ALPX_CODEC_CTRL_GAIN_REG(idx);
+
+	const u32 value = elem_value->value.integer.value[0];
+
+	dev_dbg( alpx_dev->dev," 0x%x => [0x%x:%x]\n",
+		value,
+		control->descriptor->base,
+		control->descriptor->data.codec.offset + ALPX_CODEC_CTRL_GAIN_REG(idx));
+
+	writel(value, alpx_dev->base + offset);
+
+	return 1;
+}
+
+
+/* Mutable control template: filled in by alpx_control_codec_register()
+ * just before snd_ctl_new1(); registration must not run concurrently. */
+static struct snd_kcontrol_new alpx_control_codec = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+	.info = alpx_control_codec_info,
+	.get = alpx_control_codec_get,
+	.put = alpx_control_codec_put,
+};
+
+/* Register one codec gain control for line 'idx'.
+ * Same pattern as the amplifier: bind the next controls[] slot, patch the
+ * shared template, add the control, bump the slot counter on success. */
+static int alpx_control_codec_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor,
+				unsigned int idx)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	char name[64] = { 0 };
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	control->descriptor = descriptor;
+	control->data.codec.idx = idx;
+
+	snprintf(name, sizeof(name), "%s Volume", descriptor->prefix);
+
+	alpx_control_codec.name = name;
+	alpx_control_codec.private_value = control_index;
+	alpx_control_codec.index = control->data.codec.idx;
+	alpx_control_codec.access = descriptor->access;
+	alpx_control_codec.tlv.p = descriptor->data.codec.gains_scale;
+
+	dev_dbg( alpx_dev->dev," creating codec eq %s[%u] in controls[%u] linked to [0x%x:0x%x].\n",
+		name,
+		control->data.codec.idx,
+		control_index,
+		control->descriptor->base,
+		ALPX_CODEC_CTRL_GAIN_REG(control->data.codec.idx));
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_codec, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	return 0;
+}
+
+/* Register one gain control per codec line of the descriptor.
+ * Returns 0 on success (including zero lines) or the first failure. */
+static int alpx_controls_codec_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor)
+{
+	const unsigned int items_count = descriptor->data.codec.lines_count;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < items_count; i++) {
+		ret = alpx_control_codec_register(card, descriptor, i);
+		if (ret)
+			return ret;
+	}
+
+	/* Fixed: 'ret' was returned uninitialized when lines_count == 0. */
+	return 0;
+}
+
+/* Router */
+
+/* ALSA info() callback: enumerated control with one item; the choice list
+ * comes from the router descriptor's entry table. */
+static int alpx_control_router_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const struct alpx_control_descriptor_router *router_descr =
+		&alpx_dev->controls[kcontrol->private_value].descriptor->data.router;
+
+	return snd_ctl_enum_info(info, 1, router_descr->entries_count,
+				router_descr->entries);
+}
+
+/* ALSA get() callback: read the router register and extract this entry's
+ * current selection via ALPX_ROUTER_VALUE(). */
+static int alpx_control_router_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	unsigned int index = control->data.router.index;
+	unsigned int index_value;
+	u32 offset, value;
+
+	offset = control->descriptor->base + ALPX_ROUTER_REG(index);
+	value = readl(alpx_dev->base + offset);
+
+	dev_dbg( alpx_dev->dev,"0x%x <= [0x%p:%x:%x]\n",
+		value,
+		alpx_dev->base,
+		control->descriptor->base,
+		ALPX_ROUTER_REG(index));
+
+	index_value = ALPX_ROUTER_VALUE(index, value);
+	elem_value->value.enumerated.item[0] = index_value;
+
+	return 0;
+}
+
+/* ALSA put() callback: read-modify-write this entry's field of the router
+ * register. Returns 0 (no change) when the requested selection matches the
+ * current one, 1 after a write. */
+static int alpx_control_router_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	unsigned int index = control->data.router.index;
+	unsigned int index_value;
+	u32 offset, value;
+
+	offset = control->descriptor->base + ALPX_ROUTER_REG(index);
+	value = readl(alpx_dev->base + offset);
+	index_value = ALPX_ROUTER_VALUE(index, value);
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev," 0x%x => [0x%p:%x:%x:%d]\n",
+		value,
+		alpx_dev->base,
+		control->descriptor->base,
+		ALPX_ROUTER_REG(index),
+		index_value);
+#endif
+
+	if (index_value == elem_value->value.enumerated.item[0])
+		return 0;
+
+	index_value = elem_value->value.enumerated.item[0];
+
+	/* Clear this entry's field, then OR in the new selection. */
+	value &= ~ALPX_ROUTER_MASK(index);
+	value |= ALPX_ROUTER_SEL(index, index_value);
+
+	writel(value, alpx_dev->base + offset);
+
+	return 1;
+}
+
+/* Mutable control template: filled in by alpx_control_router_register()
+ * just before snd_ctl_new1(); registration must not run concurrently. */
+static struct snd_kcontrol_new alpx_control_router = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+	.info = alpx_control_router_info,
+	.get = alpx_control_router_get,
+	.put = alpx_control_router_put,
+};
+
+/* Register one "<prefix> Route" enumerated control for router entry
+ * 'index'; same template-patching pattern as the other register helpers. */
+static int alpx_control_router_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor,
+					unsigned int index)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	char name[64] = { 0 };
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	control->descriptor = descriptor;
+	control->data.router.index = index;
+
+	snprintf(name, sizeof(name), "%s Route", descriptor->prefix);
+
+	alpx_control_router.name = name;
+	alpx_control_router.private_value = control_index;
+	alpx_control_router.index = index;
+	alpx_control_router.access = descriptor->access;
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_router, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	return 0;
+}
+/* RESERVED controls */
+/* ALSA info() callback for a reserved placeholder: a single boolean. */
+static int alpx_control_reserved_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	info->value.integer.max = 1;
+	info->value.integer.min = 0;
+	info->count = 1;
+	info->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+
+	return 0;
+}
+
+/* Reserved placeholder: always reads back 0. */
+static int alpx_control_reserved_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	elem_value->value.integer.value[0] = 0;
+	return 0;
+}
+
+/* Reserved placeholder: writes are accepted and discarded (reports "no
+ * change" to ALSA core). */
+static int alpx_control_reserved_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	return 0;
+}
+
+/* Mutable template for reserved placeholder controls; patched by
+ * alpx_control_reserved_register() before snd_ctl_new1(). */
+static struct snd_kcontrol_new alpx_control_reserved = {
+	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
+
+	.info = alpx_control_reserved_info,
+	.get = alpx_control_reserved_get,
+	.put = alpx_control_reserved_put,
+};
+
+/* Register an inactive placeholder control named after the descriptor
+ * prefix (keeps control numbering stable across card variants —
+ * NOTE(review): presumed intent, confirm). */
+static int alpx_control_reserved_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	char name[64] = { 0 };
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	control->descriptor = descriptor;
+	snprintf(name, sizeof(name), "%s", descriptor->prefix);
+
+	alpx_control_reserved.name = name;
+	alpx_control_reserved.private_value = control_index;
+	alpx_control_reserved.index = 0;
+	/* Access is forced INACTIVE regardless of the descriptor. */
+	alpx_control_reserved.access = SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_reserved, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	return 0;
+
+}
+
+/* Constant */
+/* ALSA info() callback for a constant control: min == max == the
+ * descriptor's fixed value. */
+static int alpx_control_constant_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const struct alpx_control_descriptor_constant *const_descr =
+		&alpx_dev->controls[kcontrol->private_value].descriptor->data.constant;
+
+	dev_dbg(alpx_dev->dev, "INFO constant control \n");
+
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 1;
+	info->value.integer.min = const_descr->value;
+	info->value.integer.max = const_descr->value;
+
+	return 0;
+}
+
+/* ALSA get() callback: always report the descriptor's fixed value. */
+static int alpx_control_constant_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const struct alpx_control *ctrl =
+		&alpx_dev->controls[kcontrol->private_value];
+
+	dev_dbg(alpx_dev->dev, "GET constant control \n");
+
+	elem_value->value.integer.value[0] = ctrl->descriptor->data.constant.value;
+	return 0;
+}
+
+
+/* Mutable template for read-only constant controls (no put handler);
+ * patched by alpx_control_constant_register() before snd_ctl_new1(). */
+static struct snd_kcontrol_new alpx_control_constant = {
+	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
+
+	.info = alpx_control_constant_info,
+	.get = alpx_control_constant_get,
+	.put = NULL,
+};
+
+/* Register a read-only constant control named after the descriptor prefix;
+ * same template-patching pattern as the other register helpers. */
+static int alpx_control_constant_register(struct snd_card *card,
+					struct alpx_control_descriptor *descriptor)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	char name[64] = { 0 };
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	dev_dbg(alpx_dev->dev, "Registering constant control for %s\n", card->longname);
+
+	control->descriptor = descriptor;
+	snprintf(name, sizeof(name), "%s", descriptor->prefix);
+
+	alpx_control_constant.name = name;
+	alpx_control_constant.private_value = control_index;
+	alpx_control_constant.index = 0;
+	alpx_control_constant.access = SNDRV_CTL_ELEM_ACCESS_READ; /* ENFORCE Read only */
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_constant, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	return 0;
+}
+
+/* AXCMem Relative choice control */
+/* ALSA info() callback: enumerated control backed by an AXCMem register
+ * field; choices come from the descriptor's entry table. */
+static int alpx_control_axcmem_rel_choice_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const struct alpx_control_descriptor_axcmem_rel_choice *choice_descr =
+		&alpx_dev->controls[kcontrol->private_value].descriptor->data.axcmem_rel_choice;
+
+	dev_dbg(alpx_dev->dev, "INFO choice control\n");
+
+	return snd_ctl_enum_info(info, 1, choice_descr->entries_count, choice_descr->entries);
+}
+
+/* ALSA put() callback: write one enumerated choice into its AXCMem register
+ * field through the descriptor's getter/setter accessors.
+ * Returns 1 after a write, -EINVAL on an out-of-range item index. */
+static int alpx_control_axcmem_rel_choice_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const unsigned long control_index = kcontrol->private_value;
+
+	const struct alpx_control* control = &alpx_dev->controls[control_index];
+
+	const struct alpx_control_descriptor_axcmem_rel_choice* choice_descr =
+		(struct alpx_control_descriptor_axcmem_rel_choice*) &control->descriptor->data.axcmem_rel_choice;
+
+	const unsigned int choice_index = elem_value->value.enumerated.item[0];
+
+	unsigned int reg_value = choice_descr->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+										&choice_descr->base_loc,
+										&choice_descr->reg_loc));
+
+	unsigned int choice_value = 0;
+
+	if (choice_index >= choice_descr->entries_count ) {
+		dev_err(alpx_dev->dev, "Index out of bound : %d > %d !\n", choice_index, choice_descr->entries_count - 1);
+		return -EINVAL;
+	}
+
+	choice_value = (control->descriptor->data.axcmem_rel_choice.entries_values[choice_index] << choice_descr->pos);
+
+	/* Fixed: clear the target field with ~mask before OR-ing the new
+	 * value. The previous "reg_value &= mask" wiped every OTHER field and
+	 * kept the stale field bits (the companion _get() extracts the field
+	 * as (reg & mask) >> pos, so 'mask' selects the field itself). */
+	reg_value &= ~choice_descr->mask;
+	reg_value |= choice_value;
+
+	choice_descr->setter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+							&choice_descr->base_loc,
+							&choice_descr->reg_loc),
+				reg_value);
+
+	dev_dbg(alpx_dev->dev, "%s: index = %d, value = 0x%x, mask: 0x%08x => reg_value=0x%08x\n",
+		control->descriptor->prefix,
+		elem_value->value.enumerated.item[0],
+		choice_value,
+		choice_descr->mask,
+		choice_descr->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+								&choice_descr->base_loc,
+								&choice_descr->reg_loc)));
+	/* Report "value changed", consistent with the other put() handlers. */
+	return 1;
+}
+
+/* ALSA get() callback: read the AXCMem register, extract the field with
+ * (reg & mask) >> pos, and map the raw value back to its entry index.
+ * Returns -EINVAL if the raw value matches no table entry. */
+static int alpx_control_axcmem_rel_choice_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	const unsigned long control_index = kcontrol->private_value;
+
+	const struct alpx_control* control = &alpx_dev->controls[control_index];
+
+	const struct alpx_control_descriptor_axcmem_rel_choice* choice =
+		(struct alpx_control_descriptor_axcmem_rel_choice*) &control->descriptor->data.axcmem_rel_choice;
+
+	const unsigned int reg_value = choice->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+										&choice->base_loc,
+										&choice->reg_loc));
+
+	const unsigned int read_value = (reg_value & choice->mask) >> choice->pos;
+	unsigned int value_index = 0;
+
+	/* Linear search: the entry table is small. */
+	for (value_index = 0; value_index < choice->entries_count; ++value_index) {
+		u32 entry_value = choice->entries_values[value_index];
+
+		if (read_value == entry_value) {
+			elem_value->value.enumerated.item[0] = value_index;
+			dev_dbg(alpx_dev->dev, "%s: reg_value=0x%08x, mask: 0x%08x => index = %d\n",
+				control->descriptor->prefix,
+				reg_value,
+				choice->mask,
+				value_index);
+			return 0;
+		}
+	}
+
+	dev_err(alpx_dev->dev, "%s: unknown choice for: reg_value=0x%08x, mask: 0x%08x => value = %d\n",
+		control->descriptor->prefix,
+		reg_value,
+		choice->mask,
+		(reg_value & choice->mask)>>choice->pos);
+
+	return -EINVAL;
+}
+
+/*
+ * Template for AxCMem-relative enumerated controls.  .name, .index,
+ * .access and .private_value are patched per-control at registration time
+ * (see alpx_control_axcmem_rel_choice_register()).
+ */
+static struct snd_kcontrol_new alpx_control_axcmem_rel_choice = {
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = alpx_control_axcmem_rel_choice_info,
+ .get = alpx_control_axcmem_rel_choice_get,
+ .put = alpx_control_axcmem_rel_choice_put,
+};
+
+/*
+ * Register one AxCMem-relative enumerated control on the card.
+ *
+ * Binds the next free slot of alpx_dev->controls[] to @descriptor, patches
+ * the shared template and hands a copy to ALSA via snd_ctl_new1() (which
+ * duplicates the template, including the stack-local name buffer).
+ *
+ * NOTE(review): this mutates a file-scope static template; assumes control
+ * registrations are serialized (probe time) — confirm no concurrent callers.
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+int alpx_control_axcmem_rel_choice_register (struct snd_card *card,
+ struct alpx_control_descriptor *descriptor)
+{
+ struct alpx_device *alpx_dev = card->private_data;
+ char name[64] = { 0 };
+ unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ int ret;
+
+ dev_dbg(alpx_dev->dev, "Registering choice control for %s\n", card->longname);
+
+ control->descriptor = descriptor;
+ snprintf(name, sizeof(name), "%s", descriptor->prefix);
+
+ alpx_control_axcmem_rel_choice.name = name;
+ alpx_control_axcmem_rel_choice.private_value = control_index;
+ alpx_control_axcmem_rel_choice.index = 0;
+ alpx_control_axcmem_rel_choice.access = descriptor->access;
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_axcmem_rel_choice, alpx_dev));
+ if (ret)
+ return ret;
+
+ /* Slot consumed only on success. */
+ alpx_dev->controls_index++;
+
+ return 0;
+
+}
+
+/* AxCMem Value */
+/*
+ * .info handler for AxCMem-relative integer controls: a single integer
+ * whose range comes straight from the control descriptor.
+ */
+static int alpx_control_axcmem_rel_value_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ const struct alpx_control* ctrl =
+ &alpx_dev->controls[kcontrol->private_value];
+ const struct alpx_control_descriptor_axcmem_rel_value* descr =
+ (struct alpx_control_descriptor_axcmem_rel_value*) &ctrl->descriptor->data.axcmem_rel_value;
+
+ dev_dbg(alpx_dev->dev, "INFO Value control \n");
+
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 1;
+ info->value.integer.min = descr->min;
+ info->value.integer.max = descr->max;
+
+ return 0;
+}
+
+/*
+ * .put handler for AxCMem-relative integer controls: read-modify-write of
+ * the backing register through the descriptor's getter/setter.
+ *
+ * NOTE(review): clears with `reg_value &= mask` (keeping ONLY the field)
+ * rather than the usual `&= ~mask`; the sibling choice .put does the same,
+ * so mask may encode bits-to-preserve here — confirm against hardware doc.
+ * NOTE(review): the new value is masked BEFORE shifting
+ * ((v & mask) << pos) while .get masks the already-shifted register —
+ * verify the mask is meant to apply to the unshifted value.
+ */
+static int alpx_control_axcmem_rel_value_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ const unsigned long control_index = kcontrol->private_value;
+
+ const struct alpx_control* control = &alpx_dev->controls[control_index];
+
+ const struct alpx_control_descriptor_axcmem_rel_value* value_descr =
+ (struct alpx_control_descriptor_axcmem_rel_value*) &control->descriptor->data.axcmem_rel_value;
+
+ unsigned int reg_value = value_descr->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+ &value_descr->base_loc,
+ &value_descr->reg_loc));
+
+ reg_value &= value_descr->mask;
+ reg_value |= ((elem_value->value.integer.value[0] & value_descr->mask) << value_descr->pos);
+
+ value_descr->setter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+ &value_descr->base_loc,
+ &value_descr->reg_loc),
+ reg_value);
+
+ /* Re-read the register so the trace shows what actually landed. */
+ dev_dbg(alpx_dev->dev, "%s: reg_value=0x%08x, mask: 0x%08x => index = %ld\n",
+ control->descriptor->prefix,
+ value_descr->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+ &value_descr->base_loc,
+ &value_descr->reg_loc)),
+ value_descr->mask,
+ elem_value->value.integer.value[0]);
+
+ return 0;
+}
+
+/*
+ * .get handler for AxCMem-relative integer controls: reads the backing
+ * register and extracts the field with mask/pos.  Always returns 0.
+ */
+static int alpx_control_axcmem_rel_value_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ const unsigned long control_index = kcontrol->private_value;
+
+ const struct alpx_control* control = &alpx_dev->controls[control_index];
+
+ const struct alpx_control_descriptor_axcmem_rel_value* value_descr =
+ (struct alpx_control_descriptor_axcmem_rel_value*) &control->descriptor->data.axcmem_rel_value;
+
+ const unsigned int reg_value = value_descr->getter(alpx_axcmem_getPointedRegAddrByRefLoc(alpx_dev,
+ &value_descr->base_loc,
+ &value_descr->reg_loc));
+
+ elem_value->value.integer.value[0] = (reg_value & value_descr->mask) >> value_descr->pos;
+
+ dev_dbg(alpx_dev->dev, "%s: reg_value=0x%08x, mask: 0x%08x => value: 0x%lx\n",
+ control->descriptor->prefix,
+ reg_value,
+ value_descr->mask,
+ elem_value->value.integer.value[0]);
+
+ return 0;
+}
+
+/*
+ * Template for AxCMem-relative integer controls; patched per-control at
+ * registration time (see alpx_control_axcmem_rel_value_register()).
+ */
+static struct snd_kcontrol_new alpx_control_axcmem_rel_value = {
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = alpx_control_axcmem_rel_value_info,
+ .get = alpx_control_axcmem_rel_value_get,
+ .put = alpx_control_axcmem_rel_value_put,
+};
+
+/*
+ * Register one AxCMem-relative integer control on the card.  Mirrors
+ * alpx_control_axcmem_rel_choice_register(); the shared static template is
+ * patched then copied by snd_ctl_new1().
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+int alpx_control_axcmem_rel_value_register (struct snd_card *card,
+ struct alpx_control_descriptor *descriptor)
+{
+ struct alpx_device *alpx_dev = card->private_data;
+ char name[64] = { 0 };
+ unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ int ret;
+
+ dev_dbg(alpx_dev->dev, "Registering VALUE control for %s\n", card->longname);
+
+ control->descriptor = descriptor;
+ snprintf(name, sizeof(name), "%s", descriptor->prefix);
+
+ alpx_control_axcmem_rel_value.name = name;
+ alpx_control_axcmem_rel_value.private_value = control_index;
+ alpx_control_axcmem_rel_value.index = 0;
+ alpx_control_axcmem_rel_value.access = descriptor->access;
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_axcmem_rel_value, alpx_dev));
+ if (ret)
+ return ret;
+
+ alpx_dev->controls_index++;
+ return 0;
+}
+
+/* Mixer */
+
+/*
+ * .info handler for mixer gain controls: a single integer whose range is
+ * taken from the mixer descriptor.
+ */
+static int alpx_control_mixer_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ const struct alpx_control* ctrl =
+ &alpx_dev->controls[kcontrol->private_value];
+ const struct alpx_control_descriptor_mixer* mixer_descr =
+ (struct alpx_control_descriptor_mixer*) &ctrl->descriptor->data.mixer;
+
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 1;
+ info->value.integer.min = mixer_descr->reg_gain_min;
+ info->value.integer.max = mixer_descr->reg_gain_max;
+
+ return 0;
+}
+
+/*
+ * .get handler for mixer gain controls.
+ *
+ * The mixer register layout differs per card variant, so the variant model
+ * selects both the register offset macro and the gain-field decode macro.
+ * Variants without a mixer report 0.
+ */
+static int alpx_control_mixer_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ unsigned int lines_count = control->descriptor->data.mixer.lines_count;
+ unsigned int mixer_in = control->data.mixer.mixer_in;
+ unsigned int mixer_out = control->data.mixer.mixer_out;
+ u32 offset, reg_value;
+
+ //Registers layout depends on the card's variant
+ switch(alpx_dev->variant->model) {
+ case ALPX_VARIANT_MODEL_ALP882:
+ case ALPX_VARIANT_MODEL_ALP882_MIC:
+ case ALPX_VARIANT_MODEL_ALP442:
+ case ALPX_VARIANT_MODEL_ALP442_MIC:
+ offset = control->descriptor->base +
+ ALPMC_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out);
+ reg_value = readl(alpx_dev->base + offset);
+ elem_value->value.integer.value[0] = ALPMC_MIXER_GAIN_VALUE(reg_value);
+
+ #if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," %ld (0x%lx <= 0x%x)<= (%d,%d,%d)[REG:%x:%02x]\n",
+ elem_value->value.integer.value[0],
+ ALPMC_MIXER_GAIN_VALUE(reg_value),
+ reg_value,
+ lines_count, mixer_in, mixer_out,
+ control->descriptor->base,
+ ALPMC_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out));
+ #endif
+ break;
+
+ case ALPX_VARIANT_MODEL_ALP222:
+ case ALPX_VARIANT_MODEL_ALP222_MIC:
+ /* ALP222 packs several inputs per register: decode needs mixer_in. */
+ offset = control->descriptor->base +
+ ALP222_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out);
+ reg_value = readl(alpx_dev->base + offset);
+ elem_value->value.integer.value[0] = ALP222_MIXER_GAIN_VALUE(mixer_in, reg_value);
+
+ #if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," %ld (0x%lx <= 0x%x)<= (%d,%d,%d)[REG:%x:%02x]\n",
+ elem_value->value.integer.value[0],
+ ALP222_MIXER_GAIN_VALUE(mixer_in, reg_value),
+ reg_value,
+ lines_count, mixer_in, mixer_out,
+ control->descriptor->base,
+ ALP222_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out));
+ #endif
+ break;
+
+ default:
+ //No mixer by default !
+ elem_value->value.integer.value[0] = 0;
+ return 0;
+ };
+
+
+ return 0;
+}
+
+/*
+ * .put handler for mixer gain controls: read-modify-write of the
+ * variant-specific gain field.  Returns 1 to tell ALSA the value changed
+ * (no old/new comparison is made), 0 for variants without a mixer.
+ */
+static int alpx_control_mixer_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ unsigned int lines_count = control->descriptor->data.mixer.lines_count;
+ unsigned int mixer_in = control->data.mixer.mixer_in;
+ unsigned int mixer_out = control->data.mixer.mixer_out;
+ unsigned int mixer_value;
+ u32 offset, reg_value;
+
+ mixer_value = elem_value->value.integer.value[0];
+
+ /* Layout is variant specific; select offset/mask/encode macros. */
+ switch(alpx_dev->variant->model) {
+ case ALPX_VARIANT_MODEL_ALP882:
+ case ALPX_VARIANT_MODEL_ALP882_MIC:
+ case ALPX_VARIANT_MODEL_ALP442:
+ case ALPX_VARIANT_MODEL_ALP442_MIC:
+ offset = control->descriptor->base +
+ ALPMC_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out);
+ reg_value = readl(alpx_dev->base + offset);
+
+ reg_value &= ~ALPMC_MIXER_GAIN_MASK;
+
+ #if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," 0x%x (0x%lx => 0x%x)=> (%d,%d,%d)[REG:%x:%02x]\n",
+ mixer_value,
+ ALPMC_MIXER_GAIN_SEL(mixer_value),
+ reg_value,
+ lines_count, mixer_in, mixer_out,
+ control->descriptor->base,
+ ALPMC_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out));
+ #endif
+
+ reg_value |= ALPMC_MIXER_GAIN_SEL(mixer_value);
+ break;
+
+ case ALPX_VARIANT_MODEL_ALP222:
+ case ALPX_VARIANT_MODEL_ALP222_MIC:
+ /* ALP222 packs several inputs per register: mask/encode by mixer_in. */
+ offset = control->descriptor->base +
+ ALP222_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out);
+ reg_value = readl(alpx_dev->base + offset);
+
+ reg_value &= ~ALP222_MIXER_GAIN_MASK(mixer_in);
+
+ #if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," 0x%x (0x%lx => 0x%x)=> (%d,%d,%d)[REG:%x:%02x]\n",
+ mixer_value,
+ ALP222_MIXER_GAIN_SEL(mixer_in, mixer_value),
+ reg_value,
+ lines_count, mixer_in, mixer_out,
+ control->descriptor->base,
+ ALP222_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out));
+ #endif
+
+ reg_value |= ALP222_MIXER_GAIN_SEL(mixer_in, mixer_value);
+
+ break;
+ default:
+ //Nothing to do for others cards yet
+ return 0;
+ }
+
+
+
+ writel(reg_value, alpx_dev->base + offset);
+
+ return 1;
+}
+
+/*
+ * Template for mixer gain controls.
+ * NOTE(review): the .access initializer (READWRITE | TLV_READ) is
+ * overwritten with descriptor->access in alpx_control_mixer_register();
+ * confirm descriptors carry the TLV_READ flag, otherwise the TLV scale set
+ * there is unreachable.
+ */
+static struct snd_kcontrol_new alpx_control_mixer = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+
+ .info = alpx_control_mixer_info,
+ .get = alpx_control_mixer_get,
+ .put = alpx_control_mixer_put,
+};
+
+/*
+ * Register one mixer crosspoint control ("<prefix> in/out Playback Volume")
+ * binding (@mixer_in, @mixer_out) to the next free control slot.  The
+ * shared template is patched (name, index, access, TLV gain scale) and
+ * copied by snd_ctl_new1().
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+static int alpx_control_mixer_register(struct snd_card *card,
+ struct alpx_control_descriptor *descriptor,
+ unsigned int mixer_in,
+ unsigned int mixer_out)
+{
+ struct alpx_device* const alpx_dev = card->private_data;
+ const unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control* const control = &alpx_dev->controls[control_index];
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ const unsigned int lines_count = descriptor->data.mixer.lines_count;
+#endif
+ char name[64] = { 0 };
+ int ret;
+
+ control->descriptor = descriptor;
+ control->data.mixer.mixer_in = mixer_in;
+ control->data.mixer.mixer_out = mixer_out;
+
+ snprintf(name, sizeof(name), "%s %u/%u Playback Volume", descriptor->prefix,
+ mixer_in, mixer_out);
+
+ alpx_control_mixer.name = name;
+ alpx_control_mixer.private_value = control_index;
+ alpx_control_mixer.access = descriptor->access;
+ alpx_control_mixer.tlv.p = descriptor->data.mixer.gains_scale;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," creating mixer %s in controls[%u] linked to [0x%x:0x%x].\n",
+ name,
+ control_index,
+ control->descriptor->base,
+ ALP222_MIXER_GAIN_REG(lines_count, mixer_in, mixer_out));
+#endif
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_mixer, alpx_dev));
+ if (ret)
+ return ret;
+
+ alpx_dev->controls_index++;
+
+ return 0;
+}
+/* Field */
+
+/* .info handler for enumerated "choice" controls: exposes the descriptor's
+ * entry names through snd_ctl_enum_info(). */
+static int alpx_control_choice_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ struct alpx_control_descriptor_choice *choice_descr =
+ &alpx_dev->controls[kcontrol->private_value].descriptor->data.choice;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," Field %d entries.\n", choice_descr->entries_count);
+#endif
+
+ return snd_ctl_enum_info(info, 1, choice_descr->entries_count, choice_descr->entries);
+}
+
+/*
+ * .get handler for enumerated "choice" controls: reads the register,
+ * isolates the field with mask/pos and maps the raw value back to its
+ * enum index.  Returns 0 on success, -EINVAL on an unknown raw value.
+ *
+ * Fix: the success log indexed the wrong union member
+ * (data.translated_choice.entries) for a plain choice control; it now uses
+ * data.choice.entries, consistent with alpx_control_choice_put().
+ */
+static int alpx_control_choice_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ u32 offset, value;
+ unsigned int i;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.choice.offset;
+
+ value = readl(alpx_dev->base + offset);
+ value &= control->descriptor->data.choice.mask;
+ value >>= control->descriptor->data.choice.pos;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_info( alpx_dev->dev," GET[%d] 0x%x (0x%x)<= [0x%p:%x:%x]\n",
+ control_index,
+ value,
+ readl(alpx_dev->base + offset),
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.choice.offset);
+#endif
+
+ /* Translate the raw field value to the matching enum index. */
+ for (i = 0; i < control->descriptor->data.choice.entries_count; i++) {
+ u32 entry_value =
+ control->descriptor->data.choice.entries_values[i];
+
+ if (value == entry_value) {
+ elem_value->value.enumerated.item[0] = i;
+
+ dev_info( alpx_dev->dev,"GET[%d]: Reg: %d => Sync[%d] (%s) \n",
+ control_index,
+ value,
+ i,
+ control->descriptor->data.choice.entries[i]);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * .put handler for enumerated "choice" controls: read-modify-write of the
+ * choice field.  Returns 1 on change, -EINVAL on an out-of-range index.
+ *
+ * Fix: the userspace-supplied enum index was used to address
+ * entries_values[]/entries[] without validation, allowing an out-of-bounds
+ * read; it is now bounds-checked first (as the AxCMem choice .put and the
+ * translated-choice .put already do).
+ */
+static int alpx_control_choice_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ unsigned int index_value;
+ u32 offset, reg_value;
+
+ index_value = elem_value->value.enumerated.item[0];
+ if (index_value >= control->descriptor->data.choice.entries_count)
+ return -EINVAL;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.choice.offset;
+ reg_value = readl(alpx_dev->base + offset);
+
+ reg_value &= ~control->descriptor->data.choice.mask;
+
+ reg_value |= control->descriptor->data.choice.entries_values[index_value] << control->descriptor->data.choice.pos ;
+
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_info( alpx_dev->dev,"PUT[%d] 0x%x ->([%d]0x%x) 0x%x => [0x%p:%x:%x]\n",
+ control_index,
+ readl(alpx_dev->base + offset),
+ index_value,
+ control->descriptor->data.choice.entries_values[index_value],
+ reg_value,
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.choice.offset);
+#endif
+
+ dev_info( alpx_dev->dev,"PUT[%d]: Sync[%d] (%s) => Reg: %d\n",
+ control_index,
+ index_value,
+ control->descriptor->data.choice.entries[index_value],
+ control->descriptor->data.choice.entries_values[index_value]);
+
+ writel(reg_value, alpx_dev->base + offset);
+
+ return 1;
+}
+
+/*
+ * Template for enumerated "choice" controls; .name/.access/.private_value
+ * are patched at registration time (see alpx_control_choice_register()).
+ */
+static struct snd_kcontrol_new alpx_control_choice = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+ .info = alpx_control_choice_info,
+ .get = alpx_control_choice_get,
+ .put = alpx_control_choice_put,
+};
+
+/*
+ * Register one enumerated "choice" control, named after the descriptor
+ * prefix, on the next free control slot.
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+static int alpx_control_choice_register(struct snd_card *card,
+ struct alpx_control_descriptor *descriptor)
+{
+ struct alpx_device *alpx_dev = card->private_data;
+ unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ int ret = 0;
+
+ control->descriptor = descriptor;
+
+ alpx_control_choice.name = descriptor->prefix;
+ alpx_control_choice.private_value = control_index;
+ alpx_control_choice.access = descriptor->access;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"registering CHOICE %s[%d]\n",
+ alpx_control_choice.name,
+ control_index);
+#endif
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_choice, alpx_dev));
+ if (ret)
+ return ret;
+
+ alpx_dev->controls_index++;
+
+ return 0;
+}
+
+/* Translated choice */
+
+
+/* .info handler for translated-choice controls: exposes the descriptor's
+ * application-side entry names through snd_ctl_enum_info(). */
+static int alpx_control_translated_choice_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ struct alpx_control_descriptor_translated_choice *tr_choice =
+ &alpx_dev->controls[kcontrol->private_value].descriptor->data.translated_choice;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," Field %d entries.\n", tr_choice->entries_count);
+#endif
+
+ return snd_ctl_enum_info(info, 1, tr_choice->entries_count, tr_choice->entries);
+}
+
+/*
+ * .get handler for translated-choice controls: the raw register field is
+ * first translated through card_entries_values[] (card value -> app enum
+ * index) before being reported.  Returns -EINVAL if the raw value exceeds
+ * the card-side translation table.
+ */
+static int alpx_control_translated_choice_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ u32 offset, value;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.translated_choice.offset;
+
+ value = readl(alpx_dev->base + offset);
+ value &= control->descriptor->data.translated_choice.mask;
+ value >>= control->descriptor->data.translated_choice.pos;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev," 0x%x (0x%x)<= [0x%p:%x:%x]\n",
+ value,
+ readl(alpx_dev->base + offset),
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.translated_choice.offset);
+#endif
+
+
+ /* Raw card value is an index into the card->app translation table. */
+ if (value < control->descriptor->data.translated_choice.card_entries_count) {
+ const unsigned int app_value = control->descriptor->data.translated_choice.card_entries_values[value];
+ dev_info( alpx_dev->dev,"GET[%d]: Reg: %d => %d: App Sync: %s \n",
+ control_index,
+ value,
+ app_value,
+ control->descriptor->data.translated_choice.entries[app_value]);
+ elem_value->value.enumerated.item[0] = app_value;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * .put handler for translated-choice controls: the app enum index is
+ * translated through entries_values[] to the card value before the
+ * read-modify-write.  Out-of-range indices are clamped to 0 and values
+ * above ALP222_CLK_MANAGER_CLK_SRC_MAX are silently not written.
+ * Always returns 1 (reports a change even when the write was skipped).
+ */
+static int alpx_control_translated_choice_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ unsigned int index_value;
+ u32 offset, reg_value;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.translated_choice.offset;
+ reg_value = readl(alpx_dev->base + offset);
+
+ reg_value &= ~control->descriptor->data.translated_choice.mask;
+
+ index_value = elem_value->value.enumerated.item[0];
+
+ //Avoid error :RESET the source if BUGGED !!
+ if (index_value >= control->descriptor->data.translated_choice.entries_count)
+ index_value = 0;
+
+ reg_value |= control->descriptor->data.translated_choice.entries_values[index_value] << control->descriptor->data.translated_choice.pos ;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"0x%x ->([%d]0x%x) 0x%x => [0x%p:%x:%x]\n",
+ readl(alpx_dev->base + offset),
+ index_value,
+ control->descriptor->data.translated_choice.entries_values[index_value],
+ reg_value,
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.translated_choice.offset);
+#endif
+
+ dev_info( alpx_dev->dev,"PUT[%d]: App Sync[%d] (%s) => Reg: %d\n",
+ control_index,
+ index_value,
+ control->descriptor->data.translated_choice.entries[index_value],
+ control->descriptor->data.translated_choice.entries_values[index_value]);
+
+ //Avoid out of range values
+ if (control->descriptor->data.translated_choice.entries_values[index_value] <= ALP222_CLK_MANAGER_CLK_SRC_MAX) {
+ writel(reg_value, alpx_dev->base + offset);
+ }
+
+ return 1;
+}
+
+
+/*
+ * Template for translated-choice controls; patched at registration time
+ * (see alpx_control_translated_choice_register()).
+ */
+static struct snd_kcontrol_new alpx_control_translated_choice = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+ .info = alpx_control_translated_choice_info,
+ .get = alpx_control_translated_choice_get,
+ .put = alpx_control_translated_choice_put,
+};
+
+/*
+ * Register one translated-choice control, named after the descriptor
+ * prefix, on the next free control slot.
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+static int alpx_control_translated_choice_register(struct snd_card *card,
+ struct alpx_control_descriptor *descriptor)
+{
+ struct alpx_device *alpx_dev = card->private_data;
+ unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ int ret = 0;
+
+ control->descriptor = descriptor;
+
+ alpx_control_translated_choice.name = descriptor->prefix;
+ alpx_control_translated_choice.private_value = control_index;
+ alpx_control_translated_choice.access = descriptor->access;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"registering Translated CHOICE %s[%d]\n",
+ alpx_control_translated_choice.name,
+ control_index);
+#endif
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_translated_choice, alpx_dev));
+ if (ret)
+ return ret;
+
+ alpx_dev->controls_index++;
+
+ return 0;
+}
+
+/* Flags */
+/* .info handler for single-bit flag controls: a plain boolean element. */
+static int alpx_control_flag_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ struct alpx_control_descriptor_choice *flag_choice =
+ &alpx_dev->controls[kcontrol->private_value].descriptor->data.choice;
+
+ dev_dbg( alpx_dev->dev," Flag %d entries.\n", flag_choice->entries_count);
+#endif
+
+ info->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ info->count = 1;
+ info->value.integer.min = 0;
+ info->value.integer.max = 1;
+
+ return 0;
+}
+
+/*
+ * .get handler for single-bit flag controls: reads the register and
+ * extracts the flag bit with mask/pos.  Always returns 0.
+ *
+ * Fix: the control is declared BOOLEAN, so the value must be reported via
+ * value.integer.value[0] (long), not value.enumerated.item[0] (u32) — the
+ * old union-member mix-up only worked by accident on little-endian.
+ */
+static int alpx_control_flag_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ u32 offset, value;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.flag.offset;
+
+ value = readl(alpx_dev->base + offset);
+ value &= control->descriptor->data.flag.mask;
+ value >>= control->descriptor->data.flag.pos;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"0x%x (0x%x)<= [0x%p:%x:%x]\n",
+ value,
+ readl(alpx_dev->base + offset),
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.flag.offset);
+#endif
+
+ elem_value->value.integer.value[0] = value;
+
+ return 0;
+}
+
+/*
+ * .put handler for single-bit flag controls: read-modify-write of the flag
+ * bit.  Returns 1 to report a change.
+ *
+ * Fix: read the new state from value.integer.value[0] — the correct union
+ * member for a BOOLEAN control — instead of value.enumerated.item[0]
+ * (matches the fix in alpx_control_flag_get()).
+ */
+static int alpx_control_flag_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ unsigned int new_value;
+ u32 offset, reg_value;
+
+ offset = control->descriptor->base +
+ control->descriptor->data.flag.offset;
+ reg_value = readl(alpx_dev->base + offset);
+
+ reg_value &= ~control->descriptor->data.flag.mask;
+
+ new_value = elem_value->value.integer.value[0];
+ reg_value |= new_value << control->descriptor->data.flag.pos;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"0x%x -> 0x%x => [0x%p:%x:%x]\n",
+ readl(alpx_dev->base + offset),
+ reg_value,
+ alpx_dev->base,
+ control->descriptor->base,
+ control->descriptor->data.flag.offset);
+#endif
+
+ writel(reg_value, alpx_dev->base + offset);
+
+ return 1;
+}
+
+/*
+ * Template for single-bit flag (boolean) controls; patched at registration
+ * time (see alpx_control_flag_register()).
+ */
+static struct snd_kcontrol_new alpx_control_flag = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+ .info = alpx_control_flag_info,
+ .get = alpx_control_flag_get,
+ .put = alpx_control_flag_put,
+};
+
+
+/*
+ * Register one flag control, named after the descriptor prefix, on the
+ * next free control slot.
+ *
+ * Returns 0 on success or the snd_ctl_add() error code.
+ */
+static int alpx_control_flag_register(struct snd_card *card,
+ struct alpx_control_descriptor *descriptor)
+{
+ struct alpx_device *alpx_dev = card->private_data;
+ unsigned int control_index = alpx_dev->controls_index;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ int ret;
+
+ control->descriptor = descriptor;
+
+ alpx_control_flag.name = descriptor->prefix;
+ alpx_control_flag.private_value = control_index;
+ alpx_control_flag.access = descriptor->access;
+
+ ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_flag, alpx_dev));
+ if (ret)
+ return ret;
+
+ alpx_dev->controls_index++;
+
+ return 0;
+}
+
+/* Flags_channels */
+/* Flags_channels */
+/*
+ * Find the analog-EQ (line PGA) control registered for @side
+ * (0 = left, 1 = right).  Returns NULL if none is found.
+ *
+ * NOTE(review): the loop bound is `controls_count - 1`, which never
+ * examines the LAST registered control — confirm this is intentional and
+ * not an off-by-one (the sibling MIC-gain lookup has the same bound).
+ */
+static struct alpx_control* alpx_lookup_codec_pga_control(struct alpx_device * alpx_dev, unsigned int side)
+{
+ //Lookup the LINE PGA associated to the given side
+ unsigned int ctrl_idx = 0;
+dev_dbg(alpx_dev->dev," alpx_dev->controls_count = %d, side:%d\n", alpx_dev->controls_count, side);
+
+ for (ctrl_idx = 0 ; ctrl_idx < (alpx_dev->controls_count - 1) ; ctrl_idx++) {
+ if ((alpx_dev->controls[ctrl_idx].descriptor->type == ALPX_CONTROL_TYPE_ANALOG_EQ) &&
+ (alpx_dev->controls[ctrl_idx].data.codec.idx == side)){
+ dev_dbg(alpx_dev->dev," found LINE EQ ctrl[%d]{%d: 0x%x:%x}\n",
+ ctrl_idx, alpx_dev->controls[ctrl_idx].data.codec.idx,
+ alpx_dev->controls[ctrl_idx].descriptor->base, ALPX_CODEC_CTRL_GAIN_REG(side));
+
+ //return alpx_dev->controls[ctrl_idx].descriptor->base + ALPX_CODEC_CTRL_GAIN_REG(side);
+ return &alpx_dev->controls[ctrl_idx];
+ }
+ }
+ dev_err(alpx_dev->dev," NOTHING FOUND\n");
+ return NULL;
+}
+
+
+/*
+ * Find the embedded MIC-gain control whose field position matches @pos
+ * (ALP222_MIC_GAIN_L_POS / ALP222_MIC_GAIN_R_POS).  Returns NULL if none.
+ *
+ * NOTE(review): same `controls_count - 1` loop bound as the PGA lookup —
+ * the last registered control is never examined; confirm intentional.
+ */
+static struct alpx_control* alpx_lookup_codec_mic_gain_control(struct alpx_device * alpx_dev, unsigned int pos)
+{
+ //Lookup the LINE PGA associated to the given side
+ unsigned int ctrl_idx = 0;
+dev_dbg(alpx_dev->dev," alpx_dev->controls_count = %d, pos:%d\n", alpx_dev->controls_count, pos);
+
+ for (ctrl_idx = 0 ; ctrl_idx < (alpx_dev->controls_count - 1) ; ctrl_idx++) {
+#if 0
+ dev_dbg(alpx_dev->dev,"idx:%d, descriptor : %p (%s), t:%d\n",
+ ctrl_idx,
+ &alpx_dev->controls[ctrl_idx].descriptor,
+ alpx_dev->controls[ctrl_idx].descriptor->prefix,
+ alpx_dev->controls[ctrl_idx].descriptor->type);
+#endif
+ if ((alpx_dev->controls[ctrl_idx].descriptor->type == ALPX_CONTROL_TYPE_GAINS_EMBEDDED) &&
+ (alpx_dev->controls[ctrl_idx].descriptor->data.mic_gains.pos == pos)){
+ dev_dbg(alpx_dev->dev," found MIC Gain ctrl[%d]{0x%x:%x}\n", ctrl_idx,
+ alpx_dev->controls[ctrl_idx].descriptor->base, alpx_dev->controls[ctrl_idx].descriptor->data.mic_gains.offset);
+
+ //return alpx_dev->controls[ctrl_idx].descriptor->base + alpx_dev->controls[ctrl_idx].descriptor->data.mic_gains.offset;
+ return &alpx_dev->controls[ctrl_idx];
+ }
+ }
+ dev_err(alpx_dev->dev,"NOTHING FOUND\n");
+ return NULL;
+}
+
+/*
+ * React to a MIC enable/disable transition by swapping the stored gain
+ * between the MIC gain register and the LINE codec PGA register:
+ *  - MIC turned off: restore LINE gain, save and zero the MIC gain field;
+ *  - MIC turned on: restore MIC gain, save the LINE gain and mute LINE.
+ *
+ * Fix: line_eq_gain_ctrl was dereferenced to compute the codec register
+ * offset BEFORE its NULL check, crashing when the lookup fails; both
+ * lookups are now validated before any dereference.
+ */
+static void alpx_mic_handle_activate_change(struct alpx_device * alpx_dev, struct alpx_control * mic_control, u32 mic_pos, unsigned int is_mic_on)
+{
+ struct alpx_control* mic_gain_ctrl = NULL;
+ struct alpx_control* line_eq_gain_ctrl = NULL;
+ u32 line_codec_reg_offset = 0;
+ u32 mic_gain_offset;
+ u32 mic_gain_mask;
+ u32 line_side = 0;
+ u32 mic_side_pos = 0;
+
+
+ dev_dbg(alpx_dev->dev,"%s(%p, %p,%d,%d) : CALLED\n",__func__,
+ alpx_dev,
+ mic_control,
+ mic_pos,
+ is_mic_on);
+
+ //Find the right LINE PGA register
+ switch(mic_pos) {
+ case ALP222_MIC_EN_L_POS:
+ dev_dbg(alpx_dev->dev,"LEFT %s\n", is_mic_on ? "On" : "Off");
+ line_side = 0;
+ mic_side_pos = ALP222_MIC_GAIN_L_POS;
+ break;
+ case ALP222_MIC_EN_R_POS:
+ dev_dbg(alpx_dev->dev," RIGHT %s\n",is_mic_on ? "On" : "Off");
+ line_side = 1;
+ mic_side_pos = ALP222_MIC_GAIN_R_POS;
+ break;
+ default:
+ dev_dbg(alpx_dev->dev," Unknown mic position %d !!\n", mic_pos);
+ return;
+ };
+
+
+ line_eq_gain_ctrl = alpx_lookup_codec_pga_control(alpx_dev, line_side);
+ mic_gain_ctrl = alpx_lookup_codec_mic_gain_control(alpx_dev, mic_side_pos);
+
+ if (line_eq_gain_ctrl == NULL){
+ dev_warn(alpx_dev->dev,"No line EQ ctrl found !\n");
+ return;
+ }
+
+ if ( mic_gain_ctrl == NULL){
+ dev_warn(alpx_dev->dev,"No MIC ctrl found !!\n");
+ return;
+ }
+
+ /* Both controls validated: safe to derive register offsets now. */
+ line_codec_reg_offset = line_eq_gain_ctrl->descriptor->base + ALPX_CODEC_CTRL_GAIN_REG(line_side);
+ mic_gain_mask = mic_gain_ctrl->descriptor->data.mic_gains.mask;
+ mic_gain_offset = mic_gain_ctrl->descriptor->base + mic_gain_ctrl->descriptor->data.mic_gains.offset;
+
+
+ dev_dbg(alpx_dev->dev,"PGA offset :0x%x, MIC %s offset : 0x%x, MIC mask: 0x%08x\n",
+ line_codec_reg_offset,
+ mic_control->descriptor->prefix,
+ mic_gain_offset,
+ mic_gain_mask);
+
+ dev_dbg(alpx_dev->dev,"BEFORE MIC %s stored:0X%x, regMIC:0x%x, regPGA:0x%x\n",
+ is_mic_on ? "On" : "Off",
+ mic_control->data.mic.stored_gain_in_reg,
+ readl(alpx_dev->base + mic_gain_offset),
+ readl(alpx_dev->base + line_codec_reg_offset));
+
+ //Now handle the mute state change
+ if (is_mic_on == 0) {
+ //MIC is going to be MUTED
+ //restore the LINE gain and store the current MIC gain.
+ writel(mic_control->data.mic.stored_gain_in_reg, alpx_dev->base + line_codec_reg_offset);
+ mic_control->data.mic.stored_gain_in_reg = readl(alpx_dev->base + mic_gain_offset);
+ //MUTE with MIC with ZERO in gain
+ writel(mic_control->data.mic.stored_gain_in_reg & ~mic_gain_mask, alpx_dev->base + mic_gain_offset);
+ //Keep the gain only
+ mic_control->data.mic.stored_gain_in_reg &= mic_gain_mask;
+
+ } else {
+ //LINE is going to be MUTED
+ //Restore the MIC gain and store the line gain and MUTE line
+ u32 mic_reg_value = readl(alpx_dev->base + mic_gain_offset);
+ writel(mic_control->data.mic.stored_gain_in_reg | (mic_reg_value & ~mic_gain_mask), alpx_dev->base + mic_gain_offset);
+ mic_control->data.mic.stored_gain_in_reg = readl(alpx_dev->base + line_codec_reg_offset);
+ //MUTE LINE with 0 in LINE
+ writel(0, alpx_dev->base + line_codec_reg_offset);
+ }
+
+ dev_dbg(alpx_dev->dev," AFTER MIC %s stored: 0x%x, regMIC:0x%x, regPGA:0x%x\n",
+ is_mic_on ? "On" : "Off",
+ mic_control->data.mic.stored_gain_in_reg,
+ readl(alpx_dev->base + mic_gain_offset),
+ readl(alpx_dev->base + line_codec_reg_offset));
+}
+
+/*
+ * .info handler for embedded MIC flag controls: one boolean per control.
+ *
+ * Fix: alpx_dev/control/control_desc were only consumed by the
+ * CONFIG_ALPX_LOG_CONTROLS log line, producing unused-variable warnings in
+ * normal builds; the declarations now live inside the #ifdef, matching
+ * alpx_control_flag_info().
+ */
+static int alpx_control_flags_embedded_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ struct alpx_control_descriptor *control_desc = control->descriptor;
+
+ dev_dbg( alpx_dev->dev,"Flags %d entries.\n", control_desc->data.mic_flags.lines_count);
+#endif
+
+ info->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ info->count = 1; //One line per control
+ info->value.integer.min = 0;
+ info->value.integer.max = 1;
+ return 0;
+}
+
+/*
+ * .get handler for embedded MIC flag controls: reads the composite flags
+ * register and extracts this control's bit with mask/pos.
+ *
+ * NOTE(review): reports through value.enumerated.item[0] although the
+ * element type is BOOLEAN (which uses value.integer.value[0]); this only
+ * coincides on little-endian — confirm and align with the flag handlers.
+ */
+static int alpx_control_flags_embedded_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *elem_value)
+{
+ struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+ unsigned int control_index = kcontrol->private_value;
+ struct alpx_control *control = &alpx_dev->controls[control_index];
+ struct alpx_control_descriptor *control_desc = control->descriptor;
+ u32 offset, value;
+
+ /* read the composite register */
+ offset = control_desc->base +
+ control_desc->data.mic_flags.offset;
+
+ value = readl(alpx_dev->base + offset);
+
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"0x%x (0x%x)<= [0x%p:%x:%x]\n",
+ value,
+ readl(alpx_dev->base + offset),
+ alpx_dev->base,
+ control_desc->base,
+ control_desc->data.mic_flags.offset);
+#endif
+
+ elem_value->value.enumerated.item[0] = value & control_desc->data.mic_flags.mask;
+ elem_value->value.enumerated.item[0] >>= control_desc->data.mic_flags.pos;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+ dev_dbg( alpx_dev->dev,"item[0]=%d\n",
+ elem_value->value.enumerated.item[0]);
+#endif
+
+ return 0;
+}
+
+/*
+ * ALSA put() callback for an embedded flag control.
+ *
+ * Performs a read-modify-write of the shared register; on a MIC
+ * enable/disable transition it also triggers the mute handling.
+ * Returns 1 when the value changed (so ALSA notifies subscribers),
+ * 0 when the new state equals the current one.
+ */
+static int alpx_control_flags_embedded_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	struct alpx_control_descriptor *control_desc = control->descriptor;
+	/*
+	 * BOOLEAN controls are carried in value.integer.value[] (long),
+	 * not value.enumerated.item[] (matching fix in the get() callback).
+	 * Normalize to 0/1 so only this control's bit can be set below.
+	 */
+	const unsigned int new_state = !!elem_value->value.integer.value[0];
+	const u32 offset = control_desc->base +
+		control_desc->data.mic_flags.offset;
+	u32 reg_value;
+	u32 current_state;
+
+	reg_value = readl(alpx_dev->base + offset);
+
+	dev_dbg(alpx_dev->dev," ctrl[%d]=>%p{name:%s pos:%d reg:0x%08x} : %d\n", control_index, control, control_desc->prefix, control_desc->data.mic_flags.pos, reg_value, new_state);
+
+	current_state = (reg_value & control_desc->data.mic_flags.mask) >> control_desc->data.mic_flags.pos;
+
+	/* No transition: nothing to write, no event to emit. */
+	if (current_state == new_state) {
+		dev_dbg(alpx_dev->dev,"EXIT W/O state change\n");
+		return 0;
+	}
+
+	/* SET the bit according to the associated line */
+	reg_value &= ~control_desc->data.mic_flags.mask;
+	reg_value |= new_state << control_desc->data.mic_flags.pos;
+	writel(reg_value, alpx_dev->base + offset);
+
+	//Handle the MUTE state transition Only for MIC enable/disable
+	if ((control_desc->data.mic_flags.pos == ALP222_MIC_EN_L_POS) ||
+	    (control_desc->data.mic_flags.pos == ALP222_MIC_EN_R_POS)){
+		alpx_mic_handle_activate_change(alpx_dev, control,control_desc->data.mic_flags.pos, new_state);
+	}
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev," value[0]=%ld\n",
+		elem_value->value.integer.value[0]);
+	dev_dbg( alpx_dev->dev," 0x%x -> 0x%x => [0x%p:%x:%x]\n",
+		readl(alpx_dev->base + offset),
+		reg_value,
+		alpx_dev->base,
+		control_desc->base,
+		control_desc->data.mic_flags.offset);
+#endif
+
+	dev_dbg(alpx_dev->dev," EXIT\n");
+	return 1;
+}
+
+/*
+ * Template for embedded-flag controls.  .name, .private_value and
+ * .access are patched per-control at registration time before
+ * snd_ctl_new1() copies the template.
+ */
+static struct snd_kcontrol_new alpx_control_flags_embedded = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+	.info = alpx_control_flags_embedded_info,
+	.get = alpx_control_flags_embedded_get,
+	.put = alpx_control_flags_embedded_put,
+};
+
+/*
+ * Register one embedded-flag control and leave the corresponding MIC
+ * line muted by default.  Returns 0 or the snd_ctl_add() error.
+ */
+static int alpx_control_flags_embedded_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+
+	control->descriptor = descriptor;
+
+	/* NOTE(review): the shared static template is mutated here; this
+	 * assumes probe-time registration is serialized — confirm if
+	 * multiple cards can probe concurrently. */
+	alpx_control_flags_embedded.name = descriptor->prefix;
+	alpx_control_flags_embedded.private_value = control_index;
+	alpx_control_flags_embedded.access = descriptor->access;
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_flags_embedded, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	//Mute the MIC by default
+	alpx_mic_handle_activate_change(alpx_dev, control, descriptor->data.mic_flags.pos, 0);
+
+	return 0;
+}
+
+/* Gains mixed */
+
+/*
+ * Map a MIC gain in dB onto the register scale, clamping to the valid
+ * range (10..65 dB) and skipping the special register value 0.
+ */
+static u32 alpx_mic_gain_db_to_reg (u32 db)
+{
+	if (db <= ALP222_MIC_GAINS_MIN_REG_VAL)
+		return 1;
+	if (db >= ALP222_MIC_GAINS_MAX_REG_VAL)
+		return ALP222_MIC_GAINS_MAX_REG_VAL + 1 - ALP222_MIC_REG_GAIN_SHIFT;
+	return db - ALP222_MIC_REG_GAIN_SHIFT;
+}
+
+/*
+ * Inverse of alpx_mic_gain_db_to_reg(): map a raw register value back
+ * to a gain in dB, clamping to the valid range (register value 0 is
+ * reserved and treated as the minimum).
+ */
+static u32 alpx_mic_gain_reg_to_db (u32 reg)
+{
+	const u32 reg_top = ALP222_MIC_GAINS_MAX_REG_VAL + 1 -
+			ALP222_MIC_REG_GAIN_SHIFT;
+
+	if (reg <= 1)
+		return ALP222_MIC_GAINS_MIN_REG_VAL;
+	if (reg >= reg_top)
+		return ALP222_MIC_GAINS_MAX_REG_VAL;
+	return reg + ALP222_MIC_REG_GAIN_SHIFT;
+}
+
+/*
+ * ALSA info() callback for an "embedded gain" control: an integer gain
+ * field packed with others in a shared hardware register.  Bounds come
+ * from the descriptor (values in dB).
+ */
+static int alpx_control_gains_embedded_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *info)
+{
+
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	struct alpx_control_descriptor *control_desc = control->descriptor;
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev," Gains %d entries.\n", control_desc->data.mic_gains.lines_count);
+#endif
+
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 1; //One control per item !
+	info->value.integer.min = control_desc->data.mic_gains.min;
+	info->value.integer.max = control_desc->data.mic_gains.max;
+
+	return 0;
+}
+
+/*
+ * ALSA get() callback for an embedded gain control: extract the raw
+ * gain field from the shared register and report it to user space in
+ * dB.  Returns 0 on success.
+ */
+static int alpx_control_gains_embedded_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	u32 offset, reg_value, raw_value;
+	struct alpx_control_descriptor *control_desc = control->descriptor;
+
+	/* read the composite register */
+	offset = control_desc->base + control_desc->data.mic_gains.offset;
+
+	reg_value = readl(alpx_dev->base + offset);
+
+
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev," 0x%x (0x%x)<= [0x%p:%x:%x]\n",
+		reg_value,
+		readl(alpx_dev->base + offset),
+		alpx_dev->base,
+		control_desc->base,
+		control_desc->data.mic_gains.offset);
+#endif
+	/* GET the gain field of the associated line */
+	raw_value = reg_value & (control_desc->data.mic_gains.mask);
+	raw_value >>= (control_desc->data.mic_gains.pos);
+	//Convert Raw value into dB
+	elem_value->value.integer.value[0] = alpx_mic_gain_reg_to_db(raw_value);
+
+	#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev,"value[0]=%ld / raw: 0x%x\n",
+		elem_value->value.integer.value[0],
+		raw_value);
+#endif
+
+	return 0;
+}
+
+/*
+ * ALSA put() callback for an embedded gain control.
+ *
+ * Converts the requested dB value to the raw register scale and does a
+ * read-modify-write of the shared register.  Returns 1 when the stored
+ * value changed (ALSA then notifies subscribers), 0 when it did not —
+ * the unconditional "return 1" previously caused spurious change
+ * notifications, inconsistent with the embedded-flags put() callback.
+ */
+static int alpx_control_gains_embedded_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *elem_value)
+{
+	struct alpx_device *alpx_dev = snd_kcontrol_chip(kcontrol);
+	unsigned int control_index = kcontrol->private_value;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	const unsigned int new_value = alpx_mic_gain_db_to_reg(elem_value->value.integer.value[0]);
+	u32 offset, reg_value, current_value;
+	struct alpx_control_descriptor *control_desc = control->descriptor;
+
+	offset = control_desc->base +
+		control_desc->data.mic_gains.offset;
+	reg_value = readl(alpx_dev->base + offset);
+
+	/* Skip the write (and the change event) when nothing changes. */
+	current_value = (reg_value & control_desc->data.mic_gains.mask) >>
+		control_desc->data.mic_gains.pos;
+	if (current_value == new_value)
+		return 0;
+
+	/* SET the gain field of the associated line */
+	reg_value &= ~(control_desc->data.mic_gains.mask);
+	reg_value |= new_value << (control_desc->data.mic_gains.pos);
+
+#if defined(CONFIG_ALPX_LOG_CONTROLS)
+	dev_dbg( alpx_dev->dev,"value[0]=%ld, reg_value: 0x%x\n",
+		elem_value->value.integer.value[0],
+		reg_value );
+	dev_dbg( alpx_dev->dev,"0x%x -> 0x%x => [0x%p:%x:%x]\n",
+		readl(alpx_dev->base + offset),
+		reg_value,
+		alpx_dev->base,
+		control_desc->base,
+		control_desc->data.mic_gains.offset);
+#endif
+
+	writel(reg_value, alpx_dev->base + offset);
+
+	return 1;
+}
+
+
+/*
+ * Template for embedded-gain controls.  .name, .private_value, .access
+ * and .tlv.p are patched per-control at registration time before
+ * snd_ctl_new1() copies the template.
+ */
+static struct snd_kcontrol_new alpx_control_gains_embedded = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+
+	.info = alpx_control_gains_embedded_info,
+	.get = alpx_control_gains_embedded_get,
+	.put = alpx_control_gains_embedded_put,
+};
+
+/*
+ * Register one embedded-gain control named "<prefix> Volume".
+ * Returns 0 or the snd_ctl_add() error.
+ */
+static int alpx_control_gains_embedded_register(struct snd_card *card,
+				struct alpx_control_descriptor *alp_descriptor)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	unsigned int control_index = alpx_dev->controls_index;
+	struct alpx_control *control = &alpx_dev->controls[control_index];
+	int ret;
+	/* Stack buffer is fine here: snd_ctl_new1() copies .name into the
+	 * new control's id before this function returns. */
+	char name[64] = { 0 };
+	control->descriptor = alp_descriptor;
+
+	snprintf(name, sizeof(name), "%s Volume", alp_descriptor->prefix);
+
+	/* NOTE(review): shared static template mutated per control; assumes
+	 * probe-time registration is serialized — confirm. */
+	alpx_control_gains_embedded.name = name;
+	alpx_control_gains_embedded.private_value = control_index;
+	alpx_control_gains_embedded.access = alp_descriptor->access;
+	alpx_control_gains_embedded.tlv.p = alp_descriptor->data.mic_gains.gains_scale;
+
+
+	dev_dbg(alpx_dev->dev," mic: %s, gain offset : 0x%x, gains scale : %p\n",
+		name,
+		alp_descriptor->data.mic_gains.offset,
+		alp_descriptor->data.mic_gains.gains_scale);
+
+	ret = snd_ctl_add(card, snd_ctl_new1(&alpx_control_gains_embedded, alpx_dev));
+	if (ret)
+		return ret;
+
+	alpx_dev->controls_index++;
+
+	return 0;
+}
+
+/* Controls */
+
+/*
+ * Register one ALSA control per router line of this descriptor.
+ * Stops and returns the error of the first failing registration.
+ */
+static int alpx_controls_router_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor)
+{
+	unsigned int line;
+	int err = 0;
+
+	for (line = 0; !err && line < descriptor->data.router.lines_count; line++)
+		err = alpx_control_router_register(card, descriptor, line);
+
+	return err;
+}
+
+/*
+ * Register one ALSA control per (input, output) intersection of this
+ * mixer descriptor (lines_count x lines_count controls in total).
+ * Stops and returns the error of the first failing registration.
+ */
+static int alpx_controls_mixer_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor)
+{
+	const unsigned int lines = descriptor->data.mixer.lines_count;
+	unsigned int in, out;
+	int err;
+
+	for (out = 0; out < lines; out++) {
+		for (in = 0; in < lines; in++) {
+			err = alpx_control_mixer_register(card, descriptor,
+							in, out);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+#if 0
+/*
+ * NOTE(review): dead code kept for reference.  These helpers call the
+ * per-line registration functions with extra arguments that the current
+ * single-descriptor signatures no longer take, and 'o' below is used
+ * uninitialized — this would not compile if re-enabled as-is.
+ */
+static int alpx_controls_gains_embedded_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor)
+{
+	unsigned int lines_count = descriptor->data.mixer.lines_count;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; !ret && i < lines_count; i++) {
+		ret = alpx_control_gains_embedded_register(card, descriptor, i);
+	}
+
+	return ret;
+}
+
+static int alpx_controls_flags_embedded_register(struct snd_card *card,
+				struct alpx_control_descriptor *descriptor)
+{
+	unsigned int lines_count = descriptor->data.mixer.lines_count;
+	unsigned int i, o;
+	int ret = 0;
+
+	for (i = 0;!ret && i < lines_count; i++) {
+		ret = alpx_control_flags_embedded_register(card, descriptor, i, o);
+	}
+
+	return ret;
+}
+#endif
+
+
+/* AlpX Controls */
+/*
+ * Register all ALSA controls described by the card variant.
+ *
+ * Two passes over the variant's descriptors: the first counts how many
+ * ALSA controls each descriptor expands to (so the state array can be
+ * allocated in one go), the second creates and registers them.
+ *
+ * Returns 0 on success, -EBUSY if already registered, -EINVAL on an
+ * unknown descriptor type, -ENOMEM on allocation failure, or the first
+ * registration error.  The controls array is freed by the card's
+ * private_free callback.
+ */
+int alpx_controls_register(struct snd_card *card)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+	const struct alpx_variant *variant = alpx_dev->variant;
+	unsigned int controls_count = 0;
+	unsigned int i;
+	int ret;
+
+	dev_dbg(alpx_dev->dev, "Registering controls for %s\n", card->longname);
+
+	/* Controls may only be registered once per device. */
+	if (alpx_dev->controls)
+		return -EBUSY;
+
+	/* Pass 1: count the controls. */
+	for (i = 0; i < variant->control_descriptors_count; i++) {
+		struct alpx_control_descriptor *descriptor =
+			&variant->control_descriptors[i];
+
+		switch (descriptor->type) {
+		case ALPX_CONTROL_TYPE_CHOICE:
+		case ALPX_CONTROL_TYPE_FLAG:
+		case ALPX_CONTROL_TYPE_TRANSLATED_CHOICE:
+			/* One control for each descriptor. */
+			controls_count++;
+			break;
+		case ALPX_CONTROL_TYPE_AMPLIFIER:
+			/* Amplified lines */
+			controls_count += descriptor->data.ampli.lines_count;
+			break;
+		case ALPX_CONTROL_TYPE_ROUTER:
+			/* One control for each router line. */
+			controls_count += descriptor->data.router.lines_count;
+			break;
+		case ALPX_CONTROL_TYPE_MIXER:
+			/* One control for each mixer line intersection. */
+			controls_count += descriptor->data.mixer.lines_count *
+					descriptor->data.mixer.lines_count;
+			break;
+		case ALPX_CONTROL_TYPE_ANALOG_EQ:
+			controls_count += descriptor->data.codec.lines_count;
+			break;
+		case ALPX_CONTROL_TYPE_GAINS_EMBEDDED:
+			controls_count += descriptor->data.mic_gains.lines_count;
+			break;
+		case ALPX_CONTROL_TYPE_FLAGS_EMBEDDED:
+			controls_count += descriptor->data.mic_flags.lines_count;
+			break;
+		case ALPX_CONTROL_RESERVED:
+			controls_count ++;
+			break;
+		case ALPX_CONTROL_TYPE_CONSTANT:
+			controls_count++;
+			break;
+		case ALPX_CONTROL_TYPE_AXCMEM_REL_CHOICE:
+			controls_count++;
+			break;
+		case ALPX_CONTROL_TYPE_AXCMEM_REL_VALUE:
+			controls_count++;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Per-control state, freed in alpx_card_private_free(). */
+	alpx_dev->controls = kcalloc(controls_count,
+			sizeof(struct alpx_control), GFP_KERNEL);
+
+	if (!alpx_dev->controls)
+		return -ENOMEM;
+
+	alpx_dev->controls_count = controls_count;
+
+	dev_dbg(alpx_dev->dev," alpx_dev->controls_count = %d\n", alpx_dev->controls_count);
+
+	/* Pass 2: create and register the controls (unknown types were
+	 * already rejected in pass 1). */
+	for (i = 0; i < variant->control_descriptors_count; i++) {
+		struct alpx_control_descriptor *alp_descriptor =
+			&variant->control_descriptors[i];
+
+		dev_dbg(alpx_dev->dev,"descriptor[%d]:{%d, %s}\n",
+			i, alp_descriptor->type, alp_descriptor->prefix);
+
+		switch (alp_descriptor->type) {
+		case ALPX_CONTROL_TYPE_AMPLIFIER:
+			ret = alpx_controls_amplifier_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_ANALOG_EQ:
+			ret = alpx_controls_codec_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_ROUTER:
+			ret = alpx_controls_router_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_MIXER:
+			ret = alpx_controls_mixer_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_CHOICE:
+			ret = alpx_control_choice_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_TRANSLATED_CHOICE:
+			ret = alpx_control_translated_choice_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_FLAG:
+			ret = alpx_control_flag_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_GAINS_EMBEDDED:
+			ret = alpx_control_gains_embedded_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_FLAGS_EMBEDDED:
+			ret = alpx_control_flags_embedded_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_RESERVED:
+			ret = alpx_control_reserved_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_CONSTANT:
+			ret = alpx_control_constant_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_AXCMEM_REL_CHOICE:
+			ret = alpx_control_axcmem_rel_choice_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		case ALPX_CONTROL_TYPE_AXCMEM_REL_VALUE:
+			ret = alpx_control_axcmem_rel_value_register(card, alp_descriptor);
+			if (ret)
+				return ret;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Apply the default MIC configuration for ALP222: common-mode (CM)
+ * enabled on both channels, DC removal disabled.  Always returns 0.
+ */
+int alp222_mic_controls_default_config(struct alpx_device * alpx_dev)
+{
+	void __iomem * const mic_reg = alpx_dev->base + ALP222_CONTROL_BASE +
+			ALP222_MIC_CONTROL_REG;
+	uint32_t ctrl_reg = readl(mic_reg);
+
+	dev_dbg(alpx_dev->dev," mic::ctrl_reg[0x%08x:%x]=0x%08x\n",
+		ALP222_CONTROL_BASE, ALP222_MIC_CONTROL_REG,
+		readl(mic_reg));
+
+	/* Clear the DC and CM bits of both channels... */
+	ctrl_reg &= ~(ALP222_MIC_DC_L_MASK | ALP222_MIC_DC_R_MASK |
+		      ALP222_MIC_CM_L_MASK | ALP222_MIC_CM_R_MASK);
+
+	/* ...then set the default state: CM only. */
+	ctrl_reg |= (1<<ALP222_MIC_CM_L_POS) | (1<<ALP222_MIC_CM_R_POS);
+
+	writel(ctrl_reg, mic_reg);
+
+	dev_dbg(alpx_dev->dev," mic::ctrl_reg[0x%08x:%x]=0x%08x\n",
+		ALP222_CONTROL_BASE, ALP222_MIC_CONTROL_REG,
+		readl(mic_reg));
+
+	return 0;
+}
diff --git a/snd-alpx/alpx_controls.h b/snd-alpx/alpx_controls.h
new file mode 100644
index 0000000..567d583
--- /dev/null
+++ b/snd-alpx/alpx_controls.h
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_CONTROLS_H
+#define _ALPX_CONTROLS_H
+
+#include "alpx.h"
+#include "alpx_axcmem.h"
+
+
+/* Kinds of hardware controls a variant descriptor can declare. */
+enum alpx_control_type {
+	ALPX_CONTROL_TYPE_AMPLIFIER = 0, /* Amplification stage*/
+	ALPX_CONTROL_TYPE_ANALOG_EQ, /* Analog CODEC stage*/
+	ALPX_CONTROL_TYPE_ROUTER, /* Router stage*/
+	ALPX_CONTROL_TYPE_MIXER, /* Mixer stage*/
+	ALPX_CONTROL_TYPE_CHOICE, /* misc controls */
+	ALPX_CONTROL_TYPE_FLAG, /* boolean controls */
+	ALPX_CONTROL_TYPE_FLAGS_EMBEDDED, /* Boolean controls embedded with others in the same register*/
+	ALPX_CONTROL_TYPE_GAINS_EMBEDDED, /* Amplifier controls embedded with others in the same register*/
+	ALPX_CONTROL_RESERVED, /* RESERVED Control, used to keep NumId if some controls are removed */
+	ALPX_CONTROL_TYPE_CONSTANT, /* CONSTANT Control, used for a mere constant value */
+	ALPX_CONTROL_TYPE_AXCMEM_REL_CHOICE, /* AXCMEM relative position control */
+	ALPX_CONTROL_TYPE_AXCMEM_REL_VALUE, /* AXCMEM relative position control */
+	ALPX_CONTROL_TYPE_TRANSLATED_CHOICE, /* Enumerated controls with translation tables for compatibility issue between Apps and FW */
+};
+
+/* Per-type payloads for struct alpx_control_descriptor (see union below). */
+struct alpx_control_descriptor_amplifier {
+	const unsigned int* gains_scale; /* Amplifier gains scale for application in cdB (snd_kcontrol_tlv_rw_t*) */
+	unsigned int reg_gain_min; /* min value for the register value*/
+	unsigned int reg_gain_max; /* max value for the register value*/
+	unsigned int lines_count; /* lines of this amplifier */
+};
+
+struct alpx_control_descriptor_codec {
+	u32 offset; /* Offset of the associated register */
+	const unsigned int* gains_scale; /* Amplifier gains scale for application in cdB (snd_kcontrol_tlv_rw_t*) */
+	unsigned int reg_gain_min; /* min value for the register value*/
+	unsigned int reg_gain_max; /* max value for the register value*/
+	unsigned int lines_count;
+};
+
+struct alpx_control_descriptor_router {
+	unsigned int lines_count;
+
+	const char **entries; /* selectable source names */
+	unsigned int entries_count;
+};
+
+struct alpx_control_descriptor_mixer {
+	const unsigned int* gains_scale; /* Amplifier gains scale for application in cdB (snd_kcontrol_tlv_rw_t*) */
+	unsigned int reg_gain_min; /* min value for the register value*/
+	unsigned int reg_gain_max; /* max value for the register value*/
+	unsigned int lines_count;
+};
+
+struct alpx_control_descriptor_choice {
+	u32 offset;
+	u32 mask;
+	u32 pos; /* start bit position in the register */
+
+	const char **entries;
+	u32 *entries_values;
+	unsigned int entries_count;
+};
+
+struct alpx_control_descriptor_translated_choice {
+	u32 offset;
+	u32 mask;
+	u32 pos; /* start bit position in the register */
+
+	const char **entries;
+	u32 *entries_values; /*Entries in card's context */
+	unsigned int entries_count;
+	u32* card_entries_values; /*Entries in register's context */
+	unsigned int card_entries_count;
+};
+
+struct alpx_control_descriptor_flag {
+	u32 offset;
+	u32 mask;
+	u32 pos;
+};
+
+struct alpx_control_descriptor_flags_embedded {
+	u32 offset;
+	u32 mask;
+	u32 pos;
+	unsigned int lines_count;
+};
+/* To be used when the amplifier controls are mixed with others in the same register*/
+struct alpx_control_descriptor_gains_embedded {
+	const unsigned int* gains_scale; /* Amplifier gains scale for application in cdB (snd_kcontrol_tlv_rw_t*)*/
+	unsigned int min ; /* bounds in dB, reported by the info() callback */
+	unsigned int max;
+	unsigned int lines_count ;
+	unsigned int offset; /* register offset and bit-field location */
+	unsigned int mask;
+	unsigned int pos;
+	unsigned int width;
+};
+
+
+/* Payload for a constant (read-only, fixed value) control. */
+struct alpx_control_descriptor_constant {
+	u32 value;
+};
+
+/* AXCMEM-relative enumerated control: the register is located through
+ * two AXCMEM locations and accessed via the getter/setter callbacks. */
+struct alpx_control_descriptor_axcmem_rel_choice {
+	struct alpx_axcmem_loc base_loc;
+	struct alpx_axcmem_loc reg_loc;
+	u32 mask;
+	u32 pos;
+	uint32_t (*getter) (void* addr);
+	void (*setter) (void* addr, uint32_t value);
+	const char** entries;
+	u32* entries_values;
+	unsigned int entries_count;
+};
+
+/* AXCMEM-relative integer control with [min, max] bounds. */
+struct alpx_control_descriptor_axcmem_rel_value {
+	struct alpx_axcmem_loc base_loc;
+	struct alpx_axcmem_loc reg_loc;
+	u32 mask;
+	u32 pos;
+	u32 min;
+	u32 max;
+	uint32_t (*getter) (void* addr);
+	void (*setter) (void* addr, uint32_t value);
+};
+
+/*****************************************************/
+/*****************************************************/
+/* Static description of one control (or group of controls) of a card
+ * variant; the union member in use is selected by 'type'. */
+struct alpx_control_descriptor {
+	enum alpx_control_type type;
+	u32 base; /* base offset of the control's register block */
+	const char *prefix; /* ALSA control name (or name prefix) */
+	unsigned int access; /*Control access in ALSA way (see https://www.kernel.org/doc/html/v4.17/sound/kernel-api/writing-an-alsa-driver.html?highlight=sndrv_ctl_elem_access_readwrite#access-flags)*/
+	union {
+		struct alpx_control_descriptor_amplifier ampli;
+		struct alpx_control_descriptor_codec codec;
+		struct alpx_control_descriptor_router router;
+		struct alpx_control_descriptor_mixer mixer;
+		struct alpx_control_descriptor_choice choice;
+		struct alpx_control_descriptor_flag flag;
+		struct alpx_control_descriptor_flags_embedded mic_flags;
+		struct alpx_control_descriptor_gains_embedded mic_gains;
+		struct alpx_control_descriptor_constant constant;
+		struct alpx_control_descriptor_axcmem_rel_choice axcmem_rel_choice;
+		struct alpx_control_descriptor_axcmem_rel_value axcmem_rel_value;
+		struct alpx_control_descriptor_translated_choice translated_choice;
+	} data;
+};
+
+/* Runtime (mutable) state for each control kind. */
+struct alpx_control_amplifier {
+	unsigned int gain;
+	unsigned int idx;
+};
+
+struct alpx_control_router {
+	unsigned int index;
+};
+
+struct alpx_control_mixer {
+	unsigned int mixer_in;
+	unsigned int mixer_out;
+};
+
+struct alpx_control_codec{
+	unsigned int idx;
+};
+
+struct alpx_control_mic {
+	unsigned int stored_gain_in_reg; /* gain saved while the line is muted */
+};
+
+/* One entry of alpx_device->controls[]; pairs the static descriptor
+ * with the control's runtime state. */
+struct alpx_control {
+	struct alpx_control_descriptor *descriptor;
+
+	union {
+		struct alpx_control_amplifier ampli;
+		struct alpx_control_router router;
+		struct alpx_control_mixer mixer;
+		struct alpx_control_codec codec;
+		struct alpx_control_mic mic;
+
+	} data;
+};
+
+int alpx_controls_register(struct snd_card *card);
+int alp222_mic_controls_default_config(struct alpx_device * alpx_dev);
+
+#endif
diff --git a/snd-alpx/alpx_core.c b/snd-alpx/alpx_core.c
new file mode 100644
index 0000000..0fb4d82
--- /dev/null
+++ b/snd-alpx/alpx_core.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include "alpx.h"
+#include "alpx_reg.h"
+#include "alpx_mtd.h"
+#include "alpx_led.h"
+#include "alpx_controls.h"
+#include "alpx_streams.h"
+#include "alpx_version.h"
+#include "alpx_variants_stereo.h"
+#include "alpx_variants_stereo_apps_preFW283.h"
+#include "alpx_variants_mc.h"
+#include "alpx_variants_882_apps_preFW240.h"
+#include "alpx_variants_dead.h"
+#include "alpx_variants_madi.h"
+#include "alpx_variants_dante.h"
+#include "alpx_cards.h"
+#include "alpx_xdma.h"
+#include "snd_alpx_xdma.h"
+
+
+#include <linux/version.h>
+#include <linux/kmod.h>
+
+#if !defined (CONFIG_WITHOUT_GPIO)
+#define ALPX_WITH_GPIO
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include "alpx_gpio.h"
+#define ALPX_GPIO_OPTION_STRING ""
+#else
+#warning !! GPIOs are DISABLED !!
+#define ALPX_GPIO_OPTION_STRING "(without GPIO)"
+#endif
+
+
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <linux/string.h>
+
+
+#if KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE
+#include "core/generic/6.3/amd_xdma.h"
+#include "include/6.3/amd_xdma.h"
+#else
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/dma/amd_xdma.h>
+#endif
+
+#ifdef WITH_REG_DEBUG
+#include <linux/device.h>
+#endif
+
+/* Constants */
+
+/* Structures */
+
+/* Parameters */
+
+#define ALPX_DEFAULT_CARD_NAME "Digigram AlpX"
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for " ALPX_DEFAULT_CARD_NAME " soundcard.");
+
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string for " ALPX_DEFAULT_CARD_NAME " soundcard.");
+
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable " ALPX_DEFAULT_CARD_NAME " soundcard.");
+
+unsigned int log_transfers = 0;
+module_param(log_transfers, uint, 0644);
+MODULE_PARM_DESC(log_transfers, "0: no transfer logged(default), 1 : transfers logged");
+
+static bool with_board_in_prod_mode = false;
+module_param(with_board_in_prod_mode, bool, 0644);
+MODULE_PARM_DESC(with_board_in_prod_mode, "0: board in USER mode(default) ; 1: board in PRODUCTION mode ");
+
+static bool with_board_in_dead_mode = false;
+module_param(with_board_in_dead_mode, bool, 0644);
+MODULE_PARM_DESC(with_board_in_dead_mode, "0: board in NORMAL mode(default) ; 1: board in DEAD mode ");
+
+static unsigned int dante_configured_fs = ALPDANTE_DEFAULT_FS;
+module_param(dante_configured_fs, uint, 0644);
+MODULE_PARM_DESC(dante_configured_fs, "Configured FS for Alp DANTE card (value in Hz, default: 48000)");
+
+static bool dante_loopback_enabled = false;
+module_param(dante_loopback_enabled, bool, 0644);
+MODULE_PARM_DESC(dante_loopback_enabled, "Enable the Loopback for Alp DANTE card (default: FALSE)");
+
+static bool alp222_pre_FW283_apps_support = false;
+module_param(alp222_pre_FW283_apps_support, bool, 0644);
+MODULE_PARM_DESC(alp222_pre_FW283_apps_support, "For ALp222e cards ONLY, Enable the compatibility mode for applications written for PRE V283 Firmwares (default: FALSE)");
+
+static bool alp882_pre_FW240_apps_support = false;
+module_param(alp882_pre_FW240_apps_support, bool, 0644);
+MODULE_PARM_DESC(alp882_pre_FW240_apps_support, "For ALp882e cards ONLY, Enable the compatibility mode for applications written for PRE V240 Firmwares (default: FALSE)");
+
+
+/* Below, this is significant only if WITH_REG_DEBUG is defined */
+#ifdef WITH_REG_DEBUG
+#warning BUILD with Registers Debug enabled
+
+/* sysfs: show the register offset currently selected for debugging. */
+static ssize_t reg_offset_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+
+	//HOWTO struct dev TO struct alpx_device :
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* const alpx_dev = card->private_data;
+
+	dev_dbg( dev,"%s(): CALLED for card: 0x%p : REG OFFSET: 0x%08x\n", __func__,
+		alpx_dev,
+		alpx_dev->dbg_reg_offset);
+
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", alpx_dev->dbg_reg_offset);
+}
+
+/*
+ * sysfs: select the register offset used by the reg_value attribute.
+ * Accepts decimal/hex input; rejects offsets outside the valid window.
+ */
+static ssize_t reg_offset_store(struct device* dev, struct device_attribute* attr, const char* buf , size_t count)
+{
+	//HOWTO struct dev TO struct alpx_device :
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* const alpx_dev = card->private_data;
+
+	unsigned int reg_offset = 0;
+	if (kstrtou32(buf, 0, &reg_offset) < 0)
+		return -EINVAL;
+
+	/*
+	 * Check offset range.  The original test combined the two bounds
+	 * with '&&', which can never be true (a value cannot be below the
+	 * minimum AND above the maximum), so every offset was accepted.
+	 */
+	if ((reg_offset < ALP222_MIN_REG_OFFSET) ||
+	    (reg_offset > ALP222_MAX_REG_OFFSET))
+		return -EINVAL;
+
+	alpx_dev->dbg_reg_offset = reg_offset;
+	dev_dbg( dev,"%s(): CALLED for alpx: %p, reg_offset: 0x%08x\n", __func__, alpx_dev, alpx_dev->dbg_reg_offset);
+
+	return count;
+}
+
+//static struct device_attribute dev_reg_addr = __ATTR_RW(reg_addr);
+static struct device_attribute dev_reg_offset = __ATTR_RW(reg_offset);
+
+/******************************/
+/* sysfs: read the register at the selected debug offset. */
+static ssize_t reg_value_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+
+	//HOWTO struct dev TO struct alpx_device :
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* const alpx_dev = card->private_data;
+	/* Read the register once to cope with 'clear on read' registers */
+	const uint32_t reg_value = readl(alpx_dev->base+ alpx_dev->dbg_reg_offset);
+
+	dev_dbg( dev,"%s(): CALLED for %p, (dev:%p): [0x%08x] => 0x%08x\n", __func__,
+		alpx_dev, dev,
+		alpx_dev->dbg_reg_offset,
+		reg_value);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_value);
+}
+
+/* sysfs: write the register at the selected debug offset.  The offset
+ * is assumed to have been validated when it was stored. */
+static ssize_t reg_value_store(struct device* dev, struct device_attribute* attr, const char* buf , size_t count)
+{
+	//HOWTO struct dev TO struct alpx_device :
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* const alpx_dev = card->private_data;
+
+	uint32_t reg_value = 0;
+	if (kstrtou32(buf, 0, &reg_value) < 0)
+		return -EINVAL;
+
+
+	writel(reg_value, alpx_dev->base + alpx_dev->dbg_reg_offset);
+
+	/* Read back purely for the debug trace. */
+	dev_dbg( dev,"%s(): CALLED for alpx: %p, [0x%08x] <= 0x%08x, Check:0x%08x\n", __func__,
+		alpx_dev,
+		alpx_dev->dbg_reg_offset,
+		reg_value,
+		readl(alpx_dev->base+ alpx_dev->dbg_reg_offset));
+
+	return count;
+}
+
+static struct device_attribute dev_reg_value = __ATTR_RW(reg_value);
+
+#endif
+
+/******************************/
+/* sysfs: report the board serial number captured at probe time. */
+static ssize_t serial_number_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* alpx_dev = card->private_data;
+
+	dev_dbg(alpx_dev->dev, " %s() : AFTER shift %llu / 0x%llx\n", __func__, alpx_dev->identity.serial_number, alpx_dev->identity.serial_number);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", alpx_dev->identity.serial_number);
+}
+
+static struct device_attribute dev_serial_number = __ATTR_RO(serial_number);
+/***************************************/
+
+/* sysfs: report the FPGA version captured at probe time. */
+static ssize_t ver_fpga_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+	struct snd_card* const snd_card = dev_get_drvdata(dev);
+	struct alpx_device* const alpx = snd_card->private_data;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", alpx->identity.ver_fpga);
+}
+
+static struct device_attribute dev_ver_fpga = __ATTR_RO(ver_fpga);
+
+/***************************************/
+/* sysfs: report the MCU version as "<version>.<revision>". */
+static ssize_t ver_mcu_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* alpx_dev = card->private_data;
+
+	return scnprintf(buf, PAGE_SIZE, "%u.%lu\n",
+		ALPX_COMMON_VERSION_VERSION(alpx_dev->identity.ver_mcu),
+		ALPX_COMMON_VERSION_REVISION(alpx_dev->identity.ver_mcu) );
+}
+
+static struct device_attribute dev_ver_mcu = __ATTR_RO(ver_mcu);
+/***************************************/
+/* sysfs: report the Dante network name of the card.  On failure the
+ * attribute reads as empty (returns 0 bytes). */
+static ssize_t dante_card_name_show(struct device* dev, struct device_attribute* attr, char* buf )
+{
+
+	struct snd_card* const card = dev_get_drvdata(dev);
+	struct alpx_device* alpx_dev = card->private_data;
+
+	int result = alpdante_get_dante_name(alpx_dev, buf, ALPDANTE_NETWORK_NAME_LENGTH);
+	/* strlcat returns the total length of "<name>\n", used as the byte count. */
+	return (!result) ? strlcat(buf, "\n", PAGE_SIZE) : 0;
+}
+
+static struct device_attribute dev_dante_card_name = __ATTR_RO(dante_card_name);
+/***************************************/
+
+/* PCI */
+
+/*
+ * snd_card private_free hook: release per-card resources (GPIO chip
+ * when registered, and the controls state array).
+ */
+static void alpx_card_private_free(struct snd_card *card)
+{
+	struct alpx_device *alpx_dev = card->private_data;
+
+#if defined (ALPX_WITH_GPIO)
+	if (alpx_dev->variant->features & ALPX_VARIANT_FEATURE_GPIOS)
+		alpx_gpio_unregister(alpx_dev);
+#endif
+
+	/* kfree(NULL) is a no-op, so no NULL check is needed. */
+	kfree(alpx_dev->controls);
+
+	/* dirty crash avoid when dma fails and frees card before pipes are init. */
+}
+
+/* Non-zero when the ALP222 daughter-board MIC option is present
+ * (probes the MIC control register's "here" bit). */
+static inline unsigned int is_alp222_with_mic_option(struct alpx_device* alpx_dev)
+{
+	const u32 reg_mic = readl(alpx_dev->base + ALP222_CONTROL_BASE + ALP222_MIC_CONTROL_REG);
+	dev_dbg(alpx_dev->dev, "Daughter_Control[0x%08x:%x] = 0x%08x\n",
+		ALP222_CONTROL_BASE,
+		ALP222_MIC_CONTROL_REG,
+		reg_mic);
+	return reg_mic & (1<<ALP222_MIC_HERE_POS);
+}
+
+/*
+ * Enable and tune the PCI device: relaxed ordering, extended tags,
+ * 512-byte read requests, bus mastering.  Returns 0 or the
+ * pci_enable_device() error.
+ */
+static int alpx_core_set_pci_bus(struct alpx_device* alpx_dev)
+{
+	int ret = pci_enable_device(alpx_dev->pci_dev);
+	if (ret) {
+		dev_err(alpx_dev->dev, "failed to enable PCI device\n");
+		return ret;
+	}
+
+	/* Enable PCI relaxed ordering. */
+	pcie_capability_set_word(alpx_dev->pci_dev, PCI_EXP_DEVCTL,
+			PCI_EXP_DEVCTL_RELAX_EN);
+
+	/* Enable extended tagging. */
+	pcie_capability_set_word(alpx_dev->pci_dev, PCI_EXP_DEVCTL,
+			PCI_EXP_DEVCTL_EXT_TAG);
+
+	/* Set maximum memory read request. */
+	pcie_set_readrq(alpx_dev->pci_dev, 512);
+
+	pci_set_master(alpx_dev->pci_dev);
+
+	return ret;
+}
+
+
+/****************************************************************/
+/*
+* This function HANDLES the XDMA module dependency, either from the kernel
+* or from the package, according to the kernel's version.
+* Returns an error if the kernel's module is in a bad version (kernel < 6.7),
+* as no CYCLIC DMA was supported there yet.
+* MUST be CALLED after alpx_xdma_register() to get the xdma_dev
+*/
+static int alpx_core_handle_xdma_dep (struct device *dev, struct platform_device* xdma_dev)
+{
+
+	dev_info(dev, "Handling XDMA support for Linux : %d.\n", LINUX_VERSION_CODE);
+
+#if KERNEL_VERSION(6, 3, 0) > LINUX_VERSION_CODE
+	dev_info(dev, "OLD Kernel, USE snd-alpx-xdma.\n");
+	//No XDMA in the kernel: the dependency on the package's xdma module
+	//was already forced before this call by
+	// SND_ALPX_XDMA_DEP();
+
+#elif KERNEL_VERSION(6, 7, 0) <= LINUX_VERSION_CODE
+	#if IS_ENABLED(CONFIG_XILINX_XDMA)
+	dev_info(dev, "Kernel XDMA Ok, USE xdma.\n");
+	//Kernel XDMA available: use it.
+	xdma_get_user_irq(xdma_dev, 256); //Dumb irq to force an error : no way
+	#else
+	dev_info(dev, "Kernel XDMA NOT enabled, USE snd-alpx-xdma.\n");
+	//Dependency already handled by SND_ALPX_XDMA_DEP() call defined accordingly !!
+	#endif
+#elif (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) && \
+	(KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE)
+	//FORCE snd-alpx-xdma use, as the kernel's driver lacks cyclic support
+	#if IS_ENABLED(CONFIG_XILINX_XDMA)
+		#if IS_MODULE(CONFIG_XILINX_XDMA)
+			#warning "Xilinx XDMA module must be unloaded before use : NOT COMPLIANT !!"
+		#else
+			#error "Xilinx XDMA moule in Kernel is not COMPLIANT, Kernel must be Reconfigured/Rebuild without it !!!!"
+		#endif
+	#endif
+#endif
+	return 0;
+}
+/****************************************************************************/
+/*
+ * PCI probe entry point: creates the ALSA card, enables/configures the PCI
+ * function, maps BAR0, registers the XDMA platform device, selects the board
+ * variant from the PCI subsystem id (plus a user-register MIC check for the
+ * Alp222), sets up MTD / PCM / controls / proc / sysfs, and finally registers
+ * the sound card. Errors unwind through the labels at the bottom.
+ */
+static int alpx_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_device_id)
+{
+ static int card_idx = 0;
+ struct device *dev = &pci_dev->dev;
+ struct alpx_device *alpx_dev;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ int ret;
+ int is_update_required = 0;
+
+ /* Module identity */
+
+ dev_info(dev,"version %s %s build date: %s, time: %s. %s, %s\n",
+ ALPX_MODULE_VERSION,
+ ALPX_GPIO_OPTION_STRING,
+ __DATE__ ,
+ __TIME__,
+ SND_ALPX_XDMA_DEP(),
+ log_transfers ? "Transfers Logged":"");
+
+ /* Ensure dependency on AlpX XDMA support if needed*/
+ SND_ALPX_XDMA_DEP();
+
+ /* Card */
+
+ if (card_idx >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[card_idx]) {
+ card_idx++;
+ return -ENOENT;
+ }
+
+ dev_dbg(dev,"%s() : PCI Id : Vendor: 0x04%x, Device: 0x%04x, subvendor: 0x%04x, subdevice: 0x%04x\n", __func__,
+ pci_device_id->vendor,
+ pci_device_id->device,
+ pci_device_id->subvendor,
+ pci_device_id->subdevice);
+
+ dev_dbg(dev,"%s() : PCI dev: Vendor: 0x04%x, Device: 0x%04x, subvendor: 0x%04x, subdevice: 0x%04x\n", __func__,
+ pci_dev->vendor,
+ pci_dev->device,
+ pci_dev->subsystem_vendor,
+ pci_dev->subsystem_device);
+
+
+ /* alpx_device is allocated as the card's private data and freed with it. */
+ ret = snd_card_new(dev, index[card_idx], id[card_idx], THIS_MODULE,
+ sizeof(*alpx_dev), &card);
+ if (ret) {
+ dev_err(dev," snd_card_new() => %d\n", ret);
+ return ret;
+ }
+
+ card->private_free = alpx_card_private_free;
+
+ /* PCI */
+
+ pci_set_drvdata(pci_dev, card);
+
+ alpx_dev = card->private_data;
+ alpx_dev->dev = dev;
+ alpx_dev->pci_dev = pci_dev;
+
+ ret = alpx_core_set_pci_bus(alpx_dev);
+ if (ret){
+ dev_err(dev," alpx_core_set_pci_bus(alpx_dev) => %d\n", ret);
+ goto error_card;
+ }
+
+ //Map USER BAR now to get access to all card's resources
+ /* NOTE(review): pci_ioremap_bar() can return NULL and the result is not
+ * checked; a failed mapping would fault in the register accesses below. */
+ alpx_dev->base = pci_ioremap_bar(pci_dev, 0);
+
+ /* PCI */
+
+ /* NOTE(review): the whole enable/relaxed-ordering/ext-tag/readrq/master
+ * sequence below duplicates alpx_core_set_pci_bus() called just above. */
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ dev_err(dev, "failed to enable PCI device\n");
+ goto error_card;
+ }
+
+ /* Enable PCI relaxed ordering. */
+ pcie_capability_set_word(pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN);
+
+ /* Enable extended tagging. */
+ pcie_capability_set_word(pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_EXT_TAG);
+
+ /* Set maximum memory read request. */
+ pcie_set_readrq(pci_dev, 512);
+
+ pci_set_master(pci_dev);
+
+ /* XDMA */
+
+ /* NOTE(review): on failure 'ret' still holds 0 from the PCI setup above,
+ * so the error path would return success — confirm and set an errno here. */
+ alpx_dev->xdma_pdev = alpx_xdma_register(pci_dev);
+ if (!alpx_dev->xdma_pdev) {
+ dev_err(dev," alpx_xdma_register(alpx_dev) => %d\n", ret);
+ goto disable_pci_dev;
+ }
+
+ /* XDMA dependency handling*/
+ ret = alpx_core_handle_xdma_dep(dev, alpx_dev->xdma_pdev);
+ if (ret) {
+ dev_err(dev, "Error %d when handling xdma dependency !\n", ret);
+ goto disable_pci_dev;
+ }
+
+ /* Variant */
+
+ //Use PCI Id to select the actual variant : WARNING, MIC option is checked with user's registers !!
+ switch (pci_dev->subsystem_device){
+ case ALPX_PCI_ID_SUBSYSTEM_ALP222:
+ /* HANDLE Alp222_MIC with LINE PCI Id : firmwares ante V272*/
+ if (alp222_pre_FW283_apps_support) {
+ dev_warn(alpx_dev->dev, "!!! Alp2220 Pre FW283 Application compatibility mode ACTIVATED !!!");
+ }
+ if (!is_alp222_with_mic_option(alpx_dev)) {
+ alpx_dev->variant = (!alp222_pre_FW283_apps_support) ? &alp222_variant : &alp222_app_preFW283_variant;
+ }
+ else {
+ alpx_dev->variant = (!alp222_pre_FW283_apps_support) ? &alp222_mic_variant : &alp222_app_preFW283_mic_variant;
+ alp222_mic_controls_default_config(alpx_dev);
+ }
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALP222_MIC:
+ if (alp222_pre_FW283_apps_support) {
+ dev_warn(alpx_dev->dev, "!!! Alp222 Pre FW283 Application compatibility mode ACTIVATED !!!");
+ }
+ alpx_dev->variant = (!alp222_pre_FW283_apps_support) ? &alp222_mic_variant : &alp222_app_preFW283_mic_variant;
+ alp222_mic_controls_default_config(alpx_dev);
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_MADI:
+ alpx_dev->variant = &alpx_madi_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALP882:
+ if (alp882_pre_FW240_apps_support)
+ dev_warn(alpx_dev->dev, "!!! Alp882 LINE with Pre FW240 Application compatibility mode ACTIVATED !!!");
+ alpx_dev->variant = (!alp882_pre_FW240_apps_support) ? &alp882_line_variant : &alp882_app_preFW240_line_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALP882_MIC:
+ if (alp882_pre_FW240_apps_support)
+ dev_warn(alpx_dev->dev, "!!! Alp882 MIC with Pre FW240 Application compatibility mode ACTIVATED !!!");
+ alpx_dev->variant = (!alp882_pre_FW240_apps_support) ? &alp882_mic_variant : &alp882_app_preFW240_mic_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALP442:
+ alpx_dev->variant = &alp442_line_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALP442_MIC:
+ alpx_dev->variant = &alp442_mic_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_MADI_ALP_DEAD:
+ alpx_dev->variant = &alpx_dead_variant;
+ break;
+ case ALPX_PCI_ID_SUBSYSTEM_ALPDANTE:
+ alpx_dev->variant = &alpx_dante_variant;
+ break;
+ default:
+ dev_warn(alpx_dev->dev,"ALPX Driver: Model Id 0x%04x is not supported => DEAD by default Here !\n",
+ pci_dev->subsystem_device);
+ alpx_dev->variant = &alpx_dead_variant;
+ }
+
+ /* FORCED DEAD mode, not for DANTE card */
+ if (with_board_in_dead_mode)
+ {
+ /* Check DEAD mode for supported variants*/
+ if (alpx_dev->variant->model != ALPX_VARIANT_MODEL_ALPDANTE) {
+ alpx_dev->variant = &alpx_dead_variant;
+ dev_warn(alpx_dev->dev, " !!! Board forced in DEAD mode !!\n");
+ }
+ else {
+ dev_err(alpx_dev->dev, " NO DEAD mode supported for this board : %s\n", alpx_dev->variant->shortname);
+ ret = -EINVAL;
+ goto unregister_xdma;
+ }
+ }
+
+ /* MTD */
+ if (alpx_dev->variant->flash_partitions.qty != 0) {
+// /* mtd Handle the PRODUCTION access or DEAD card, BUT not for DANTE cards*/
+ with_board_in_prod_mode &= (alpx_dev->variant->model != ALPX_VARIANT_MODEL_ALPDANTE);
+
+ if ( with_board_in_prod_mode ||
+ (alpx_dev->variant->model == ALPX_VARIANT_DEAD)) {
+ dev_warn( alpx_dev->dev," Flash in PRODUCTION mode or DEAD card: full access !\n");
+ ret = alpx_mtd_probe(alpx_dev, alpx_dev->variant->flash_partitions.partitions ,alpx_dev->variant->flash_partitions.qty);
+ }
+ else {
+ dev_dbg( alpx_dev->dev," Flash in USER mode: firmware update access only.\n");
+ ret = alpx_mtd_probe(alpx_dev,
+ &alpx_dev->variant->flash_partitions.partitions[ALPX_FLASH_PARTITION_FW_ID] ,
+ alpx_dev->variant->flash_partitions.qty_for_fw_update);
+ }
+
+ if (ret)
+ goto unregister_xdma;
+ }
+
+ /* DEAD card: keep only the flash/MTD access, no audio services. */
+ if (alpx_dev->variant->model == ALPX_VARIANT_DEAD) {
+ dev_warn(alpx_dev->dev," !!!! DEAD card found : Flash in PROD mode and nothing else !!\n");
+ return 0;
+ }
+
+ /* finalize identity */
+ switch (alpx_dev->variant->model) {
+ case ALPX_VARIANT_MODEL_ALP882:
+ case ALPX_VARIANT_MODEL_ALP882_MIC:
+ case ALPX_VARIANT_MODEL_ALP442:
+ case ALPX_VARIANT_MODEL_ALP442_MIC:
+ alpmultichan_finalize_identity(alpx_dev);
+ /* Check FW compatibility */
+ if (alpx_dev->identity.ver_fpga < ALPMC_SUPPORTED_BASE_FW_VERSION) {
+ dev_warn(alpx_dev->dev, "UNSUPPORTED MultiChannels firmware version %d (supported started at %d), UPDATE required !!\n",
+ alpx_dev->identity.ver_fpga,
+ ALPMC_SUPPORTED_BASE_FW_VERSION);
+ is_update_required = 1;
+ }
+
+ break;
+ case ALPX_VARIANT_MODEL_ALP222:
+ case ALPX_VARIANT_MODEL_ALP222_MIC:
+ alpstereo_finalize_identity(alpx_dev);
+ /* Check FW compatibility */
+ if (alpx_dev->identity.ver_fpga < ALP222_SUPPORTED_BASE_FW_VERSION) {
+ dev_warn(alpx_dev->dev, "UNSUPPORTED Stereo firmware version %d (supported started at %d), UPDATE required !!\n",
+ alpx_dev->identity.ver_fpga,
+ ALP222_SUPPORTED_BASE_FW_VERSION);
+// is_update_required = 1;
+ }
+ break;
+ case ALPX_VARIANT_MODEL_ALPDANTE:
+ alpdante_finalize_identity(alpx_dev);
+ /* Check FW compatibility */
+ if (alpx_dev->identity.ver_fpga < ALPDANTE_SUPPORTED_BASE_FW_VERSION) {
+ dev_warn(alpx_dev->dev, "UNSUPPORTED AlpDANTE firmware version %d (supported started at %d), UPDATE required !!\n",
+ alpx_dev->identity.ver_fpga,
+ ALPDANTE_SUPPORTED_BASE_FW_VERSION);
+ is_update_required = 1;
+ }
+ break;
+ default:
+ dev_warn(alpx_dev->dev, " identity not finalized for this model : %d\n", alpx_dev->variant->model);
+ }
+
+
+ /* Remount MTD with SERIAL number added only in USER mod, to clearly identify the cards */
+ if (!with_board_in_prod_mode ) {
+
+ //Build a special partition table to add the serial number (A COPY) !!
+ struct mtd_partition fw_partitions_table[ALPX_FLASH_PARTITION_QTY];
+ char part_names[ALPX_FLASH_PARTITION_QTY][128];
+ unsigned int part_idx = 0;
+
+ for (part_idx = 0 ; part_idx < alpx_dev->variant->flash_partitions.qty_for_fw_update ; ++part_idx) {
+
+ fw_partitions_table[part_idx] = alpx_dev->variant->flash_partitions.partitions[ALPX_FLASH_PARTITION_FW_ID + part_idx];
+ /* NOTE(review): the bound passed is PAGE_SIZE (4096) but each
+ * part_names[] entry is only 128 bytes — confirm intended limit. */
+ scnprintf(part_names[part_idx], PAGE_SIZE, "%s-%llu",
+ fw_partitions_table[part_idx].name,
+ alpx_dev->identity.serial_number);
+
+ fw_partitions_table[part_idx].name = part_names[part_idx];
+ }
+
+ dev_dbg( alpx_dev->dev," Flash in USER mode: firmware partitionS with S/N appened.\n");
+ /* Replace the current partitions */
+ alpx_mtd_remove(alpx_dev);
+ ret = alpx_mtd_probe(alpx_dev,
+ fw_partitions_table,
+ alpx_dev->variant->flash_partitions.qty_for_fw_update);
+ }
+
+ /* NOTE(review): when the remount above is skipped (prod mode) this tests
+ * the stale 'ret' from the earlier MTD/XDMA step — confirm intent. */
+ if (ret){
+ dev_err(alpx_dev->dev," Error %d when re-creating Firmware partition\n", ret);
+ goto unregister_xdma;
+ }
+
+ //Stop here if update required.
+ if (is_update_required == 1)
+ return 0;
+
+ /* Information */
+
+ strcpy(card->driver, "Driver AlpX");
+
+ //Check if replaced by parameters, default : use the variant names
+ dev_dbg( alpx_dev->dev,"%s() : id[%d] = %s\n", __func__,
+ card_idx,
+ id[card_idx] == NULL ? "NULL" : id[card_idx]);
+
+ /* NOTE(review): id[]/shortname are passed as printf format strings; a '%'
+ * in them would be misinterpreted — sprintf(dst, "%s", src) is safer. */
+ if (id[card_idx])
+ sprintf(card->shortname, id[card_idx]);
+ else
+ sprintf(card->shortname, alpx_dev->variant->shortname);
+
+ strcpy(card->longname, alpx_dev->variant->longname);
+ strcpy(card->mixername, alpx_dev->variant->mixername);
+
+ /* Pipes */
+
+ spin_lock_init(&alpx_dev->config.lock);
+
+ alpx_pipe_init(&alpx_dev->playback, true);
+ alpx_pipe_init(&alpx_dev->capture, false);
+
+ /* PCM */
+
+ ret = snd_pcm_new(card, card->shortname, 0, 1, 1, &pcm);
+ if (ret) {
+ dev_err(dev," snd_pcm_new(card) => %d\n", ret);
+ goto unregister_xdma;
+ }
+
+ pcm->private_data = alpx_dev;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &alpx_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &alpx_capture_ops);
+
+ pcm->info_flags = 0;
+
+ strcpy(pcm->name, card->longname);
+
+ /* Controls */
+
+ ret = alpx_controls_register(card);
+ if (ret) {
+ dev_err(dev," alpx_controls_register(card) => %d\n", ret);
+ goto unregister_xdma;
+ }
+
+ /* Buffer */
+
+#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, dev,
+ alpx_dev->variant->playback_hw->buffer_bytes_max,
+ alpx_dev->variant->playback_hw->buffer_bytes_max);
+#else
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, dev,
+ alpx_dev->variant->playback_hw->buffer_bytes_max,
+ alpx_dev->variant->playback_hw->buffer_bytes_max);
+#endif
+#if defined (ALPX_WITH_GPIO)
+ /* GPIO */
+
+ /* NOTE(review): if the GPIOS feature flag is clear, the 'if (ret)' below
+ * re-tests the stale value from alpx_controls_register() — harmless today
+ * (it was 0 to get here) but fragile; confirm. */
+ if (alpx_dev->variant->features & ALPX_VARIANT_FEATURE_GPIOS)
+ ret = alpx_gpio_register(alpx_dev, card->shortname);
+
+ if (ret) {
+ dev_err(dev," alpx_gpio_register(alpx_dev) => %d\n", ret);
+ goto unregister_xdma;
+ }
+#endif
+ /* Proc */
+ ret = alpx_proc_probe(alpx_dev);
+ if (ret) {
+ dev_err(dev," alpx_proc_probe(alpx_dev) => %d\n", ret);
+ goto unregister_xdma;
+ }
+
+ /* Card */
+
+ ret = snd_card_register(card);
+ if (ret){
+ dev_err(dev," snd_card_register(card) => %d\n", ret);
+ goto error_proc;
+ }
+
+ /* Sys FS files */
+ /* Reg debug */
+#ifdef WITH_REG_DEBUG
+ dev_info(dev,"REG_DEBUG activated\n");
+ alpx_dev->dbg_reg_offset = ALP222_MIN_REG_OFFSET;
+
+ ret = device_create_file(dev, &dev_reg_offset);
+ if (ret)
+ {
+ dev_err(dev," device_create_file(addr) => %d\n", ret);
+ }
+ else
+ {
+ dev_err(dev," device_create_file(addr) : Ok\n");
+ }
+
+ ret = device_create_file(dev, &dev_reg_value);
+ if (ret)
+ {
+ dev_err(dev," device_create_file(value) => %d\n", ret);
+ }
+#endif
+
+ /* Serial number file */
+ ret = device_create_file(dev, &dev_serial_number);
+ if (ret)
+ {
+ dev_err(dev," Serial Number device_create_file() => %d\n", ret);
+ }
+
+ /* FPGA version file */
+ ret = device_create_file(dev, &dev_ver_fpga);
+ if (ret)
+ {
+ dev_err(dev," FPGA Version device_create_file() => %d\n", ret);
+ }
+
+ /* MCU version file */
+ ret = device_create_file(dev, &dev_ver_mcu);
+ if (ret)
+ {
+ dev_err(dev," MCU Version device_create_file() => %d\n", ret);
+ }
+
+ /* DANTE card's name */
+ if (alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALPDANTE) {
+ ret = device_create_file(dev, &dev_dante_card_name);
+ if (ret)
+ {
+ dev_err(dev," DANTE card's name device_create_file() => %d\n", ret);
+ }
+ }
+
+ /* Production area replication from USER to GOLDEN if needed*/
+ if (((alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP222) ||
+ (alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALP222_MIC)) &&
+ alpx_mtd_is_available(alpx_dev) &&
+ !alpx_mtd_is_golden_prod_area_valid(alpx_dev)) {
+ dev_info(alpx_dev->dev," Production area in GOLDEN must be INITIALIZED\n");
+ ret = alpx_mtd_replicate_prod_area(alpx_dev);
+ if (!ret) {
+ dev_info(alpx_dev->dev," Production area in GOLDEN is INITIALIZED\n");
+ }
+ else {
+ dev_warn(alpx_dev->dev," Production area in GOLDEN NOT CORRECTLY INITIALIZED !!\n");
+ }
+ }
+
+ /* Per-family "Created" banner in the kernel log. */
+ switch (alpx_dev->variant->model){
+ case ALPX_VARIANT_MODEL_ALP222:
+ case ALPX_VARIANT_MODEL_ALP222_MIC:
+ alpstereo_print_identity(alpx_dev, card, "Created");
+ break;
+ case ALPX_VARIANT_MODEL_ALP882:
+ case ALPX_VARIANT_MODEL_ALP882_MIC:
+ case ALPX_VARIANT_MODEL_ALP442:
+ case ALPX_VARIANT_MODEL_ALP442_MIC:
+ alpmultichan_print_identity(alpx_dev, card, "Created");
+ break;
+ case ALPX_VARIANT_MODEL_MADI:
+ case ALPX_VARIANT_MODEL_MADI_LOOPBACK:
+ alpmadi_print_identity(alpx_dev, card, "Created");
+ break;
+ case ALPX_VARIANT_MODEL_ALPDANTE:
+ alpdante_print_identity(alpx_dev, card, "Created");
+ alpdante_card_setup(alpx_dev, card, dante_configured_fs, dante_loopback_enabled);
+ break;
+ default:
+ dev_warn(alpx_dev->dev," !!! UNKNOW variant identity: %d !!!!\n", alpx_dev->variant->model);
+ };
+
+ card_idx++;
+
+ return 0;
+
+error_proc:
+ dev_err(alpx_dev->dev," %s(): Error : 0x%x when creating proc entry\n", __func__, ret);
+ alpx_proc_remove(alpx_dev);
+unregister_xdma:
+ alpx_xdma_unregister(pci_dev, alpx_dev->xdma_pdev);
+disable_pci_dev:
+ pci_disable_device(pci_dev);
+error_card:
+ dev_err(alpx_dev->dev," %s(): Error : 0x%x when creating the card\n", __func__, ret);
+ snd_card_free(card);
+
+ return ret;
+}
+
+/*
+ * PCI remove: logs the per-family "Deleted" banner, removes the sysfs files,
+ * tears down MTD / proc / XDMA, frees the ALSA card and disables the PCI
+ * function. Mirrors the setup order of alpx_probe().
+ */
+static void alpx_remove(struct pci_dev *pci_dev)
+{
+ struct snd_card *card = pci_get_drvdata(pci_dev);
+ /* alpx_dev lives inside card->private_data (see snd_card_new in probe). */
+ struct alpx_device *alpx_dev = card->private_data;
+
+ switch (alpx_dev->variant->model){
+ case ALPX_VARIANT_MODEL_ALP222:
+ case ALPX_VARIANT_MODEL_ALP222_MIC:
+ alpstereo_print_identity(alpx_dev, card, "Deleted");
+ break;
+ case ALPX_VARIANT_MODEL_ALP882:
+ case ALPX_VARIANT_MODEL_ALP882_MIC:
+ case ALPX_VARIANT_MODEL_ALP442:
+ case ALPX_VARIANT_MODEL_ALP442_MIC:
+ if (card != NULL) {
+ alpmultichan_print_identity(alpx_dev, card, "Deleted");
+ }
+ break;
+ case ALPX_VARIANT_MODEL_MADI:
+ case ALPX_VARIANT_MODEL_MADI_LOOPBACK:
+ alpmadi_print_identity(alpx_dev, card, "Deleted");
+ break;
+ case ALPX_VARIANT_MODEL_ALPDANTE:
+ alpdante_print_identity(alpx_dev, card, "Deleted");
+ break;
+ default:
+ dev_warn(alpx_dev->dev," !!! UNKNOW variant identity: %d !!!!\n", alpx_dev->variant->model);
+ }
+
+ /* Reg debug */
+#ifdef WITH_REG_DEBUG
+ device_remove_file(alpx_dev->dev, &dev_reg_offset);
+ device_remove_file(alpx_dev->dev, &dev_reg_value);
+#endif
+ device_remove_file(alpx_dev->dev, &dev_serial_number);
+ device_remove_file(alpx_dev->dev, &dev_ver_fpga);
+ device_remove_file(alpx_dev->dev, &dev_ver_mcu);
+
+ if (alpx_dev->variant->model == ALPX_VARIANT_MODEL_ALPDANTE) {
+ device_remove_file(alpx_dev->dev, &dev_dante_card_name);
+ }
+
+ alpx_mtd_remove(alpx_dev);
+ alpx_proc_remove(alpx_dev);
+
+ if (alpx_dev->xdma_pdev != NULL) {
+ alpx_xdma_unregister(pci_dev, alpx_dev->xdma_pdev);
+ }
+
+ if (card != NULL) {
+ snd_card_free(card);
+ }
+
+ /* NOTE(review): snd_card_free() above frees card->private_data, i.e.
+ * alpx_dev — dereferencing alpx_dev->pci_dev here is a use-after-free;
+ * the 'pci_dev' argument should be used instead. Confirm and fix. */
+ pci_disable_device(alpx_dev->pci_dev);
+}
+
+/* Match every Digigram board exposing the AlpX PCI device id; the exact
+ * model is resolved from the PCI subsystem id inside alpx_probe(). */
+static const struct pci_device_id alpx_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGIGRAM, ALPX_PCI_ID_DEVICE) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, alpx_pci_ids);
+
+/* PCI driver glue: probe/remove registered via module_pci_driver(). */
+static struct pci_driver alpx_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = alpx_pci_ids,
+ .probe = alpx_probe,
+ .remove = alpx_remove,
+};
+
+module_pci_driver(alpx_driver);
+
+MODULE_DESCRIPTION("AlpX audio cards driver");
+MODULE_AUTHOR("Digigram Digital");
+MODULE_VERSION(ALPX_MODULE_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/alpx_gpio.c b/snd-alpx/alpx_gpio.c
new file mode 100644
index 0000000..4734057
--- /dev/null
+++ b/snd-alpx/alpx_gpio.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+
+#include "alpx_gpio.h"
+
+#include <linux/io.h>
+
+/* GPIO numbering convention: offsets [0, inputs_qty) are inputs, the rest
+ * are outputs. Returns true when 'offset' designates an input line. */
+static inline bool alpx_gpio_is_an_input (struct alpx_device *alpx_dev, unsigned offset)
+{
+ return offset < alpx_dev->variant->gpios.inputs_qty;
+}
+
+
+/*
+ * gpio_chip .get hook: read the line level for 'offset'.
+ * Inputs and outputs live in two distinct registers; for outputs the chip
+ * offset is first translated to a bit rank within the output register.
+ */
+int alpx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct alpx_device *alpx_dev = gpiochip_get_data(chip);
+ u32 value;
+
+ if (alpx_gpio_is_an_input(alpx_dev, offset)) {
+ value = readl(alpx_dev->base + alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.inputs_reg_offset);
+
+ dev_dbg( alpx_dev->dev,"[0x%08x] =>[0x%08x]\n", alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.inputs_reg_offset, value);
+ }
+ else {
+ value = readl(alpx_dev->base + alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset);
+
+ dev_dbg( alpx_dev->dev,"[0x%08x] =>[0x%08x]\n", alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset, value);
+ //translate offset -> bit rank
+ offset -= alpx_dev->variant->gpios.inputs_qty;
+ }
+
+ return ALPX_GPIO_VALUE(offset, value);
+}
+
+/*
+ * gpio_chip .set hook: drive output line 'offset' to 'gpio_value'.
+ * Silently ignores input offsets. Performs a read-modify-write of the
+ * output register.
+ * NOTE(review): the RMW is not protected by any lock here — concurrent
+ * callers could lose updates; confirm whether gpiolib serializes this.
+ */
+void alpx_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int gpio_value)
+{
+ struct alpx_device *alpx_dev = gpiochip_get_data(chip);
+ unsigned int value;
+
+
+ if (alpx_gpio_is_an_input(alpx_dev, offset))
+ return;
+
+ //translate Offset to bit rank
+ offset -= alpx_dev->variant->gpios.inputs_qty;
+
+ value = readl(alpx_dev->base + alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset );
+
+ dev_dbg( alpx_dev->dev,"read [0x%08x] =>0x%08x\n",alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset, value);
+
+
+ value &= ~ALPX_GPIO_MASK(offset);
+ value |= ALPX_GPIO_SEL(offset, gpio_value);
+
+ dev_dbg( alpx_dev->dev,"write 0x%08x =>[0x%08x]\n", value , alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset);
+
+ writel(value, alpx_dev->base + alpx_dev->variant->gpios.base + alpx_dev->variant->gpios.outputs_reg_offset);
+}
+
+/*
+ * gpio_chip .get_direction hook. Direction is fixed by the offset
+ * convention (low offsets = inputs), so no hardware access is needed.
+ * Returns GPIOF_DIR_IN / GPIOF_DIR_OUT, or -EINVAL for an out-of-range
+ * offset.
+ */
+int alpx_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct alpx_device *alpx_dev = gpiochip_get_data(chip);
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ dev_dbg( alpx_dev->dev,"Offset %d is %s\n",
+ offset, alpx_gpio_is_an_input(alpx_dev, offset) ? "INPUT" : "OUTPUT");
+
+ return alpx_gpio_is_an_input(alpx_dev, offset) ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
+/*
+ * gpio_chip .direction_output hook. Directions are fixed in hardware, so
+ * this only validates that 'offset' is an output and writes the initial
+ * level. Returns -EINVAL when asked to turn an input into an output.
+ */
+int alpx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int gpio_value)
+{
+ struct alpx_device *alpx_dev = gpiochip_get_data(chip);
+ if (alpx_gpio_is_an_input(alpx_dev, offset))
+ return -EINVAL;
+
+ alpx_gpio_set(chip, offset, gpio_value);
+
+ return 0;
+}
+
+/* gpio_chip .direction_input hook: directions are fixed in hardware and
+ * every line can be read back, so this always succeeds. */
+int alpx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ /* All ports handle INPUTs operations */
+ return 0;
+}
+
+/*
+ * Populate and register the board's gpio_chip with gpiolib.
+ * ngpio = variant inputs + outputs; base = -1 lets the kernel pick the
+ * GPIO number range. Returns the gpiochip_add_data() status (0 on success).
+ */
+int alpx_gpio_register(struct alpx_device *alpx_dev, const char* card_name)
+{
+ struct gpio_chip *chip = &alpx_dev->gpio_chip;
+
+ chip->parent = alpx_dev->dev;
+ chip->owner = THIS_MODULE;
+ chip->label = card_name;
+ chip->base = -1;
+
+ chip->ngpio = alpx_dev->variant->gpios.inputs_qty + alpx_dev->variant->gpios.outputs_qty;
+ chip->get = alpx_gpio_get;
+ chip->set = alpx_gpio_set;
+ chip->get_direction = alpx_gpio_get_direction;
+ chip->direction_output = alpx_gpio_direction_output;
+ chip->direction_input = alpx_gpio_direction_input;
+
+ dev_dbg( alpx_dev->dev,"%s(): creating GP(%d)I(%d)O for %s.\n", __func__,
+ alpx_dev->variant->gpios.inputs_qty,
+ alpx_dev->variant->gpios.outputs_qty,
+ chip->label);
+
+ return gpiochip_add_data(chip, alpx_dev);
+}
+
+/* Remove the gpio_chip from gpiolib. The parent check makes the call a
+ * no-op when alpx_gpio_register() was never run for this device. */
+void alpx_gpio_unregister(struct alpx_device *alpx_dev)
+{
+ struct gpio_chip* const chip = &alpx_dev->gpio_chip;
+ if (chip->parent == alpx_dev->dev)
+ gpiochip_remove(chip);
+}
+
diff --git a/snd-alpx/alpx_gpio.h b/snd-alpx/alpx_gpio.h
new file mode 100644
index 0000000..90395a0
--- /dev/null
+++ b/snd-alpx/alpx_gpio.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_GPIOS_H
+#define _ALPX_GPIOS_H
+#include "alpx.h"
+
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+
+/* gpio_chip callbacks implemented in alpx_gpio.c. Convention: chip offsets
+ * below the variant's inputs_qty are inputs, the remainder are outputs. */
+int alpx_gpio_get(struct gpio_chip *chip, unsigned offset);
+void alpx_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int gpio_value);
+int alpx_gpio_get_direction(struct gpio_chip *chip, unsigned offset);
+int alpx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int gpio_value);
+int alpx_gpio_direction_input(struct gpio_chip *chip, unsigned offset);
+/* Register/unregister the board's gpio_chip with gpiolib. */
+int alpx_gpio_register(struct alpx_device *alpx_dev, const char* card_name);
+void alpx_gpio_unregister(struct alpx_device *alpx_dev);
+
+#endif
diff --git a/snd-alpx/alpx_led.h b/snd-alpx/alpx_led.h
new file mode 100644
index 0000000..b69d836
--- /dev/null
+++ b/snd-alpx/alpx_led.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_LEDS_H
+#define _ALPX_LEDS_H
+
+#include "alpx.h"
+
+#include <linux/io.h>
+
+/* Switch the MADI blink-LED register on/off (1 = blink), then read it back
+ * for the debug trace. */
+static inline void alpmadi_set_led_state(struct alpx_device *alpx_dev, bool blink)
+{
+ writel(blink ? 1 : 0, ALPX_REG(alpx_dev, ALPMADI, CONTROL, BLINK_LED));
+
+ dev_dbg( alpx_dev->dev,"%s(): LED %s, val:0x%08x/\n", __func__,
+ blink ? "On":"Off",readl(ALPX_REG(alpx_dev, ALPMADI, CONTROL, BLINK_LED)));
+}
+
+#endif
diff --git a/snd-alpx/alpx_mtd.c b/snd-alpx/alpx_mtd.c
new file mode 100644
index 0000000..d0ddd52
--- /dev/null
+++ b/snd-alpx/alpx_mtd.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include "alpx_mtd.h"
+#include "alpx.h"
+#include "alpx_reg.h"
+
+/* Value read back from a fully erased flash word (all bits set). */
+static const u32 ALPX_FLASH_ERASED_AREA_VALUE = 0xFFFFFFFF;
+
+
+/*
+ * MTD ._read hook. Flash is not memory-mapped: each sector is pulled into
+ * the card's SHARED window via the READ_FLASH_SECTOR proc command, then
+ * copied out with memcpy_fromio(). The proc command and the window copy are
+ * serialized by proc_mutex per sector. Returns 0 on success; *retlen is
+ * advanced by the bytes actually copied.
+ */
+int alpx_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct alpx_device *alpx_dev = mtd->priv;
+ size_t size;
+ u32 page_index;
+ u32 offset;
+ u32 cmd;
+ int ret;
+
+ /* Split 'from' into a sector index and a byte offset within the sector. */
+ page_index = from >> ALPX_FLASH_SECTOR_SHIFT;
+ offset = from & (ALPX_FLASH_SECTOR_SIZE-1);
+
+ while (len) {
+ /* Clamp this chunk to the end of the current sector. */
+ if ((offset + len) > ALPX_FLASH_SECTOR_SIZE)
+ size = ALPX_FLASH_SECTOR_SIZE - offset;
+ else
+ size = len;
+
+ cmd = ALP_PROC_COMMAND_MAKE_P16(
+ ALP_PROC_CMD_READ_FLASH_SECTOR, page_index);
+
+ dev_dbg(alpx_dev->dev,"Reading %zu bytes Flash sector 0x%x.\n", size, page_index);
+
+ mutex_lock(&alpx_dev->proc_mutex);
+ ret = alpx_proc_cmd(alpx_dev, cmd);
+ if (ret) {
+ mutex_unlock(&alpx_dev->proc_mutex);
+ dev_err(alpx_dev->dev,
+ "cmd(CMD_READ_FLASH_SECTOR, 0x%x) failed (%d)\n",
+ page_index, ret);
+
+ return ret;
+ }
+
+ memcpy_fromio(buf, ALPX_AREA(alpx_dev, ALP, SHARED) + offset,
+ size);
+ mutex_unlock(&alpx_dev->proc_mutex);
+
+ if (retlen)
+ *retlen += size;
+ buf += size;
+ len -= size;
+ offset = 0;
+ page_index++;
+ }
+ return 0;
+}
+
+/*
+ * MTD ._write hook, sector-granular mirror of alpx_mtd_read(). A partial
+ * sector triggers a read-modify-write: the sector is first loaded into the
+ * SHARED window, the new bytes are merged with memcpy_toio(), then
+ * WRITE_FLASH_SECTOR commits it. Each sector is handled under proc_mutex.
+ * Returns 0 on success; *retlen is advanced by the bytes written.
+ */
+int alpx_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct alpx_device *alpx_dev = mtd->priv;
+ size_t size;
+ u32 page_index;
+ u32 offset;
+ u32 cmd;
+ int ret;
+
+ page_index = to >> ALPX_FLASH_SECTOR_SHIFT;
+ offset = to & (ALPX_FLASH_SECTOR_SIZE-1);
+
+ while (len) {
+ if ((offset + len) > ALPX_FLASH_SECTOR_SIZE)
+ size = ALPX_FLASH_SECTOR_SIZE - offset;
+ else
+ size = len;
+
+ dev_dbg(alpx_dev->dev,"Writing %zu bytes to sector 0x%x.\n", size, page_index);
+
+ mutex_lock(&alpx_dev->proc_mutex);
+ if (size != ALPX_FLASH_SECTOR_SIZE) {
+ /* Partial page write -> read modify write*/
+ cmd = ALP_PROC_COMMAND_MAKE_P16(
+ ALP_PROC_CMD_READ_FLASH_SECTOR, page_index);
+
+ ret = alpx_proc_cmd(alpx_dev, cmd);
+ if (ret) {
+ mutex_unlock(&alpx_dev->proc_mutex);
+ dev_err(alpx_dev->dev,
+ "cmd(CMD_READ_FLASH_SECTOR, 0x%x) failed (%d)\n",
+ page_index, ret);
+ return ret;
+ }
+ }
+
+ memcpy_toio(ALPX_AREA(alpx_dev, ALP, SHARED) + offset, buf,
+ size);
+
+ cmd = ALP_PROC_COMMAND_MAKE_P16(
+ ALP_PROC_CMD_WRITE_FLASH_SECTOR, page_index);
+
+ ret = alpx_proc_cmd(alpx_dev, cmd);
+ if (ret) {
+ mutex_unlock(&alpx_dev->proc_mutex);
+ dev_err(alpx_dev->dev,
+ "cmd(CMD_WRITE_FLASH_SECTOR, 0x%x) failed (%d)\n",
+ page_index, ret);
+ return ret;
+ }
+ mutex_unlock(&alpx_dev->proc_mutex);
+
+ if (retlen)
+ *retlen += size;
+ buf += size;
+ len -= size;
+ offset = 0;
+ page_index++;
+ }
+ return 0;
+}
+
+/*
+ * Register the board flash as an MTD device with the given partition table.
+ * Declared MTD_RAM / MTD_NO_ERASE because erasure is handled implicitly by
+ * the card's sector-write command; reads/writes go through the hooks above.
+ * Returns the mtd_device_register() status.
+ */
+int alpx_mtd_probe(struct alpx_device *alpx_dev, const struct mtd_partition* partitions, u32 partitions_size)
+{
+ struct mtd_info *mtd = &alpx_dev->mtd_info;
+
+ /* Setup the MTD structure */
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_WRITEABLE | MTD_NO_ERASE;
+ mtd->size = ALPX_FLASH_CHIP_SIZE;
+ mtd->writesize = ALPX_FLASH_SECTOR_SIZE;
+ mtd->writebufsize = mtd->writesize;
+ mtd->priv = alpx_dev;
+
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = alpx_dev->dev;
+ mtd->_read = alpx_mtd_read;
+ mtd->_write = alpx_mtd_write;
+
+ return mtd_device_register(mtd, partitions,
+ partitions_size);
+}
+
+/* Unregister the MTD device; no-op (returns 0) for variants that declare
+ * no flash partitions and therefore never registered one. */
+int alpx_mtd_remove(struct alpx_device *alpx_dev)
+{
+ return alpx_dev->variant->flash_partitions.qty ? mtd_device_unregister(&alpx_dev->mtd_info) : 0;
+}
+
+/* Read one Flash's page into the shared area NO LOCK DONE !!*/
+/* Read one Flash's page into the shared area. Caller must hold proc_mutex
+ * (NO LOCK DONE here). 'from' must be sector-aligned (-EINVAL otherwise).
+ * Returns the proc command status. */
+static int alpx_mtd_load_one_page_into_shared_area(struct alpx_device* alpx_dev, u32 from)
+{
+ u32 page_index = from >> ALPX_FLASH_SECTOR_SHIFT;
+ u32 cmd = ALP_PROC_COMMAND_MAKE_P16(
+ ALP_PROC_CMD_READ_FLASH_SECTOR, page_index);
+ size_t size = ALPX_FLASH_SECTOR_SIZE;
+
+ int ret;
+
+ /* Check requested length below FLASH sector's size, TODO check page_index too TODO check form is aligned on page */
+ if (from % ALPX_FLASH_SECTOR_SIZE != 0)
+ return -EINVAL;
+
+ dev_dbg(alpx_dev->dev,"Reading %zu bytes Flash sector 0x%x (0x%x).\n", size, page_index, from);
+
+
+ ret = alpx_proc_cmd(alpx_dev, cmd);
+ if (ret) {
+ dev_err(alpx_dev->dev,
+ "cmd(CMD_READ_FLASH_SECTOR, 0x%x) failed (%d)\n",
+ page_index, ret);
+ return ret;
+ }
+ /* Debug: dump the first 32 bytes of the freshly loaded window. */
+ print_hex_dump_bytes("LOADED Shared area :", DUMP_PREFIX_NONE, ALPX_AREA(alpx_dev, ALP, SHARED), 32);
+ return ret;
+}
+
+/* Store the shared area into the Flash at the to address. NO LOCK DONE */
+/* Store the shared area into the Flash at the 'to' address. Caller must
+ * hold proc_mutex (NO LOCK DONE here). 'to' must be sector-aligned
+ * (-EINVAL otherwise). Returns the proc command status. */
+static int alpx_mtd_store_one_page_into_shared_area(struct alpx_device* alpx_dev, u32 to)
+{
+ u32 page_index = to >> ALPX_FLASH_SECTOR_SHIFT;
+ u32 cmd = ALP_PROC_COMMAND_MAKE_P16(
+ ALP_PROC_CMD_WRITE_FLASH_SECTOR, page_index);
+ size_t size = ALPX_FLASH_SECTOR_SIZE;
+
+ int ret;
+
+ print_hex_dump_bytes("STORED Shared area :", DUMP_PREFIX_NONE, ALPX_AREA(alpx_dev, ALP, SHARED), 32);
+
+ /* Check requested length below FLASH sector's size, TODO check page_index too TODO check form is aligned on page */
+ if (to % ALPX_FLASH_SECTOR_SIZE != 0)
+ return -EINVAL;
+
+ dev_dbg(alpx_dev->dev,"Storing %zu bytes to Flash sector 0x%x.\n", size, page_index);
+
+ ret = alpx_proc_cmd(alpx_dev, cmd);
+ if (ret) {
+ dev_err(alpx_dev->dev,
+ "cmd(ALP_PROC_CMD_WRITE_FLASH_SECTOR, 0x%x) failed (%d)\n",
+ page_index, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Check whether the GOLDEN region's production area has been programmed:
+ * loads its first sector and tests the first word against the erased-flash
+ * value.
+ * NOTE(review): mixed return convention — 1 = valid, 0 = erased, but a
+ * failed load returns the negative errno from the helper; the caller in
+ * alpx_probe() treats any non-zero as "valid". Confirm this is intended.
+ */
+int alpx_mtd_is_golden_prod_area_valid(struct alpx_device* alpx_dev)
+{
+ int ret = 0;
+
+ mutex_lock(&alpx_dev->proc_mutex);
+ ret = alpx_mtd_load_one_page_into_shared_area(alpx_dev, alpx_dev->variant->flash_golden_production_base);
+
+ if (!ret) {
+ //Now check Production area validity per se : something set in and not in erased state
+ ret = (*(u32*)ALPX_AREA(alpx_dev, ALP, SHARED)) != ALPX_FLASH_ERASED_AREA_VALUE;
+ }
+ else {
+ dev_err(alpx_dev->dev, "Error %d when loading shared area from flash.\n", ret);
+ }
+ mutex_unlock(&alpx_dev->proc_mutex);
+ return ret;
+}
+
+/*
+ * Copy the USER production area into the GOLDEN production area, one sector
+ * through the SHARED window: load USER sector, then store it at the golden
+ * base. Whole operation is serialized by proc_mutex. Returns 0 on success.
+ */
+int alpx_mtd_replicate_prod_area(struct alpx_device* alpx_dev)
+{
+ /* Read the USER's Production area and then write it to the GOLDEN'sProduction area */
+ //Read User
+
+ int ret = 0;
+ mutex_lock(&alpx_dev->proc_mutex);
+
+ ret= alpx_mtd_load_one_page_into_shared_area(alpx_dev,
+ alpx_dev->variant->flash_partitions.partitions[ALPX_FLASH_PARTITION_PRODUCTION_ID].offset);
+ if (!ret) {
+ //Write it to the GOLDEN's area
+ ret = alpx_mtd_store_one_page_into_shared_area(alpx_dev, alpx_dev->variant->flash_golden_production_base);
+ if (ret) {
+ dev_err(alpx_dev->dev,"Error %d when storing the USER's Production area into GOLDEN.\n", ret);
+ }
+ }
+ else {
+ dev_err(alpx_dev->dev,"Error %d when reading the USER's Production area.\n", ret);
+ }
+
+ mutex_unlock(&alpx_dev->proc_mutex);
+ return ret;
+}
+
+/*
+ * Read 'length' bytes of flash starting at sector-aligned address 'from'
+ * into the caller's buffer 'to', going through the SHARED window — hence
+ * length is capped at ALP_SHARED_SIZE (-EINVAL beyond). Serialized by
+ * proc_mutex. Returns 0 on success.
+ */
+int alpx_mtd_read_from(struct alpx_device* alpx_dev,
+ uint32_t from,
+ unsigned char* to,
+ unsigned int length)
+{
+ int ret = 0;
+
+ //Check : never read more than SHARE area length
+ if (length > ALP_SHARED_SIZE)
+ return -EINVAL;
+
+ mutex_lock(&alpx_dev->proc_mutex);
+ //Load the PRODUCTION into the shared AREA, then extract the Serial Number
+ ret= alpx_mtd_load_one_page_into_shared_area(alpx_dev, from);
+
+ if (!ret) {
+ unsigned int idx = 0;
+ //NO memcpy() when dealing with SHARED Area
+ /* Byte-wise copy out of the MMIO window. */
+ for (idx = 0 ; idx < length; ++idx) {
+ to[idx] = *(((unsigned char*)ALPX_AREA(alpx_dev, ALP, SHARED))+idx);
+ }
+ print_hex_dump_bytes("READ:", DUMP_PREFIX_NONE, to, length);
+ } else {
+ dev_err(alpx_dev->dev, " Error 0x%x when reading Flash memory at %d.\n", ret, from);
+ }
+
+ mutex_unlock(&alpx_dev->proc_mutex);
+
+ return ret;
+}
+
+/* Load the SHARED window with the flash sector at 'from'. NOT MT safe —
+ * caller must hold proc_mutex. */
+int alpx_mtd_load_shared_from(struct alpx_device* alpx_dev, uint32_t from)
+{
+ //Only one page at the moment, must be ALP_SHARED_SIZE bytes after.
+ return alpx_mtd_load_one_page_into_shared_area(alpx_dev, from);
+}
+
+/* Copy 'length' bytes from offset 'at' of the SHARED window into 'to',
+ * byte by byte (no memcpy on the MMIO window). NOT MT safe — caller must
+ * hold proc_mutex. Always returns 0; bounds are the caller's concern. */
+int alpx_mtd_read_shared(struct alpx_device* alpx_dev,
+ uint32_t at,
+ unsigned char* to,
+ unsigned int length)
+{
+ unsigned int idx;
+ //NO memcpy() when dealing with SHARED Area
+ for (idx = 0 ; idx < length; ++idx) {
+ to[idx] = *(((unsigned char*)ALPX_AREA(alpx_dev, ALP, SHARED))+idx + at);
+ }
+ print_hex_dump_bytes("READ:", DUMP_PREFIX_NONE, to, length);
+ return 0;
+}
+
+/*
+ * Zero the SHARED window. NOT MT safe — caller must hold proc_mutex.
+ * NOTE(review): 'idx' is added to the area pointer BEFORE the u32 cast, so
+ * the stride depends on ALPX_AREA()'s pointer type; if it is byte-typed the
+ * loop only clears the first quarter of the sector with overlapping 32-bit
+ * writes. Confirm against the macro and fix the pointer arithmetic.
+ */
+int alpx_mtd_clean_shared(struct alpx_device* alpx_dev)
+{
+ unsigned int idx;
+ //NO memcpy() when dealing with SHARED Area
+ for (idx = 0 ; idx < (ALPX_FLASH_SECTOR_SIZE / sizeof(unsigned int)); ++idx) {
+ *(unsigned int*)(ALPX_AREA(alpx_dev, ALP, SHARED)+idx) = 0;
+ }
+ return 0;
+}
+
+/* Return true when this variant exposes the flash over MTD. */
+int alpx_mtd_is_available(struct alpx_device* alpx_dev)
+{
+ //DANTE : no MTD available yet
+ return alpx_dev->variant->model != ALPX_VARIANT_MODEL_ALPDANTE;
+}
diff --git a/snd-alpx/alpx_mtd.h b/snd-alpx/alpx_mtd.h
new file mode 100644
index 0000000..1735619
--- /dev/null
+++ b/snd-alpx/alpx_mtd.h
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+#ifndef _ALPX_MTD_H
+#define _ALPX_MTD_H
+
+#include "alpx.h"
+#include "alpx_proc.h"
+#include <linux/mtd/partitions.h>
+
+int alpx_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+int alpx_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+int alpx_mtd_probe(struct alpx_device *alpx_dev, const struct mtd_partition* partitions, u32 partitions_size);
+int alpx_mtd_remove(struct alpx_device *alpx_dev);
+
+/* Check validity of PRODUCTION area in Golden Region : Serial Number set */
+int alpx_mtd_is_golden_prod_area_valid(struct alpx_device* alpx_dev);
+
+/*replicate the USER's Production area into the GOLDEN's Production area */
+int alpx_mtd_replicate_prod_area(struct alpx_device* alpx_dev);
+
+/* Read length bytes out of the flash at FROM into the TO buffer using the SHARED area => length limited */
+int alpx_mtd_read_from(struct alpx_device* alpx_dev,
+ uint32_t from,
+ unsigned char* to,
+ unsigned int length);
+
+/* Load the SHARED Area with Flash data "from" , NOT MT safe */
+int alpx_mtd_load_shared_from(struct alpx_device* alpx_dev, uint32_t from);
+
+/* Read Shared Area "at" to for "length" bytes NOT MT safe */
+int alpx_mtd_read_shared(struct alpx_device* alpx_dev,
+ uint32_t at,
+ unsigned char* to,
+ unsigned int length);
+
+/* Clean the Shared Area NOT MT safe */
+int alpx_mtd_clean_shared(struct alpx_device* alpx_dev);
+
+/* return true if the MTD is available for the given device */
+int alpx_mtd_is_available(struct alpx_device* alpx_dev);
+
+#endif
diff --git a/snd-alpx/alpx_proc.c b/snd-alpx/alpx_proc.c
new file mode 100644
index 0000000..65d9daa
--- /dev/null
+++ b/snd-alpx/alpx_proc.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include "alpx_proc.h"
+
+#if defined(CONFIG_ALPX_WITH_PROC_LOG)
+int alpx_proc_log_dump(struct alpx_device *alpx_dev)
+{
+ static unsigned char log_buffer[ALP_PROC_LOG_LENGTH];
+ unsigned char logged_word = 0;
+ unsigned int addr = 0;
+ //unsigned int idx = 0;
+ struct alpx_device zero_base;
+ zero_base.base = 0;
+
+
+ dev_dbg( alpx_dev->dev,"%s(): uBlaze Log (%d):", __func__, ALP_PROC_LOG_LENGTH);
+
+ for (addr = 0; addr < ALP_PROC_LOG_LENGTH ; addr += sizeof (u32)) {
+#if 0
+ dev_dbg( alpx_dev->dev,"%s() : log reg: %p + 0x%x => %p\n", __func__,
+ alpx_dev->base,
+ ALP_PROC_LOG_BASE + addr,
+ ALPX_AREA(alpx_dev, ALP, PROC_LOG) + addr);
+#endif
+ logged_word = readl(ALPX_AREA(alpx_dev, ALP, PROC_LOG) + addr);
+ *(unsigned int*)&log_buffer[addr] = logged_word;
+#if 0
+ dev_dbg( alpx_dev->dev,"%s() : [0x%x] =>%d\n", __func__,
+ ALP_PROC_LOG_BASE + addr,
+ logged_word);
+#endif
+
+#if 0
+ for (idx = 0 ; idx < sizeof(u32) ; ++idx) {
+ printk("%c",(unsigned char)logged_word);
+ logged_word >>= 8;
+ }
+#endif
+print_hex_dump_bytes(alpx_dev->dev,"LOG", DUMP_PREFIX_NONE,
+ log_buffer, ALP_PROC_LOG_LENGTH);
+ }
+
+ return 0;
+}
+#endif
+
+int alpx_proc_cmd(struct alpx_device *alpx_dev, u32 command)
+{
+ u32 status;
+ u32 fails;
+ int ret;
+
+ /* Set status to detect proc register update */
+ writel(0xFFFFFFFF, ALPX_REG(alpx_dev, ALP, PROC, STATUS));
+
+ /* Post command */
+ writel(command, ALPX_REG(alpx_dev, ALP, PROC, COMMAND));
+
+ /* Poll status for first update */
+ ret = readl_poll_timeout(ALPX_REG(alpx_dev, ALP, PROC, STATUS),
+ status, status != 0xFFFFFFFF, 10, 1000000);
+ if (ret) {
+ dev_err(alpx_dev->dev, "cmd 0x%08X timeout (status=%x)\n",
+ command, status);
+ alpx_proc_log_dump(alpx_dev);
+ return ret;
+ }
+
+ /* Continue polling for end of processing */
+ ret = readl_poll_timeout(ALPX_REG(alpx_dev, ALP, PROC, STATUS),
+ status, !(status & ALP_PROC_STATUS_PENDING),
+ 10, 1000000);
+ if (ret) {
+ dev_err(alpx_dev->dev, "cmd 0x%08X timeout (status=%x)\n",
+ command, status);
+ alpx_proc_log_dump(alpx_dev);
+ return ret;
+ }
+
+ mdelay(1);
+
+ /* Command processing is done -> check results */
+ status = readl(ALPX_REG(alpx_dev, ALP, PROC, STATUS));
+ fails = readl(ALPX_REG(alpx_dev, ALP, PROC, FAILS));
+
+ dev_dbg(alpx_dev->dev,"cmd 0x%08X => status: 0x%x, fail:0x%x\n", command, status, fails);
+
+ if ((status & ALP_PROC_STATUS_FAIL) &&
+ (fails != ALP_PROC_FAILS_FAIL_NONE)) {
+ dev_err(alpx_dev->dev, "cmd 0x%08X failed (status=%x, fails=%x)\n",
+ command, status, fails);
+ alpx_proc_log_dump(alpx_dev);
+ return -EIO;
+ }
+
+ alpx_proc_log_dump(alpx_dev);
+
+ return 0;
+}
+
/* Initialise the embedded-processor command path: set up the mutex that
 * serialises access to the PROC registers and SHARED area. Returns 0. */
int alpx_proc_probe(struct alpx_device *alpx_dev)
{
	mutex_init(&alpx_dev->proc_mutex);
	return 0;
}
+
/* Tear down the embedded-processor command path; counterpart of
 * alpx_proc_probe(). Must not be called while a command is in flight.
 * Returns 0. */
int alpx_proc_remove(struct alpx_device *alpx_dev)
{
	mutex_destroy(&alpx_dev->proc_mutex);
	return 0;
}
diff --git a/snd-alpx/alpx_proc.h b/snd-alpx/alpx_proc.h
new file mode 100644
index 0000000..8328a14
--- /dev/null
+++ b/snd-alpx/alpx_proc.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_PROC_H
+#define _ALPX_PROC_H
+
+#include "alpx.h"
+#include <linux/iopoll.h>
+
#if defined(CONFIG_ALPX_WITH_PROC_LOG)
int alpx_proc_log_dump(struct alpx_device *alpx_dev);
#else
/* No-op stub. BUGFIX: the original expanded to nothing, which is unsafe for
 * a statement-like macro (e.g. as the sole body of an if/else without
 * braces); 'do { } while (0)' swallows the trailing ';' correctly. */
#define alpx_proc_log_dump(p) do { } while (0)
#endif
+
+int alpx_proc_cmd(struct alpx_device *alpx_dev, u32 command);
+int alpx_proc_probe(struct alpx_device *alpx_dev);
+int alpx_proc_remove(struct alpx_device *alpx_dev);
+
+
+#endif
diff --git a/snd-alpx/alpx_reg.h b/snd-alpx/alpx_reg.h
new file mode 100644
index 0000000..276e16d
--- /dev/null
+++ b/snd-alpx/alpx_reg.h
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_REG_H_
+#define _ALPX_REG_H_
+
+/* Macros */
+
+#define ALPX_BASE(variant, group) \
+ (variant ## _ ## group ## _BASE)
+
+#define ALPX_OFFSET(variant, group, reg) \
+ ((variant ## _ ## group ## _BASE) + \
+ (variant ## _ ## group ## _ ## reg ## _REG))
+
+#define ALPX_AREA(alpx_dev, variant, group) \
+ ((alpx_dev)->base + ALPX_BASE(variant, group))
+
+#define ALPX_ITEM_REG(variant, group, index) \
+ ((variant ## _ ## group ## _BASE) + \
+ (variant ## _ ## group ## _ ## index))
+
+#define ALPX_REG(alpx_dev, variant, group, reg) \
+ ((alpx_dev)->base + ALPX_OFFSET(variant, group, reg))
+
+#define ALPX_REG_ARRAY(alpx_dev, variant, group, index) \
+ ((alpx_dev)->base + ALPX_ITEM_REG(variant, group, index))
+
+#define ALPX_COMMON_OFFSET(variant, group, reg) \
+ ((variant ## _ ## group ## _BASE) + \
+ (ALPX_COMMON_ ## reg ## _REG))
+
+#define ALPX_COMMON_REG(alpx_dev, variant, group, reg) \
+ ((alpx_dev)->base + ALPX_COMMON_OFFSET(variant, group, reg))
+
+/* Conversion Register index to register offset, register are 32 bits words */
+#define ALP_INDEX_TO_REG_OFFSET(i) ((i) * 4)
+
+
+/* PCI */
+
+#define ALPX_PCI_BAR 0
+#define ALPX_PCI_ID_DEVICE 0x0002
+
+#define ALPX_PCI_ID_SUBSYSTEM_MADI 0x0020
+#define ALPX_PCI_ID_SUBSYSTEM_ALP222 0x0040
+#define ALPX_PCI_ID_SUBSYSTEM_ALP222_MIC 0x0840
+#define ALPX_PCI_ID_SUBSYSTEM_ALP442 0x0080
+#define ALPX_PCI_ID_SUBSYSTEM_ALP442_MIC 0x0880
+#define ALPX_PCI_ID_SUBSYSTEM_ALP882 0x00C0
+#define ALPX_PCI_ID_SUBSYSTEM_ALP882_MIC 0x08C0
+#define ALPX_PCI_ID_SUBSYSTEM_MADI_LOOPBACK 0x0000
+#define ALPX_PCI_ID_SUBSYSTEM_ALPDANTE 0x00E0
+
+
+/* This one is a very special one : The production area is invalid */
+#define ALPX_PCI_ID_SUBSYSTEM_MADI_ALP_DEAD 0xDEAD
+
+/* UNKNOWN */
+#define ALPUNKNOWN_GPIO_BASE 0xFFFFF
+
+/* ALPX Base */
+
#define ALP_SHARED_BASE 0x50000
/* BUGFIX: parenthesised — the bare '8 * 1024' broke inside larger
 * expressions (e.g. 'x / ALP_SHARED_SIZE' parsed as '(x / 8) * 1024'). */
#define ALP_SHARED_SIZE (8 * 1024)
#define ALP_PROC_BASE 0x60000
#define ALP_CONTROL_BASE 0x70000
#define ALP_CLK_MANAGER_BASE ALP_CONTROL_BASE
+
+
+/* Alpxxx Flash defs */
+/** Common size for all the data areas in Flash, exclude the FIRMWARE **/
+#define ALPxxx_FLASH_DATA_AREAS_SIZE (32*1024)
+
+/** PRODUCTION Area in GOLDEN Region **/
+#define ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE 0x228000
+
+/* PRODUCTION structure */
+#define ALPX_SERIAL_OFFSET_IN_PRODUCTION 0
+
+/* BUILD version register : common addresses */
+#define ALP_CONTROL_DESIGN_VERSION_REG 0x100
+#define ALP_CONTROL_BUILD_VERSION_REG 0x104
+
+#define ALP_CONTROL_BUILD_VERSION_MASK GENMASK(15,0)
+
+
+/** Multi-channels cards **/
+
+/* Clock Manager */
+#define ALPMC_CLK_MANAGER_CONFIG_REG 0x24
+#define ALPMC_CLK_MANAGER_CONFIG_FS(v) ((v) & GENMASK(4, 0))
+#define ALPMC_CLK_MANAGER_CONFIG_FS_MASK GENMASK(4, 0)
+
+
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS(v) (((v) & GENMASK(20, 16)) >> 16)
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_INVALID 0
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_32K 5
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_44_1K 6
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_48K 7
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_DOUBLE BIT(3)
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_QUAD BIT(4)
+#define ALPMC_CLK_MANAGER_EFFECTIVE_FS_BASE_MASK GENMASK(2, 0)
+
+
+#define ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK GENMASK(20, 16)
+#define ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS 16
+#define ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_QTY 8
+
+#define ALPMC_CLK_MANAGER_SOURCE(v) (((v) & GENMASK(23, 21)) >> 21)
+#define ALPMC_CLK_MANAGER_SOURCE_INTERNAL 0
+#define ALPMC_CLK_MANAGER_SOURCE_SIC 1
+#define ALPMC_CLK_MANAGER_SOURCE_WCLK_IN 2
+#define ALPMC_CLK_MANAGER_SOURCE_AES_SYNC 3
+#define ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_12 4
+#define ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_34 5
+#define ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_56 6
+#define ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_78 7
+#define ALPMC_CLK_MANAGER_SOURCE_DANTE 8
+
+/* Pre FW V240 compatibility support */
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_WCLK_IN 0
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_SYNC 1
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_12 2
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_34 3
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_56 4
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_78 5
+#define ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_INTERNAL 6
+
+#define ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK GENMASK(23, 21)
+#define ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS 21
+
+#define ALPMC_CLK_MANAGER_BASE ALP_CLK_MANAGER_BASE
+#define ALPMC_CONTROL_BASE ALP_CLK_MANAGER_BASE
+#define ALPMC_GPIO_BASE 0x45000
+#define ALPMC_CODEC_CTRL_BASE 0x71000
+#define ALPMC_MIXER_BASE 0x74000
+#define ALPMC_AMPLI_IN_BASE 0x79000
+#define ALPMC_AMPLI_OUT_BASE 0x7c000
+
+/* FS switch delay required by HW (in ms) */
+#define ALPMC_FS_SWITCH_DELAY 500
+
+/* Supported Firmware versions */
+#define ALPMC_SUPPORTED_BASE_FW_VERSION 247
+#define ALP222_SUPPORTED_BASE_FW_VERSION 283
+/* The base Firmware version for AlpDANTE supported by this driver */
+#define ALPDANTE_SUPPORTED_BASE_FW_VERSION 316
+
+
+
+/* GPIO */
+
+#define ALPMC_GPIO_INPUT_REG 0x08
+#define ALPMC_GPIO_INPUT_QTY 8
+#define ALPMC_GPIO_OUTPUT_REG 0x0C
+#define ALPMC_GPIO_OUTPUT_QTY 8
+
+/* ALPMC Gain access*/
+#define ALPMC_GAIN_TABLE_BASE(c) (0xc + (c) * 0x4)
+
+/* Codec */
+#define ALPMC_INPUT_PARAMS_REG 0x2C
+#define ALPMC_MIC_INPUT_PH_1_MASK BIT(0)
+#define ALPMC_MIC_INPUT_PH_2_MASK BIT(1)
+#define ALPMC_MIC_INPUT_PH_3_MASK BIT(2)
+#define ALPMC_MIC_INPUT_PH_4_MASK BIT(3)
+#define ALPMC_MIC_INPUT_PH_5_MASK BIT(4)
+#define ALPMC_MIC_INPUT_PH_6_MASK BIT(5)
+#define ALPMC_MIC_INPUT_PH_7_MASK BIT(6)
+#define ALPMC_MIC_INPUT_PH_8_MASK BIT(7)
+
+#define ALPMC_MIC_INPUT_PH_1_POS 0
+#define ALPMC_MIC_INPUT_PH_2_POS 1
+#define ALPMC_MIC_INPUT_PH_3_POS 2
+#define ALPMC_MIC_INPUT_PH_4_POS 3
+#define ALPMC_MIC_INPUT_PH_5_POS 4
+#define ALPMC_MIC_INPUT_PH_6_POS 5
+#define ALPMC_MIC_INPUT_PH_7_POS 6
+#define ALPMC_MIC_INPUT_PH_8_POS 7
+
+#define ALPMC_CODEC_PGA_REGS 0x0C
+
+#define ALPMC_CODEC_AES_SRC_CONTROL_REG 0x08
+#define ALPMC_CODEC_AES_SRC_CONTROL_AUTO_DISABLE_1_2 BIT(0)
+#define ALPMC_CODEC_AES_SRC_CONTROL_AUTO_DISABLE_3_4 BIT(1)
+#define ALPMC_CODEC_AES_SRC_CONTROL_AUTO_DISABLE_5_6 BIT(2)
+#define ALPMC_CODEC_AES_SRC_CONTROL_AUTO_DISABLE_7_8 BIT(3)
+
+#define ALPMC_CODEC_AES_SRC_1_2_MASK BIT(0)
+#define ALPMC_CODEC_AES_SRC_1_2_POS 0
+#define ALPMC_CODEC_AES_SRC_3_4_MASK BIT(1)
+#define ALPMC_CODEC_AES_SRC_3_4_POS 1
+#define ALPMC_CODEC_AES_SRC_5_6_MASK BIT(2)
+#define ALPMC_CODEC_AES_SRC_5_6_POS 2
+#define ALPMC_CODEC_AES_SRC_7_8_MASK BIT(3)
+#define ALPMC_CODEC_AES_SRC_7_8_POS 3
+
+/* MC Mixer */
+/* One Gain value per register */
+#define ALPMC_MIXER_ENTRIES_IN_REG 1
+
+#define ALPMC_MIXER_GAIN_MASK GENMASK(ALPX_MIXER_GAIN_BITS - 1, 0)
+
+#define ALPMC_MIXER_GAIN_SEL(v) ((v) & GENMASK(ALPX_MIXER_GAIN_BITS - 1, 0))
+
+#define ALPMC_MIXER_GAIN_VALUE(v) ((v) & GENMASK(ALPX_MIXER_GAIN_BITS-1, 0))
+
+#define ALPMC_MIXER_GAIN_REG(size, in, out) (ALPX_MIXER_REG_ENTRIES_OFFSET + (out) * ((size) * 4 / ALPMC_MIXER_ENTRIES_IN_REG) + ((in) / ALPMC_MIXER_ENTRIES_IN_REG) * 4)
+
+
+/* Dedicated to ALP882 */
+
+#define ALP882_CHANNELS_DAW_COUNT 16
+#define ALP882_CHANNELS_ANALOG_COUNT 8
+#define ALP882_CHANNELS_AES_COUNT 8
+
+#define ALP882_CHANNELS_IN_DAW_OFFSET 0
+#define ALP882_CHANNELS_IN_ANALOG_OFFSET (ALP882_CHANNELS_IN_DAW_OFFSET + \
+ ALP882_CHANNELS_DAW_COUNT)
+#define ALP882_CHANNELS_IN_AES_OFFSET (ALP882_CHANNELS_IN_ANALOG_OFFSET + \
+ ALP882_CHANNELS_ANALOG_COUNT)
+
+#define ALP882_CHANNELS_OUT_ANALOG_OFFSET 0
+#define ALP882_CHANNELS_OUT_AES_OFFSET (ALP882_CHANNELS_OUT_ANALOG_OFFSET + \
+ ALP882_CHANNELS_ANALOG_COUNT)
+#define ALP882_CHANNELS_OUT_DAW_OFFSET (ALP882_CHANNELS_OUT_AES_OFFSET + \
+ ALP882_CHANNELS_AES_COUNT)
+
+
+/* ALP882 Clock Manager */
+#define ALP882_CLK_MANAGER_CONFIG_CLK_SRC_QTY 8
+
+/* Alp 882 clk source in Pre 240 FW */
+#define ALP882_APP_PREFW240_CLK_MANAGER_CLK_SRC_QTY 7
+
+/* Mixer 882 */
+#define ALP882_MIXER_SIZE 32
+
+/* Dedicated to ALP442 */
+
+#define ALP442_CHANNELS_ANALOG_COUNT 4
+#define ALP442_CHANNELS_AES_COUNT 4
+#define ALP442_CHANNELS_DAW_COUNT (ALP442_CHANNELS_ANALOG_COUNT + \
+ ALP442_CHANNELS_AES_COUNT)
+
+
+#define ALP442_CHANNELS_IN_DAW_OFFSET 0
+#define ALP442_CHANNELS_IN_ANALOG_OFFSET (ALP442_CHANNELS_IN_DAW_OFFSET + \
+ ALP442_CHANNELS_DAW_COUNT)
+#define ALP442_CHANNELS_IN_AES_OFFSET (ALP442_CHANNELS_IN_ANALOG_OFFSET + \
+ ALP442_CHANNELS_ANALOG_COUNT)
+
+#define ALP442_CHANNELS_OUT_ANALOG_OFFSET 0
+#define ALP442_CHANNELS_OUT_AES_OFFSET (ALP442_CHANNELS_OUT_ANALOG_OFFSET + \
+ ALP442_CHANNELS_ANALOG_COUNT)
+#define ALP442_CHANNELS_OUT_DAW_OFFSET (ALP442_CHANNELS_OUT_AES_OFFSET + \
+ ALP442_CHANNELS_AES_COUNT)
+
+
+/* ALP442 Clock Manager */
+#define ALP442_CLK_MANAGER_CONFIG_CLK_SRC_QTY 6
+#define ALP442_CLK_MANAGER_SOURCE_INTERNAL ALPMC_CLK_MANAGER_SOURCE_INTERNAL
+
+/* Mixer 442 */
+#define ALP442_MIXER_SIZE (ALP442_CHANNELS_ANALOG_COUNT + \
+ ALP442_CHANNELS_AES_COUNT + \
+ ALP442_CHANNELS_DAW_COUNT)
+
+
+
+/* Alp 222 */
+
+#define ALP222_ANALOG_QTY 0x2
+#define ALP222_AES3_QTY 0x2
+
+#define ALP222_EXT_CHANNEL_QTY (ALP222_ANALOG_QTY + ALP222_AES3_QTY)
+
+#define ALP222_DAW_QTY ALP222_EXT_CHANNEL_QTY
+
+
+#define ALP222_CLK_MANAGER_BASE ALP_CLK_MANAGER_BASE
+#define ALP222_CONTROL_BASE ALP_CLK_MANAGER_BASE
+#define ALP222_AMPLI_IN_BASE 0x79000
+#define ALP222_AMPLI_OUT_BASE 0x7c000
+#define ALP222_CODEC_CTRL_BASE 0x71000
+#define ALP222_GPIO_BASE 0x45000
+#define ALP222_MIXER_BASE 0x73000
+
/* Per-direction amplifier gain-bank base offsets.
 * BUGFIX: parenthesised — an unparenthesised 'base + reg' sum misbehaves
 * when the macro is embedded in a larger arithmetic expression. */
#define ALP222_DAW_PLAYBACK_AMPLI_BASE (ALP222_AMPLI_IN_BASE + ALPX_GAIN_REG(0))
#define ALP222_ANALOG_CAPTURE_AMPLI_BASE (ALP222_AMPLI_IN_BASE + ALPX_GAIN_REG(ALP222_DAW_QTY))
#define ALP222_AES3_CAPTURE_AMPLI_BASE (ALP222_AMPLI_IN_BASE + ALPX_GAIN_REG(ALP222_DAW_QTY + ALP222_ANALOG_QTY))

#define ALP222_ANALOG_PLAYBACK_AMPLI_BASE (ALP222_AMPLI_OUT_BASE + ALPX_GAIN_REG(0))
#define ALP222_AES3_PLAYBACK_AMPLI_BASE (ALP222_AMPLI_OUT_BASE + ALPX_GAIN_REG(ALP222_ANALOG_QTY))
#define ALP222_DAW_CAPTURE_AMPLI_BASE (ALP222_AMPLI_OUT_BASE + ALPX_GAIN_REG(ALP222_ANALOG_QTY + ALP222_AES3_QTY))
+
+
+/* Alp222 GPIO */
+#define ALP222_GPIO_INPUT_REG 0x00
+#define ALP222_GPIO_INPUT_QTY 2
+#define ALP222_GPIO_OUTPUT_REG 0x08
+#define ALP222_GPIO_OUTPUT_QTY 2
+
+/* Alp222 CODEC*/
+#define ALP222_CODEC_PGA_REGS 0x08
+
+/* Alp MADI */
+#define ALP222_MADI_QTY 0x40
+
+#define ALPMADI_CHANNEL_QTY ALP222_MADI_QTY
+
+//MADI Unit & DAW Unit
+#define ALPMADI_AUDIO_UNIT_QTY 2
+
+#define ALPMADI_CLK_MANAGER_BASE ALP_CLK_MANAGER_BASE
+#define ALPMADI_CONTROL_BASE ALP_CLK_MANAGER_BASE
+#define ALPMADI_ROUTER_IN_BASE 0x71000
+#define ALPMADI_MIXER_BASE 0x72000
+#define ALPMADI_ROUTER_OUT_BASE 0x73000
+#define ALPMADI_MEAS_DAW_IN_BASE 0x74000
+#define ALPMADI_GAIN_DAW_IN_BASE 0x75000
+#define ALPMADI_MEAS_DAW_OUT_BASE 0x76000
+#define ALPMADI_GAIN_DAW_OUT_BASE 0x77000
+#define ALPMADI_MEAS_MADI_IN_BASE 0x78000
+#define ALPMADI_GAIN_MADI_IN_BASE 0x79000
+#define ALPMADI_MEAS_MADI_OUT_BASE 0x7A000
+#define ALPMADI_GAIN_MADI_OUT_BASE 0x7B000
+#define ALPMADI_GPIO_BASE ALPUNKNOWN_GPIO_BASE
+
+#define ALPMADI_GPIO_INPUT_REG 0xFF
+#define ALPMADI_GPIO_OUTPUT_REG 0xFF
+
+/* Common */
+
+#define ALPX_COMMON_ID_REG 0x0
+
+
+#define ALPX_COMMON_VERSION_REG 0x4
+#define ALPX_COMMON_VERSION_VERSION(v) ((v) >> 16)
+#define ALPX_COMMON_VERSION_REVISION(v) ((v) & GENMASK(15, 0))
+
+#define ALPX_DANTE_VERSION_VERSION(v) ((v) >> 8)
+#define ALPX_DANTE_VERSION_REVISION(v) ((v) & GENMASK(7, 0))
+
+
+/* Common Clock 222/882 models*/
+
+#define ALPxxx_CLK_MANAGER_CLK_VALUES_QTY 14
+
+/* Values below use the full 5 bits of clock definitions : factor | base */
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_8K (0)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K (1)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_16K (2)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K (3)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_24K (4)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_32K (5)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K (6)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_48K (7)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_64K (0xD)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_128K (0x15)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K (0xE)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K (0x16)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_96K (0xF)
+#define ALPxxx_CLK_MANAGER_CLK_VALUE_192K (0x17)
+
+/* ALP222 Clock Manager */
+
+#define ALP222_CLK_MANAGER_CONFIG_REG 0x28
+#define ALP222_CLK_MANAGER_CONFIG_FS_MASK GENMASK(4, 0)
+#define ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK GENMASK(20, 16)
+#define ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS 16
+
+
+#define ALP222_CLK_MANAGER_CLK_SRC_MASK GENMASK(23, 21)
+#define ALP222_CLK_MANAGER_CLK_SRC_POS 21
+#define ALP222_CLK_MANAGER_CLK_SRC_QTY 5
+#define ALP222_CLK_MANAGER_CLK_SRC_INTERNAL (0)
+#define ALP222_CLK_MANAGER_CLK_SRC_SIC (1)
+#define ALP222_CLK_MANAGER_CLK_SRC_WCLK (2)
+#define ALP222_CLK_MANAGER_CLK_SRC_AES_SYNC (3)
+#define ALP222_CLK_MANAGER_CLK_SRC_AES_AUDIO (4)
+#define ALP222_CLK_MANAGER_CLK_SRC_MAX ALP222_CLK_MANAGER_CLK_SRC_AES_AUDIO
+
+/* Pre FW 283 clock sources definitions */
+#define ALP222_APPS_PREFW283_CLK_MANAGER_CLK_SRC_QTY 4
+#define ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_WCLK 0
+#define ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_AES_SYNC 1
+#define ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_AES_AUDIO 2
+#define ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_INTERNAL 3
+
+/* ALPMADI Clock Manager */
+
+#define ALPMADI_CLK_MANAGER_CONFIG_REG 0x1C
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_MASK GENMASK(10, 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_44_1K (0 << 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_48K (1 << 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_88_2K (2 << 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_96K (3 << 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_176_4K (4 << 8)
+#define ALPMADI_CLK_MANAGER_CONFIG_FS_192K (5 << 8)
+
+/* AlpDANTE Clock Manager clock register values */
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_44_1K 0x01
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_48K 0x00
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_88_2K 0x03
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_96K 0x02
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_176_4K 0x05
+#define ALPDANTE_CLK_MANAGER_CLK_VALUE_192K 0x04
+
+/* ALPX PROC */
+/* Embedded processor dedicated area length (in bytes) */
+#define ALP_PROC_AREA_LENGTH 0x800U
+
+#define ALP_PROC_VERSION_REG 0x00
+
+#define ALP_PROC_STATUS_REG 0x04
+#define ALP_PROC_STATUS_FAIL BIT(0)
+#define ALP_PROC_STATUS_PENDING BIT(1)
+
+#define ALP_PROC_FAILS_REG 0x08
+#define ALP_PROC_FAILS_FAIL_NONE 0
+
+#define ALP_PROC_COMMAND_REG 0x0C
+
+#define ALP_PROC_COMMAND_MAKE_P16(_cmd, _p16) ((((_p16) & 0x0FFFF) << 8) | \
+ (_cmd))
+#define ALP_PROC_CMD_READ_FLASH_SECTOR 0x10
+#define ALP_PROC_CMD_WRITE_FLASH_SECTOR 0x0A
+
+#define ALP_PROC_FWCONFIG_REG 0x10
+#define ALP_PROC_FWCONFIG_CLKSRC_UP_MASK GENMASK(0,0)
+#define ALP_PROC_FWCONFIG_CLKSRC_UP_POS 0
+
+#define ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK GENMASK(1,1)
+#define ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS 1
+
+
/* PROC Logs base register */
#define ALP_PROC_LOG_OFFSET 0x40U
/* BUGFIX: parenthesised — the bare sum/difference expansions broke when
 * these macros were used inside larger arithmetic expressions. */
#define ALP_PROC_LOG_BASE (ALP_PROC_BASE + ALP_PROC_LOG_OFFSET)

/* Log length, -8 ?? but done in the reference application from Easii-IC (log.c) */
#define ALP_PROC_LOG_LENGTH (ALP_PROC_AREA_LENGTH - ALP_PROC_LOG_OFFSET - 8U)
+
+/* Clock source priority def */
+#define ALP_PROC_CLK_SOURCE_PRIO_REG 0x14
+
+#define ALP_PROC_CLK_SOURCE_PRIO0_MASK GENMASK(3,0)
+#define ALP_PROC_CLK_SOURCE_PRIO0_POS 0
+
+#define ALP_PROC_CLK_SOURCE_PRIO1_MASK GENMASK(7,4)
+#define ALP_PROC_CLK_SOURCE_PRIO1_POS 4
+
+#define ALP_PROC_CLK_SOURCE_PRIO2_MASK GENMASK(11,8)
+#define ALP_PROC_CLK_SOURCE_PRIO2_POS 8
+
+#define ALP_PROC_CLK_SOURCE_PRIO3_MASK GENMASK(15,12)
+#define ALP_PROC_CLK_SOURCE_PRIO3_POS 12
+
+/* ALPX GPIO */
+#define ALPX_GPIO_MASK(i) BIT(i)
+#define ALPX_GPIO_SEL(i, v) (((v) << (i)) & ALPX_GPIO_MASK(i))
+#define ALPX_GPIO_VALUE(i, v) (((v) & ALPX_GPIO_MASK(i)) >> (i))
+
+/* Control */
+#define ALP_CONTROL_PCIE_SUBSYS_ID_REG 0x8
+#define ALP_CONTROL_SAMPLES_CNT_REG 0x18
+#define ALP_CLK_MANAGER_SAMPLES_COUNT_REG 0x18
+
+#define ALP222_CONTROL_PCIE_MASK_IT_REG 0xc
+#define ALP222_CONTROL_WC_RTERM_REG 0x1c
+#define ALP222_CONTROL_PCIE_WARM_RESET_REG 0x10
+#define ALP222_CONTROL_PCIE_WARM_RESET_VALUE 0xc07d8e51
+#define ALP222_CONTROL_PCIE_COLD_RESET_REG 0x14
+#define ALP222_CONTROL_PCIE_COLD_RESET_VALUE 0x8e5ef96a
+#define ALP222_CONTROL_FS_REG 0x28
+#define ALP222_CODEC_CTRL_ASRC_REG 0x10
+#define ALP222_MIC_CONTROL_REG 0x24
+
+#define ALPMADI_CONTROL_WARM_REBOOT_FPGA_REG 0x10
+#define ALPMADI_CONTROL_COLD_REBOOT_FPGA_REG 0x14
+#define ALPMADI_CONTROL_BLINK_LED_REG 0x20
+#define ALPMADI_CONTROL_EXT_ITF_CONFIG_REG 0x24
+#define ALPMADI_CONTROL_FS_SEL_REG 0x28
+
+#define ALPMADI_CONTROL_EXT0_STATUS_CTRL_REG 0x2c
+#define ALPMADI_CONTROL_EXT1_STATUS_CTRL_REG 0x30
+#define ALPMADI_CONTROL_EXT2_STATUS_CTRL_REG 0x34
+#define ALPMADI_CONTROL_EXT3_STATUS_CTRL_REG 0x38
+#define ALPMADI_CONTROL_PI_PARAMS_REG 0x3c
+
+
+#define ALPMADI_CONTROL_BLINK_LED_EN BIT(0)
+#define ALPMADI_CONTROL_PI_PARAMS_KI(v) ((v) & GENMASK(15, 0))
+#define ALPMADI_CONTROL_PI_PARAMS_KP(v) (((v) << 16) & GENMASK(31, 16))
+#define ALPMADI_CONTROL_EXT_ITF_CONFIG_MADI_LINK(v) ((v) & GENMASK(1, 0))
+#define ALPMADI_CONTROL_EXT_ITF_CONFIG_BNC_SIGNAL(v) (((v<<8)) & GENMASK(8, 8))
+
+/* Codec */
+#define ALPX_CODEC_CTRL_GAIN_REG(i) ((i) * 0x4)
+
+/* Gains */
+#define ALPX_GAIN_VALUE_BITS 10
+#define ALPX_GAIN_REG_ENTRIES_OFFSET 0xC
+#define ALPX_GAIN_REG(i) (ALPX_GAIN_REG_ENTRIES_OFFSET + (i) * 0x4)
+
+/* Router */
+#define ALPMADI_ROUTER_SIZE 128
+
+#define ALPX_ROUTER_REG_ENTRIES_OFFSET 0x8
+
+/* RAW ACCESS to router register i*/
+#define ALPX_ROUTER_REG(i) (0x8 + ((i) / 4) * 0x4)
+
+#define ALPX_ROUTER_SHIFT(i) (((i) % 4) * 8)
+#define ALPX_ROUTER_MASK(i) (GENMASK(7, 0) << \
+ ALPX_ROUTER_SHIFT(i))
+#define ALPX_ROUTER_SEL(i, v) (((v) & GENMASK(7, 0)) << \
+ ALPX_ROUTER_SHIFT(i))
+#define ALPX_ROUTER_VALUE(i, v) (((v) >> \
+ ALPX_ROUTER_SHIFT(i)) & \
+ GENMASK(7, 0))
+
+#define ALPMADI_ROUTER_PORT(i) (ALPX_ROUTER_REG_ENTRIES_OFFSET + ((i) * 0x4))
+
+#define ALPMADI_ROUTER_IN_PORT(i) ALPMADI_ROUTER_PORT(i)
+
+#define ALPMADI_ROUTER_OUT_PORT(i) ALPMADI_ROUTER_PORT(i)
+
+/* Mixer */
+#define ALPX_MIXER_GAIN_BITS 10
+#define ALPX_GAIN_0dB 0x385
+#define ALPX_GAIN_MUTE 0x000
+
+/** Mixer 222 **/
+
+/* Mixer Input i, output j & j+1*/
+#define ALP222_MIXER_SIZE 8
+#define ALPMADI_MIXER_SIZE 16
+
+//2 gains per register
+#define ALPMADI_MIXER_BANK_SIZE 2
+
+#define ALP222_MIXER_ENTRIES_IN_REG 2
+
//#define ALPX_GAIN_REG_VALUE_STEP_PER_DB 10
/* Convert a dB value to tenths of dB.
 * BUGFIX: the original had a space between the macro name and '(dB)', which
 * made it an OBJECT-like macro expanding to '(dB) ((db) * 10)', and the
 * parameter was spelt 'dB' while the body referenced 'db'. */
#define ALPX_GAIN_dB_TO_10ofdB(dB) ((dB) * 10)


/* Convert tenths of dB to the gain register value (offset from 0 dB). */
#define ALPX_GAIN_REG_VALUE_FROM_10ofDB(val10ofdb) (ALPX_GAIN_0dB + (val10ofdb))
+
+#define ALPX_MIXER_REG_ENTRIES_OFFSET 0x8
+
+/* Return the @reg for the given (input/output) in the mixer. MIND the register structure !! */
+//MADI structured as In into Outs
+//EDF MADI formula TO BE CHECKED
+//#define ALPMADI_MIXER_GAIN_REG(mixerSize, inputId, outputId) (0x8 + ((((mixerSize) * (inputId) + (outputId)) * 4 / ALPX_MIXER_ENTRIES_IN_REG)))
+
+// Structured as : Out from Ins
+#define ALP222_MIXER_GAIN_REG(size, in, out) (ALPX_MIXER_REG_ENTRIES_OFFSET + (out) * ((size) * 4 / ALP222_MIXER_ENTRIES_IN_REG) + ((in) / ALP222_MIXER_ENTRIES_IN_REG) * 4)
+
+//Odd entry is in higher bits
+#define ALP222_MIXER_GAIN_SHIFT_STEP(i) (((i) & 1) ? ALPX_MIXER_GAIN_BITS : 0)
+
+#define ALP222_MIXER_GAIN_MASK(i) (GENMASK(ALPX_MIXER_GAIN_BITS-1, 0) << \
+ ALP222_MIXER_GAIN_SHIFT_STEP(i))
+
+#define ALP222_MIXER_GAIN_SEL(i, v) (((v) & GENMASK(ALPX_MIXER_GAIN_BITS-1, 0)) << \
+ ALP222_MIXER_GAIN_SHIFT_STEP(i))
+
+#define ALP222_MIXER_GAIN_VALUE(i, v) (((v) >> \
+ ALP222_MIXER_GAIN_SHIFT_STEP(i)) & \
+ GENMASK(ALPX_MIXER_GAIN_BITS-1, 0))
+
+
+//Build the mixer register
+//#define ALP222_MIXER_REG_VALUE(left, right) ((left)<< ALP_GAIN_VALUE_BITS | (right))
+
+// Flags access
+#define ALP222_CODEC_CTRL_ASRC_MASK GENMASK(0, 0)
+#define ALP222_CODEC_CTRL_ASRC_POS 0
+
+// MIC Options defs
+#define ALP222_MIC_GAIN_L_MASK GENMASK(7,0)
+#define ALP222_MIC_GAIN_L_POS 0
+#define ALP222_MIC_GAIN_WIDTH 8
+
+#define ALP222_MIC_GAIN_R_MASK GENMASK(15,8)
+#define ALP222_MIC_GAIN_R_POS 8
+
+
+#define ALP222_MIC_DC_L_MASK GENMASK(16,16)
+#define ALP222_MIC_DC_L_POS 16
+#define ALP222_MIC_CM_L_MASK GENMASK(17,17)
+#define ALP222_MIC_CM_L_POS 17
+#define ALP222_MIC_PH_L_MASK GENMASK(18,18)
+#define ALP222_MIC_PH_L_POS 18
+
+#define ALP222_MIC_DC_R_MASK GENMASK(19,19)
+#define ALP222_MIC_DC_R_POS 19
+#define ALP222_MIC_CM_R_MASK GENMASK(20,20)
+#define ALP222_MIC_CM_R_POS 20
+#define ALP222_MIC_PH_R_MASK GENMASK(21,21)
+#define ALP222_MIC_PH_R_POS 21
+
+#define ALP222_MIC_EN_L_MASK GENMASK(22,22)
+#define ALP222_MIC_EN_R_MASK GENMASK(23,23)
+#define ALP222_MIC_EN_L_POS 22
+#define ALP222_MIC_EN_R_POS 23
+
+
+#define ALP222_MIC_HERE_POS 31
+
+#endif
diff --git a/snd-alpx/alpx_streams.c b/snd-alpx/alpx_streams.c
new file mode 100644
index 0000000..9215752
--- /dev/null
+++ b/snd-alpx/alpx_streams.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include <linux/version.h>
+
+#include "alpx_reg.h"
+#include "alpx.h"
+#include "alpx_streams.h"
+#include "alpx_led.h"
+
+#include <sound/asound.h>
+#include <sound/core.h>
+#include <linux/io.h>
+
+
+extern unsigned int log_transfers; //defined in alpx_core.c
+
+#if !IS_ENABLED(CONFIG_SND_DMAENGINE_PCM)
+ #if defined (CONFIG_KERNEL_GENERIC)
+ #if KERNEL_VERSION(5, 17, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/5.17/dmaengine_pcm.h"
+ #include "core/generic/5.17/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(5, 12, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/5.12/dmaengine_pcm.h"
+ #include "core/generic/5.12/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(5, 7, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/5.7/dmaengine_pcm.h"
+ #include "core/generic/5.7/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/5.5/dmaengine_pcm.h"
+ #include "core/generic/5.5/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/5.2/dmaengine_pcm.h"
+ #include "core/generic/5.2/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
+ #include "core/generic/4.19/dmaengine_pcm.h"
+ #include "core/generic/4.19/pcm_dmaengine.c"
+ #endif
+ #elif defined (CONFIG_KERNEL_REDHAT)
+ #if KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE
+ #include "core/RedHat/5.14/dmaengine_pcm.h"
+ #include "core/RedHat/5.14/pcm_dmaengine.c"
+ #elif KERNEL_VERSION(4, 18, 0) == LINUX_VERSION_CODE
+ #include "core/RedHat/4.18/dmaengine_pcm.h"
+ #include "core/RedHat/4.18/pcm_dmaengine.c"
+ #else
+ #error RedHat kernel not supported yet.
+ #endif
+ #endif
+#elif defined (CONFIG_KERNEL_GENERIC)
+ #include <sound/dmaengine_pcm.h>
+#else
+ #error "No valid DMA Engine support!"
+#endif
+
+/* The size (in bytes) of sample's container */
+const unsigned int ALPX_SAMPLE_CONTAINER_SIZE = 4;
+
+
+/* Configure */
+
+/*
+ * Set the sample rate on AlpMADI cards.
+ *
+ * Maps @rate onto the clock-manager FS field and performs a
+ * read-modify-write of the CLK_MANAGER CONFIG register, preserving the
+ * other configuration bits.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported rate.
+ */
+static int alpmadi_configure(struct alpx_device *alpx_dev, unsigned int rate)
+{
+	u32 config_fs, value;
+
+	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);
+
+	switch (rate) {
+	case 44100:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_44_1K;
+		break;
+	case 48000:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_48K;
+		break;
+	case 88200:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_88_2K;
+		break;
+	case 96000:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_96K;
+		break;
+	case 176400:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_176_4K;
+		break;
+	case 192000:
+		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_192K;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	value = readl(ALPX_REG(alpx_dev, ALPMADI, CLK_MANAGER, CONFIG));
+	dev_dbg(alpx_dev->dev, "%s(): alpMadi clk manager config %x\n", __func__, value);
+
+	/* Replace only the FS field, keep the rest of the register intact. */
+	value &= ~ALPMADI_CLK_MANAGER_CONFIG_FS_MASK;
+	value |= config_fs;
+
+	writel(value, ALPX_REG(alpx_dev, ALPMADI, CLK_MANAGER, CONFIG));
+	dev_dbg(alpx_dev->dev, "%s(): alpMadi clk manager set to %x\n", __func__, value);
+
+	return 0;
+}
+
+/*
+ * AlpDANTE cards run at a single, network-determined sample rate: the
+ * only accepted value is the variant's capture_hw->rate_min.  Accept it
+ * with 0, reject anything else with -EINVAL.
+ */
+static int alpdante_configure(struct alpx_device *alpx_dev, unsigned int rate)
+{
+	dev_dbg(alpx_dev->dev, "%s(): requesting rate %dHz\n", __func__, rate);
+
+	if (alpx_dev->variant->capture_hw->rate_min != rate) {
+		dev_dbg(alpx_dev->dev, "Requested %dHz not supported, Currently supported rate is %dHz\n", rate,
+			alpx_dev->variant->capture_hw->rate_min);
+		return -EINVAL;
+	}
+
+	dev_dbg(alpx_dev->dev, "%s(): requested rate %dHz is supported\n", __func__, rate);
+	return 0;
+}
+
+/*
+ * Set the sample rate on Alp222 (stereo) cards.
+ *
+ * Maps @rate onto the clock-manager FS field, then writes it and polls
+ * the register back (up to 10 tries, 1 ms apart) because the card's
+ * internals (PLL, buses) need time before the new setting takes effect.
+ *
+ * Returns 0 on success (or if the rate was already set), -EINVAL for an
+ * unsupported rate, -EIO when the card never acknowledged the change.
+ */
+static int alpstereo_configure(struct alpx_device *alpx_dev, unsigned int rate)
+{
+	u32 config_fs, read_value, target_value;
+	u32 try_qty = 10; /* retries */
+
+	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);
+
+	switch (rate) {
+	case 8000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_8K;
+		break;
+	case 11025:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K;
+		break;
+	case 16000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_16K;
+		break;
+	case 22050: /* was 22000: the standard rate matching CLK_VALUE_22_05K is 22050 Hz (cf. alpmultichannel_configure) */
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K;
+		break;
+	case 24000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_24K;
+		break;
+	case 32000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_32K;
+		break;
+	case 44100:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K;
+		break;
+	case 48000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_48K;
+		break;
+	case 64000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_64K;
+		break;
+	case 88200:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K;
+		break;
+	case 96000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_96K;
+		break;
+	case 128000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_128K;
+		break;
+	case 176400:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K;
+		break;
+	case 192000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_192K;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	read_value = readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));
+
+	/* Only if needed */
+	if ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) == config_fs)
+		return 0;
+
+	read_value &= ~ALP222_CLK_MANAGER_CONFIG_FS_MASK;
+	target_value = read_value | config_fs;
+
+	do {
+		writel(target_value, ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));
+		dev_dbg(alpx_dev->dev, "%s(): alp222 new clk config set to %x try: %d\n",
+			__func__, target_value, try_qty);
+
+		/* Add a delay to give time to the card to initialize its
+		 * internals (PLL, buses,...) to avoid internal stalls.
+		 */
+		mdelay(1);
+
+		read_value = readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));
+		dev_dbg(alpx_dev->dev, "%s(): alp222 check clk config: %x\n", __func__, read_value);
+	} while (--try_qty &&
+		 ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK ) !=
+		  (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK)));
+
+	if ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) !=
+	    (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK))
+		dev_err(alpx_dev->dev, "%s(): alp222 ERROR clk config, expected: 0x%x, actual: 0x%x, \n", __func__,
+			target_value, read_value);
+
+	dev_dbg(alpx_dev->dev, "%s(): alp222 new current clk config: 0x%x\n", __func__,
+		readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG)));
+
+	return (read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) ==
+	       (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) ? 0 : -EIO;
+}
+
+/*
+ * Set the sample rate on Alp882-family (multichannel) cards.
+ *
+ * Behaviour depends on the current clock source read from the
+ * CLK_MANAGER CONFIG register:
+ *  - external source: the requested rate must match the effective rate
+ *    reported by the card, otherwise -EINVAL;
+ *  - internal source: write the new FS field (if it differs) and wait
+ *    ALPMC_FS_SWITCH_DELAY ms for the card's PLL/buses to settle.
+ *
+ * Returns 0 on success, -EINVAL on unsupported/mismatching rate.
+ */
+static int alpmultichannel_configure(struct alpx_device *alpx_dev, unsigned int rate)
+{
+	u32 config_fs, value;
+	u32 effective_fs, source;
+	/* XXX: check clock source, if not internal, error out if rate is not
+	   external one */
+	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);
+
+	value = readl(ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG));
+
+
+
+	source = ALPMC_CLK_MANAGER_SOURCE(value);
+	effective_fs = ALPMC_CLK_MANAGER_EFFECTIVE_FS(value);
+
+	/* Translate the requested rate into the FS register encoding. */
+	switch (rate) {
+	case 8000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_8K;
+		break;
+	case 11025:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K;
+		break;
+	case 16000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_16K;
+		break;
+	case 22050:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K;
+		break;
+	case 24000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_24K;
+		break;
+	case 32000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_32K;
+		break;
+	case 44100:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K;
+		break;
+	case 48000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_48K;
+		break;
+	case 64000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_64K;
+		break;
+	case 88200:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K;
+		break;
+	case 96000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_96K;
+		break;
+	case 128000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_128K;
+		break;
+	case 176400:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K;
+		break;
+	case 192000:
+		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_192K;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (source != ALPMC_CLK_MANAGER_SOURCE_INTERNAL) {
+		unsigned int effective_rate;
+
+		/* Decode the card-reported effective FS back into Hz. */
+		switch(effective_fs) {
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_8K :
+			effective_rate = 8000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K :
+			effective_rate = 11025;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_16K :
+			effective_rate = 16000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K :
+			effective_rate = 22050;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_24K :
+			effective_rate = 24000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_32K:
+			effective_rate = 32000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K:
+			effective_rate = 44100;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_48K :
+			effective_rate = 48000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_64K :
+			effective_rate = 64000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K :
+			effective_rate = 88200;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_96K :
+			effective_rate = 96000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_128K :
+			effective_rate = 128000;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K :
+			effective_rate = 176400;
+			break;
+		case ALPxxx_CLK_MANAGER_CLK_VALUE_192K :
+			effective_rate = 192000;
+			break;
+		default :
+			dev_err(alpx_dev->dev,
+				"Invalid effective sample rate\n");
+			return -EINVAL;
+		};
+
+		if (effective_rate != rate) {
+			dev_err(alpx_dev->dev,
+				"Requested sample rate (%u) does not match external clock sample rate (%u)\n",
+				rate, effective_rate);
+			return -EINVAL;
+		}
+	} else {
+		if (ALPMC_CLK_MANAGER_CONFIG_FS(value) != config_fs) {
+
+			value &= ~ALPMC_CLK_MANAGER_CONFIG_FS_MASK;
+			value |= config_fs;
+
+			writel(value, ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG));
+			//Add a delay to give time to the card (PLL, internal buses,...), w/o, the setting is not effective !
+			mdelay(ALPMC_FS_SWITCH_DELAY);
+		}
+	}
+
+	dev_dbg(alpx_dev->dev, "%s() : %s new current clk config: 0x%x\n", __func__,
+		alpx_dev->variant->shortname,
+		readl(ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG)));
+
+	return 0;
+}
+
+/*
+ * Reference-counted, lock-protected sample-rate configuration shared by
+ * playback and capture.  The first user sets the card rate via the
+ * variant-specific helper; while any stream is open, further users must
+ * request the same rate (otherwise -EINVAL).  On success config.users is
+ * incremented; pair with alpx_configure_close().
+ */
+static int alpx_configure(struct alpx_device *alpx_dev, unsigned int rate)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&alpx_dev->config.lock, flags);
+
+	/* A different rate cannot be applied while streams are active. */
+	if (alpx_dev->config.users > 0 && rate != alpx_dev->config.rate) {
+		ret = -EINVAL;
+		goto complete;
+	}
+
+	alpx_dev->config.rate = rate;
+
+	/* Dispatch to the card-family specific configuration routine. */
+	if (alpx_is_882(alpx_dev))
+		ret = alpmultichannel_configure(alpx_dev, rate);
+	else if (alpx_is_222(alpx_dev))
+		ret = alpstereo_configure(alpx_dev, rate);
+	else if (alpx_is_madi(alpx_dev))
+		ret = alpmadi_configure(alpx_dev, rate);
+	else if (alpx_is_dante(alpx_dev))
+		ret = alpdante_configure(alpx_dev, rate);
+	else
+		snd_BUG();
+
+	if (!ret)
+		alpx_dev->config.users++;
+
+complete:
+	spin_unlock_irqrestore(&alpx_dev->config.lock, flags);
+
+	return ret;
+}
+
+/* Drop one configuration reference taken by alpx_configure(). */
+static void alpx_configure_close(struct alpx_device *alpx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpx_dev->config.lock, flags);
+
+	alpx_dev->config.users--;
+
+	spin_unlock_irqrestore(&alpx_dev->config.lock, flags);
+}
+
+/* Pipe */
+
+/*
+ * Start a pipe: trigger the dmaengine PCM transfer and snapshot the
+ * card's sample counter so alpx_pipe_stop() can report how many samples
+ * were moved.  Rejects a pipe that is not idle/stopped with -EBUSY.
+ * On MADI cards also switches the front LED on.
+ */
+static int alpx_pipe_start(struct alpx_pipe *pipe, struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+
+	dev_dbg( alpx_dev->dev, "%s starting\n", pipe->xdma_write ? "Playback" : "Capture");
+
+	if (pipe->status != ALPX_PIPE_STATUS_IDLE &&
+	    pipe->status != ALPX_PIPE_STATUS_STOPPED) {
+		dev_warn(alpx_dev->dev, "%s pipe is busy!\n",
+			 pipe->xdma_write ? "playback" : "capture");
+		return -EBUSY;
+	}
+
+	if (alpx_is_madi(alpx_dev))
+		alpmadi_set_led_state(alpx_dev, true);
+
+	snd_dmaengine_pcm_trigger(substream, SNDRV_PCM_TRIGGER_START);
+	/* Remember the counter at start; used as baseline in alpx_pipe_stop(). */
+	pipe->period_qty_on_start = alpx_get_samples_counter(alpx_dev);
+
+	dev_dbg( alpx_dev->dev, "%s Period:%u\n", pipe->xdma_write ? "Playback" : "Capture", pipe->period_qty_on_start);
+
+	return 0;
+}
+
+/*
+ * Open a pipe: request the matching XDMA channel ("h2c-0" host-to-card
+ * for playback, "c2h-0" card-to-host for capture) and hand it to the
+ * ALSA dmaengine helper.  Returns 0 on success or a negative errno.
+ */
+static int alpx_pipe_open(struct alpx_pipe *pipe,
+			  struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = substream->pcm->private_data;
+	struct dma_chan *chan;
+
+	/* Check */
+	if (!alpx_dev) {
+		printk(KERN_ERR"%s() : alpx_dev is NULL\n",__func__);
+		return -EINVAL;
+	}
+
+	if (!alpx_dev->dev) {
+		printk(KERN_ERR"%s() : alpx_dev->dev is NULL\n",__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(alpx_dev->dev, "%s(): CALLED\n", __func__);
+
+	dev_dbg(alpx_dev->dev, "%s: matching with dev [%s]\n", __func__, dev_name(alpx_dev->dev));
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		chan = dma_request_chan(alpx_dev->dev, "h2c-0");
+	else
+		chan = dma_request_chan(alpx_dev->dev, "c2h-0");
+
+	if (IS_ERR(chan)) {
+		dev_err(alpx_dev->dev, "%s() : Error %ld when requesting DMA channel\n",
+			__func__, PTR_ERR(chan));
+		return PTR_ERR(chan);
+	}
+
+	pipe->substream = substream;
+
+	/* The dmaengine helper takes ownership of the channel from here. */
+	return snd_dmaengine_pcm_open(substream, chan);
+
+}
+
+/*
+ * Stop a pipe: trigger the dmaengine STOP, log how many samples the card
+ * processed since start, and move the pipe to STOPPING (completion
+ * pending) or STOPPED.  On MADI cards also switches the front LED off.
+ */
+static int alpx_pipe_stop(struct alpx_pipe *pipe, struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	unsigned int samples_counter, card_samples_qty;
+
+	/* Retrieve the samples counter as soon as possible */
+	samples_counter = alpx_get_samples_counter(alpx_dev);
+	snd_dmaengine_pcm_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
+
+	if (alpx_is_madi(alpx_dev))
+		alpmadi_set_led_state(alpx_dev, false);
+
+	/*
+	 * Since we are under stream lock here and the transfer done callback
+	 * also needs to grab stream lock, we cannot have a sync wait for the
+	 * completion. As a result, we request a stop with the stopping status.
+	 */
+	card_samples_qty = samples_counter - pipe->period_qty_on_start;
+	dev_dbg(alpx_dev->dev, "%s stopped after %u samples (%u bytes)\n",
+		pipe->xdma_write ? "Playback" : "Capture", card_samples_qty,
+		runtime->hw.channels_max * ALPX_SAMPLE_CONTAINER_SIZE * card_samples_qty);
+
+	if (pipe->status == ALPX_PIPE_STATUS_RUNNING)
+		pipe->status = ALPX_PIPE_STATUS_STOPPING;
+	else
+		pipe->status = ALPX_PIPE_STATUS_STOPPED;
+
+	return 0;
+}
+
+/*
+ * Apply hw_params to a pipe: log the negotiated parameters, configure
+ * the card sample rate via alpx_configure() (reference-counted) and
+ * mark the pipe configured so alpx_pipe_close() releases the reference.
+ */
+static int alpx_pipe_configure(struct alpx_pipe *pipe,
+			       struct snd_pcm_hw_params *hw_params)
+{
+	struct alpx_device *alpx_dev = pipe->substream->pcm->private_data;
+	unsigned int rate = params_rate(hw_params);
+	int ret;
+
+	dev_dbg(alpx_dev->dev, "%s hardware parameters:\n",
+		pipe->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? "Playback" : "Capture");
+	dev_dbg(alpx_dev->dev, "* channels: %d\n", params_channels(hw_params));
+	dev_dbg(alpx_dev->dev, "* rate: %d\n", params_rate(hw_params));
+	dev_dbg(alpx_dev->dev, "* periods: %d\n", params_periods(hw_params));
+	dev_dbg(alpx_dev->dev, "* period size: %d frames\n",
+		params_period_size(hw_params));
+	dev_dbg(alpx_dev->dev, "* period bytes: %d bytes\n",
+		params_period_size(hw_params) * params_channels(hw_params) * 4);
+	dev_dbg(alpx_dev->dev, "* buffer size: %d frames\n",
+		params_buffer_size(hw_params));
+	dev_dbg(alpx_dev->dev, "* buffer bytes: %d bytes\n",
+		params_buffer_bytes(hw_params));
+
+	ret = alpx_configure(alpx_dev, rate);
+	if (ret)
+		return ret;
+
+	pipe->configured = true;
+
+	return 0;
+}
+
+/*
+ * Close a pipe: release the DMA channel through the dmaengine helper and
+ * drop the rate-configuration reference if hw_params had taken one.
+ */
+static void alpx_pipe_close(struct alpx_pipe *pipe)
+{
+	struct alpx_device *alpx_dev = pipe->substream->pcm->private_data;
+
+	dev_dbg(alpx_dev->dev, "%s(): CALLED\n", __func__);
+
+	snd_dmaengine_pcm_close_release_chan(pipe->substream);
+
+	if (pipe->configured) {
+		alpx_configure_close(alpx_dev);
+		pipe->configured = false;
+	}
+}
+
+/* Initialise a pipe descriptor: record its DMA direction and mark it idle. */
+int alpx_pipe_init(struct alpx_pipe *pipe, bool xdma_write)
+{
+	pipe->status = ALPX_PIPE_STATUS_IDLE;
+	pipe->xdma_write = xdma_write;
+	return 0;
+}
+
+/* Playback */
+
+/*
+ * ALSA playback open callback: request the XDMA write channel via
+ * alpx_pipe_open() and, on success, expose the variant's playback
+ * hardware capabilities to the runtime.
+ */
+int alpx_playback_open(struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	//Add a break to avoid playback lock when chaining playbacks. This is required for AlpDANTE cards. Looks like the XDMA needs some time here.
+	msleep(200);
+
+	dev_dbg(alpx_dev->dev, "CALLED\n");
+
+	ret = alpx_pipe_open(&alpx_dev->playback, substream);
+	if (!ret)
+		runtime->hw = *alpx_dev->variant->playback_hw;
+
+	/* ret is a signed errno: log it with %d (was %u, which would print
+	 * negative errors as huge unsigned values). */
+	dev_dbg(alpx_dev->dev, "=> %d\n", ret);
+	return ret;
+}
+
+/* ALSA playback close callback: tear down the playback pipe. */
+int alpx_playback_close(struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+
+	alpx_pipe_close(&alpx_dev->playback);
+	return 0;
+}
+
+/*
+ * ALSA playback hw_params callback: configure the card rate; on kernels
+ * older than 5.5 also allocate the DMA buffer pages (newer kernels
+ * preallocate via the managed buffer API).
+ */
+int alpx_playback_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *hw_params)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+	int ret;
+
+	ret = alpx_pipe_configure(&alpx_dev->playback, hw_params);
+	if (ret)
+		return ret;
+
+#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+	return 0;
+#else
+	return snd_pcm_lib_malloc_pages(substream,
+					params_buffer_bytes(hw_params));
+#endif
+}
+
+/* ALSA playback hw_free callback: free pages on pre-5.5 kernels only. */
+int alpx_playback_hw_free(struct snd_pcm_substream *substream)
+{
+
+#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+	return 0;
+#else
+	return snd_pcm_lib_free_pages(substream);
+#endif
+}
+
+/* ALSA playback prepare callback: nothing to do for this hardware. */
+int alpx_playback_prepare(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+/* ALSA playback trigger callback: only START and STOP are supported. */
+int alpx_playback_trigger(struct snd_pcm_substream *substream,
+			  int command)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+
+	dev_dbg(alpx_dev->dev, "CALLED\n");
+
+	switch (command) {
+	case SNDRV_PCM_TRIGGER_START:
+		alpx_pipe_start(&alpx_dev->playback, substream);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		alpx_pipe_stop(&alpx_dev->playback, substream);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Playback PCM operations; pointer is delegated to the dmaengine helper. */
+const struct snd_pcm_ops alpx_playback_ops = {
+	.open = alpx_playback_open,
+	.close = alpx_playback_close,
+#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
+	.ioctl = snd_pcm_lib_ioctl,
+#endif
+	.hw_params = alpx_playback_hw_params,
+	.hw_free = alpx_playback_hw_free,
+	.prepare = alpx_playback_prepare,
+	.trigger = alpx_playback_trigger,
+	.pointer = snd_dmaengine_pcm_pointer,
+};
+
+/* Capture */
+
+/*
+ * ALSA capture open callback: request the XDMA read channel via
+ * alpx_pipe_open() and expose the variant's capture capabilities.
+ */
+int alpx_capture_open(struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	ret = alpx_pipe_open(&alpx_dev->capture, substream);
+	if (ret)
+		return ret;
+
+	runtime->hw = *alpx_dev->variant->capture_hw;
+
+	return 0;
+}
+
+/* ALSA capture close callback: tear down the capture pipe. */
+int alpx_capture_close(struct snd_pcm_substream *substream)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+
+	alpx_pipe_close(&alpx_dev->capture);
+
+	return 0;
+}
+
+/*
+ * ALSA capture hw_params callback: configure the card rate; on kernels
+ * older than 5.5 also allocate the DMA buffer pages.
+ */
+int alpx_capture_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *hw_params)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+	int ret;
+
+	ret = alpx_pipe_configure(&alpx_dev->capture, hw_params);
+	if (ret)
+		return ret;
+
+#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+	return 0;
+#else
+	return snd_pcm_lib_malloc_pages(substream,
+					params_buffer_bytes(hw_params));
+#endif
+}
+
+/* ALSA capture hw_free callback: free pages on pre-5.5 kernels only. */
+int alpx_capture_hw_free(struct snd_pcm_substream *substream)
+{
+#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
+	return 0;
+#else
+	return snd_pcm_lib_free_pages(substream);
+#endif
+}
+
+/* ALSA capture prepare callback: nothing to do for this hardware. */
+int alpx_capture_prepare(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+/* ALSA capture trigger callback: only START and STOP are supported. */
+int alpx_capture_trigger(struct snd_pcm_substream *substream,
+			 int command)
+{
+	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
+
+	switch (command) {
+	case SNDRV_PCM_TRIGGER_START:
+		alpx_pipe_start(&alpx_dev->capture, substream);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		alpx_pipe_stop(&alpx_dev->capture, substream);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Capture PCM operations; pointer is delegated to the dmaengine helper. */
+const struct snd_pcm_ops alpx_capture_ops = {
+	.open = alpx_capture_open,
+	.close = alpx_capture_close,
+#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
+	.ioctl = snd_pcm_lib_ioctl,
+#endif
+	.hw_params = alpx_capture_hw_params,
+	.hw_free = alpx_capture_hw_free,
+	.prepare = alpx_capture_prepare,
+	.trigger = alpx_capture_trigger,
+	.pointer = snd_dmaengine_pcm_pointer,
+};
diff --git a/snd-alpx/alpx_streams.h b/snd-alpx/alpx_streams.h
new file mode 100644
index 0000000..bb33554
--- /dev/null
+++ b/snd-alpx/alpx_streams.h
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+#ifndef _ALPX_STREAMS_EDF_H
+#define _ALPX_STREAMS_EDF_H
+
+#include <sound/pcm.h>
+
+/* Structures */
+
+/* Life-cycle states of a capture/playback pipe (see alpx_streams.c). */
+enum alpx_pipe_status {
+	ALPX_PIPE_STATUS_IDLE = 0,	/* never started */
+	ALPX_PIPE_STATUS_RUNNING,
+	ALPX_PIPE_STATUS_STOPPING,	/* stop requested, completion pending */
+	ALPX_PIPE_STATUS_STOPPED,
+};
+
+/* One DMA stream direction (playback or capture) of an AlpX card. */
+struct alpx_pipe {
+	u32 period_qty_on_start; /* 2^64-1 periods @ 192kHZ = 2M years ! , u32 => 0.2year , enough to debug*/
+	struct snd_pcm_substream *substream;	/* attached ALSA substream */
+
+	enum alpx_pipe_status status;
+	bool configured;	/* rate reference held (set in hw_params) */
+	bool xdma_write;	/* true for playback (host-to-card DMA) */
+};
+
+
+/* Pipe */
+
+int alpx_pipe_init(struct alpx_pipe *pipe, bool xdma_write);
+
+/* Playback */
+extern const struct snd_pcm_ops alpx_playback_ops;
+
+int alpx_playback_open(struct snd_pcm_substream *substream);
+int alpx_playback_close(struct snd_pcm_substream *substream);
+int alpx_playback_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *hw_params);
+int alpx_playback_hw_free(struct snd_pcm_substream *substream);
+int alpx_playback_prepare(struct snd_pcm_substream *substream);
+int alpx_playback_trigger(struct snd_pcm_substream *substream,
+			  int command);
+/* NOTE(review): declared here but alpx_streams.c wires
+ * snd_dmaengine_pcm_pointer into its ops — confirm this prototype is
+ * still needed. */
+snd_pcm_uframes_t alpx_playback_pointer(struct snd_pcm_substream *substream);
+
+/* Capture */
+
+extern const struct snd_pcm_ops alpx_capture_ops;
+
+
+int alpx_capture_open(struct snd_pcm_substream *substream);
+int alpx_capture_close(struct snd_pcm_substream *substream);
+int alpx_capture_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *hw_params);
+int alpx_capture_hw_free(struct snd_pcm_substream *substream);
+int alpx_capture_prepare(struct snd_pcm_substream *substream);
+int alpx_capture_trigger(struct snd_pcm_substream *substream,
+			 int command);
+
+#endif
diff --git a/snd-alpx/alpx_variants_882_apps_preFW240.h b/snd-alpx/alpx_variants_882_apps_preFW240.h
new file mode 100644
index 0000000..713a3a4
--- /dev/null
+++ b/snd-alpx/alpx_variants_882_apps_preFW240.h
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_882_APP_PREFW240H_
+#define _ALPX_VARIANTS_882_APP_PREFW240H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+
+/* 882 ONLY !!*/
+/* Alp882 Commons */
+/* Clock-source translation tables kept for compatibility with
+ * applications built against pre-FW-V240 firmware, whose clock-source
+ * enumeration order differs from the current register encoding. */
+static const char *alp882_app_preFW240_control_choice_clk_src_entries[ALP882_APP_PREFW240_CLK_MANAGER_CLK_SRC_QTY] = {
+	"Word Clk",
+	"AES Syn",
+	"AES Aud 1/2",
+	"AES Aud 3/4",
+	"AES Aud 5/6",
+	"AES Aud 7/8",
+	"Internal",
+};
+
+/* Index is application side index, items is the register ones*/
+static u32 alp882_app_preFW240_control_choice_clk_src_entries_values[ALP882_APP_PREFW240_CLK_MANAGER_CLK_SRC_QTY] = {
+	ALPMC_CLK_MANAGER_SOURCE_WCLK_IN,
+	ALPMC_CLK_MANAGER_SOURCE_AES_SYNC,
+	ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_12,
+	ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_34,
+	ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_56,
+	ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_78,
+	ALPMC_CLK_MANAGER_SOURCE_INTERNAL,
+};
+
+/* Indexed by the register => application side value */
+static u32 alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values [ALP882_CLK_MANAGER_CONFIG_CLK_SRC_QTY] = {
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_INTERNAL,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_INTERNAL, /* FORBIDDEN VALUE => INTERNAL !!*/
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_WCLK_IN,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_SYNC,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_12,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_34,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_56,
+	ALP882_APP_PREFW240_CLK_MANAGER_SOURCE_AES_AUDIO_78,
+};
+
+
+/* PCM capabilities advertised to ALSA for pre-FW-V240 Alp882 variants:
+ * fixed 16-channel S32_LE interleaved streams, 8 kHz - 192 kHz. */
+static struct snd_pcm_hardware alp882_app_preFW240_hardware_specs = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE,
+	.rates = SNDRV_PCM_RATE_CONTINUOUS |
+		 SNDRV_PCM_RATE_8000_192000,
+	.rate_min = 8000,
+	.rate_max = 192000,
+	.channels_min = 16,
+	.channels_max = 16,
+	.buffer_bytes_max = 256 * SZ_1K * 4, /* period_bytes_max * periods_max */
+	.period_bytes_min = 48, /* min latency 1ms */
+	.period_bytes_max = 256 * SZ_1K, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+	.periods_min = 1,
+	.periods_max = 4,
+};
+
+
+/* Alp 882 LINE Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp882_app_preFW240_line_control_codec_gains_scale, ALPMC_LINE_ANALOG_GAIN_MIN_cdB, ALPMC_LINE_ANALOG_GAIN_MAX_cdB);
+
+static struct alpx_control_descriptor alp882_app_preFW240_line_control_descriptors[] = {
+ /* Gain */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP882_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.translated_choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED to keep Ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Auto AES SRC disable */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 5/6 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_5_6_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_5_6_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 7/8 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_7_8_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_7_8_POS,
+ },
+ },
+ /* Codec Input Gain for LINE Option*/
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alp882_app_preFW240_line_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_LINE_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_LINE_ANALOG_GAIN_MAX_REG,
+ .lines_count = 8,
+ },
+ },
+};
+
+static struct alpx_variant alp882_app_preFW240_line_variant __attribute__((unused)) = { /* Alp882e with LINE analog option, firmware < 240 */
+ .shortname = "Alp882e",
+ .longname = "Alp 882e",
+ .model = ALPX_VARIANT_MODEL_ALP882,
+ .mixername = "Alp882e_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp882_app_preFW240_line_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp882_app_preFW240_line_control_descriptors),
+
+ .capture_hw = &alp882_app_preFW240_hardware_specs, /* same PCM caps for both directions */
+ .playback_hw = &alp882_app_preFW240_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1, /* NOTE(review): presumably only the first partition is written during FW update -- confirm against alpx_mtd.c */
+};
+
+
+/* Alp 882 - MIC App preFW240 Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp882_app_preFW240_mic_control_codec_gains_scale, ALPMC_MIC_ANALOG_GAIN_MIN_cdB, ALPMC_MIC_ANALOG_GAIN_MAX_cdB); /* TLV dB range (centi-dB, with mute) advertised to userspace for the MIC preamp */
+
+static struct alpx_control_descriptor alp882_app_preFW240_mic_control_descriptors[] = {
+ /* Digital amplifier gains: DAW/Analog/AES capture, then Analog/AES/DAW playback */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Internal mixer matrix (ALP882_MIXER_SIZE lines) */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP882_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ, /* NOTE(review): unlike "Clk Eff" below this is not marked VOLATILE -- confirm */
+ .data.translated_choice= {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED now to keep ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp882_app_preFW240_control_choice_clk_src_entries,
+ .entries_values = alp882_app_preFW240_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_app_preFW240_control_choice_clk_src_entries),
+ .card_entries_values = alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp882_app_preFW240_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Auto AES SRC disable */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 5/6 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_5_6_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_5_6_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 7/8 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_7_8_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_7_8_POS,
+ },
+ },
+ /* Codec analog input gain (PGA) for the MIC option */
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec MIC Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alp882_app_preFW240_mic_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_MIC_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_MIC_ANALOG_GAIN_MAX_REG,
+ .lines_count = 8,
+ },
+ },
+ /** MIC Phantoms **/
+ /* MIC Phantom 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = { /* NOTE(review): FLAG-typed control initialized via .data.mic_flags while other flags use .data.flag -- confirm the union members alias */
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_1_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_1_POS,
+ },
+ },
+ /* MIC Phantom 2 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_2_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_2_POS,
+ },
+ },
+ /* MIC Phantom 3 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_3_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_3_POS,
+ },
+ },
+ /* MIC Phantom 4 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh4",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_4_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_4_POS,
+ },
+ },
+ /* MIC Phantom 5 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh5",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_5_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_5_POS,
+ },
+ },
+ /* MIC Phantom 6 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh6",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_6_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_6_POS,
+ },
+ },
+ /* MIC Phantom 7 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh7",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_7_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_7_POS,
+ },
+ },
+ /* MIC Phantom 8 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh8",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_8_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_8_POS,
+ },
+ },
+};
+
+
+static struct alpx_variant alp882_app_preFW240_mic_variant __attribute__((unused)) = { /* Alp882e with MIC analog option, firmware < 240 */
+ .shortname = "Alp882e-MIC",
+ .longname = "Alp 882e MIC",
+ .model = ALPX_VARIANT_MODEL_ALP882_MIC,
+ .mixername = "Alp882e_MIC_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp882_app_preFW240_mic_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp882_app_preFW240_mic_control_descriptors),
+
+ .capture_hw = &alp882_app_preFW240_hardware_specs, /* same PCM caps for both directions */
+ .playback_hw = &alp882_app_preFW240_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1, /* NOTE(review): presumably only the first partition is written during FW update -- confirm against alpx_mtd.c */
+};
+
+#endif /* _ALPX_VARIANTS_882_APP_PREFW240H_ */
diff --git a/snd-alpx/alpx_variants_common.h b/snd-alpx/alpx_variants_common.h
new file mode 100644
index 0000000..ef33e55
--- /dev/null
+++ b/snd-alpx/alpx_variants_common.h
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_COMMON_H_
+#define _ALPX_VARIANTS_COMMON_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include <linux/sizes.h>
+
+/** Common parts **/
+
+/* Flash Partitions with USER access mode rules. Offsets are cumulative; layout: golden | fw-user | init-config | user-config | production */
+static struct mtd_partition alpx_mtd_partitions[ALPX_FLASH_PARTITION_QTY] __attribute__((unused)) = {
+ /* golden */
+ {
+ .name = "golden",
+ .offset = 0x000000,
+ .size = 0x400000, /* 4 MiB */
+ },
+ /* user firmware */
+ {
+ .name = "fw-user-updatable",
+ .offset = 0x400000,
+ .size = 0x218000,
+ },
+ /* initial configuration */
+ {
+ .name = "init-config",
+ .offset = 0x400000 + 0x218000,
+ .size = 0x008000, /* 32 KiB */
+ },
+ /* user configuration */
+ {
+ .name = "user-config",
+ .offset = 0x400000 + 0x218000 + 0x008000,
+ .size = 0x008000,
+ },
+ /* PRODUCTION : last area on flash, but keep it here to be the one before last partition*/
+ {
+ .name = "production",
+ .offset = 0x400000 + 0x218000 + 0x008000 + 0x008000,
+ .size = 0x008000, /* Limited size : this is enough instead of remaining Flash which is 0x1D8000 bytes*/
+ }
+};
+
+static struct mtd_partition alpx_dead_mtd_partitions[ALPX_DEAD_FLASH_PARTITION_QTY] __attribute__((unused)) = { /* reduced layout for recovery of DEAD cards */
+ /* GOLDEN Prod */
+ {
+ .name = "golden-prod",
+ .offset = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+ .size = 0x008000, /* 32 KiB */
+ },
+ /* PRODUCTION : last area on flash, but keep it here to be the one before last partition*/
+ {
+ .name = "production",
+ .offset = 0x400000 + 0x218000 + 0x008000 + 0x008000, /* same offset as the "production" entry of alpx_mtd_partitions */
+ .size = 0x008000, /* Limited size : this is enough instead of remaining Flash which is 0x1D8000 bytes*/
+ }
+};
+
+/* Common clock controls: sample-rate labels shown to userspace by the "Clk Eff" enum control */
+static const char* alpxxx_control_choice_current_clk_values_entries[ALPxxx_CLK_MANAGER_CLK_VALUES_QTY] __attribute__((unused)) = {
+ "8kHz", "11.025kHz", "16kHz","22.05kHz", "24kHz", "32kHz", "44.1kHz", "48kHz", "64kHz", "88.2kHz",
+ "96kHz", "128kHz", "176.4kHz","192kHz" /* fix: was "176,4kHz" (decimal comma), inconsistent with the other labels */
+};
+/* Same order as the label strings above -- the two arrays are index-paired */
+static u32 alpxxx_control_choice_current_clk_values_entries_values[ALPxxx_CLK_MANAGER_CLK_VALUES_QTY] __attribute__((unused)) = {
+ALPxxx_CLK_MANAGER_CLK_VALUE_8K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_16K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_24K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_32K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_48K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_64K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_96K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_128K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K,
+ALPxxx_CLK_MANAGER_CLK_VALUE_192K ,
+};
+
+/* Digital channels gains scale: TLV dB range (centi-dB, with mute) shared by all amplifier and mixer controls */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alpxxx_line_digital_gains_scale,
+ ALP_AMPLIFIER_GAIN_MIN_cdB, ALP_AMPLIFIER_GAIN_MAX_cdB);
+
+
+#endif /* _ALPX_VARIANTS_COMMON_H_ */
diff --git a/snd-alpx/alpx_variants_dante.h b/snd-alpx/alpx_variants_dante.h
new file mode 100644
index 0000000..41a5c82
--- /dev/null
+++ b/snd-alpx/alpx_variants_dante.h
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_DANTE_H_
+#define _ALPX_VARIANTS_DANTE_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+#include "alpx_axcmem.h"
+#include "alpx_controls.h"
+
+/* DANTE card */
+
+#define ALPDANTE_DEFAULT_FS 48000
+
+
+static struct snd_pcm_hardware alpdante_hardware_specs = { /* 64 in / 64 out, locked to 48 kHz */
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = ALPDANTE_DEFAULT_FS,
+ .rate_max = ALPDANTE_DEFAULT_FS,
+ .channels_min = 64,
+ .channels_max = 64,
+ .buffer_bytes_max = SZ_1M * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* NOTE(review): "min latency 1ms" comment inherited from other variants; 1 ms at 64ch * 4B * 48kHz would be 12288 B -- confirm */
+ .period_bytes_max = SZ_1M, /* NOTE(review): sized as "20ms at 192kHz" although this card is fixed at 48 kHz -- inherited, confirm */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* FPPR Support (comment typo fixed: was "FFPR") */
+
+/* FPPR Parameters */
+static const u32 ALPDANTE_FLASH_UNLOCK = 1;
+static const u32 ALPDANTE_FLASH_WAIT_STEP_TIME = 10; /* NOTE(review): unit not visible here, presumably ms */
+static const u32 ALPDANTE_FLASH_LOCK_ACCESS_TRY_QTY = 1;
+static const u32 ALPDANTE_FPPR_SAVE_REGS_CMD_COMPLETION_WAIT_STEPS_QTY = 200;
+static const u32 ALPDANTE_FPPR_NOPE_CMD_ID = 0;
+static const u32 ALPDANTE_FPPR_SAVE_REGISTERS_CMD_ID = 6;
+static const u32 ALPDANTE_FCAR_STATUS_MASK = 0xFFFFFF; /* low 24 bits of FCAR carry the command status */
+static const u32 ALPDANTE_SMALAR_AVAILABLE = 0;
+static const u32 ALPDANTE_SMALAR_IN_USE = 1;
+
+/* PROD TEST Parameters */
+static const u32 ALPDANTE_PROD_TEST_LOOPBACK = BIT(1);
+
+/* FPPR Macros */
+#define ALPDANTE_FPPR_CMD_STATUS(reg_val) ((reg_val) & ALPDANTE_FCAR_STATUS_MASK)
+
+/* AXCmem Registers locations (alpx_axcmem_loc triplets; field meaning is defined in alpx_axcmem.h, not visible here) */
+
+static const struct alpx_axcmem_loc ALPDANTE_FIR_LOC = {1,0,2};
+static const struct alpx_axcmem_loc ALPDANTE_CSPPR_LOC = {1,3,2};
+
+static const struct alpx_axcmem_loc ALPDANTE_CLKPR_LOC = {1,3,1};
+#define DEF_ALPDANTE_CLKPR_LOC {1,3,1} /* literal duplicate of ALPDANTE_CLKPR_LOC for use in static initializers -- keep in sync */
+
+static const struct alpx_axcmem_loc ALPDANTE_FPPR_LOC = {1,4,0};
+static const struct alpx_axcmem_loc ALPDANTE_ERRTST_LOC = {1,4,3};
+
+
+static const struct alpx_axcmem_loc ALPDANTE_UBLAZE_VERSION_LOC = {3,1,2};
+static const struct alpx_axcmem_loc ALPDANTE_DESIGN_BLOC_VERSION_LOC = {3,1,0};
+static const struct alpx_axcmem_loc ALPDANTE_LOW_SERIAL_NUM_LOC = {3,2,0};
+static const struct alpx_axcmem_loc ALPDANTE_HIGH_SERIAL_NUM_LOC = {3,3,0};
+
+static const struct alpx_axcmem_loc ALPDANTE_SRConfig_LOC = {0,6,2};
+static const struct alpx_axcmem_loc ALPDANTE_Frequency_Measure_LOC = {0,6,0};
+#define DEF_ALPDANTE_Frequency_Measure_LOC {0,6,0} /* literal duplicate of ALPDANTE_Frequency_Measure_LOC -- keep in sync */
+
+static const struct alpx_axcmem_loc ALPDANTE_Clock_Status_LOC = {0,3,0};
+#define DEF_ALPDANTE_Clock_Status_LOC {0,3,0} /* literal duplicate of ALPDANTE_Clock_Status_LOC -- keep in sync */
+static const struct alpx_axcmem_loc ALPDANTE_CLCSR_LOC = {1,1,0};
+
+static const struct alpx_axcmem_loc ALPDANTE_FSDR_LOC = {0,1,1};
+static const struct alpx_axcmem_loc ALPDANTE_FCR_LOC = {0,5,3};
+static const struct alpx_axcmem_loc ALPDANTE_FCAR_LOC = {0,6,0}; /* NOTE(review): same location as ALPDANTE_Frequency_Measure_LOC -- confirm this aliasing is intended */
+static const struct alpx_axcmem_loc ALPDANTE_SMALAR_LOC = {0,4,0};
+
+static const struct alpx_axcmem_loc ALPDANTE_CNAMPR_LOC = {1,3,3};
+static const struct alpx_axcmem_loc ALPDANTE_NAME_LOC = {0,1,0};
+
+static const struct alpx_axcmem_loc ALPDANTE_SAMPLE_COUNT_LOC = {3,0,0};
+
+static const struct alpx_axcmem_loc ALPDANTE_PROD_TEST_LOC = {1,0,0};
+
+
+/* NETWORK NAME support */
+
+#define ALPDANTE_NETWORK_NAME_LENGTH 29
+
+/* Controls support */
+
+/* Clk controls support */
+#define ALPDANTE_CLK_MANAGER_CONFIG_CLK_SRC_QTY 2
+#define ALPDANTE_CLK_MANAGER_SOURCE_INTERNAL 0
+#define ALPDANTE_CLK_MANAGER_SOURCE_SIC 1
+
+static const char* alpdante_control_choice_clk_src_entries[ALPDANTE_CLK_MANAGER_CONFIG_CLK_SRC_QTY] __attribute__((unused)) = {
+ "Internal", /* Only this one at the moment */
+ "SIC"
+};
+
+/* Same order as the source constants above -- index-paired with the label array */
+static u32 alpdante_control_choice_clk_src_entries_values[ALPDANTE_CLK_MANAGER_CONFIG_CLK_SRC_QTY] __attribute__((unused)) = {
+ ALPDANTE_CLK_MANAGER_SOURCE_INTERNAL, /* Only this one at the moment */
+ ALPDANTE_CLK_MANAGER_SOURCE_SIC,
+};
+
+
+#if 0 /* disabled draft of a Dante clock-value enum; references ALPDANTE_CLK_MANAGER_CLK_VALUE_* -- presumably not (yet) defined, confirm before enabling */
+#define ALPDANTE_CLK_MANAGER_CLK_VALUES_QTY 6
+static const char* alpdante_control_choice_current_clk_values_entries[ALPDANTE_CLK_MANAGER_CLK_VALUES_QTY] __attribute__((unused)) = {
+ "44.1kHz", "48kHz",
+ "88.2kHz", "96kHz",
+ "176,4kHz","192kHz"
+};
+/* Same order than the constant values above */
+static u32 alpdante_control_choice_current_clk_values_entries_values[ALPDANTE_CLK_MANAGER_CLK_VALUES_QTY] __attribute__((unused)) = {
+ALPDANTE_CLK_MANAGER_CLK_VALUE_44_1K,
+ALPDANTE_CLK_MANAGER_CLK_VALUE_48K,
+
+ALPDANTE_CLK_MANAGER_CLK_VALUE_88_2K,
+ALPDANTE_CLK_MANAGER_CLK_VALUE_96K,
+
+ALPDANTE_CLK_MANAGER_CLK_VALUE_176_4K,
+ALPDANTE_CLK_MANAGER_CLK_VALUE_192K ,
+};
+#endif
+
+/* Clock source register access: bit fields within the Clock Status register */
+#define ALPDANTE_Clock_Status_CLK_SRC_MASK GENMASK(2, 2)
+#define ALPDANTE_Clock_Status_CLK_SRC_POS 2
+
+#define ALPDANTE_Clock_Status_Sample_Rate_MASK GENMASK(7, 5) /* fix: GENMASK(high, low); was GENMASK(5,7), an invalid (empty) mask for the bits-7..5 field at POS 5 */
+#define ALPDANTE_Clock_Status_Sample_Rate_POS 5
+
+static struct alpx_control_descriptor alpdante_control_descriptors[] = { /* Dante exposes only clock-status controls, all read-only */
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_AXCMEM_REL_CHOICE,
+ .base = 0,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.axcmem_rel_choice = {
+ .base_loc = DEF_ALPDANTE_CLKPR_LOC,
+ .reg_loc = DEF_ALPDANTE_Clock_Status_LOC,
+ .mask = ALPDANTE_Clock_Status_CLK_SRC_MASK,
+ .pos = ALPDANTE_Clock_Status_CLK_SRC_POS,
+ .getter = alpx_axcmem_getRegU8Value_ptr,
+ .setter = NULL, /* read-only: no setter */
+ .entries = alpdante_control_choice_clk_src_entries,
+ .entries_values = alpdante_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alpdante_control_choice_clk_src_entries),
+ },
+ },
+ /* Read Measured Frequency Current Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_AXCMEM_REL_VALUE,
+ .base = 0,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.axcmem_rel_value = {
+ .base_loc = DEF_ALPDANTE_CLKPR_LOC,
+ .reg_loc = DEF_ALPDANTE_Frequency_Measure_LOC,
+ .mask = 0xFF,
+ .pos = 0,
+ .min = 0,
+ .max = 192, /* NOTE(review): range suggests the value is expressed in kHz -- confirm */
+ .getter = alpx_axcmem_getRegU8Value_ptr,
+ .setter = NULL,
+ },
+ },
+/* Clock Fail over priority 0 TOP : a constant since no failover yet */
+ {
+ .type = ALPX_CONTROL_TYPE_CONSTANT,
+ .base = 0,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.constant = {
+ .value = ALPDANTE_CLK_MANAGER_SOURCE_INTERNAL,
+ },
+ },
+};
+
+/* Alp DANTE Variant: no GPIOs, no MTD flash partitions exposed; .features left implicit (0) */
+static struct alpx_variant alpx_dante_variant __attribute__((unused)) = {
+ .shortname = "AlpDANTE",
+ .longname = "Alp DANTE",
+ .model = ALPX_VARIANT_MODEL_ALPDANTE,
+ .mixername = "AlpX-DANTE_Mix",
+ .capture_hw = &alpdante_hardware_specs,
+ .playback_hw = &alpdante_hardware_specs,
+ .gpios = {
+ .base = 0,
+ .inputs_reg_offset = 0,
+ .inputs_qty = 0,
+ .outputs_reg_offset = 0,
+ .outputs_qty = 0,
+ },
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .flash_partitions.partitions = NULL, /* no user-visible flash on this variant */
+ .flash_partitions.qty = 0,
+ .flash_partitions.qty_for_fw_update = 0,
+ .control_descriptors = alpdante_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alpdante_control_descriptors),
+};
+
+
+#endif /* _ALPX_VARIANTS_DANTE_H_ */
diff --git a/snd-alpx/alpx_variants_dead.h b/snd-alpx/alpx_variants_dead.h
new file mode 100644
index 0000000..ad7ee87
--- /dev/null
+++ b/snd-alpx/alpx_variants_dead.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_DEAD_H_
+#define _ALPX_VARIANTS_DEAD_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+
+/* DEAD Variant : used for DEAD cards : invalid production zones in Flash (minimal recovery variant: no PCM, no controls) */
+static const struct alpx_variant alpx_dead_variant = {
+ .shortname = "AlpDEAD",
+ .longname = "Alp DEAD",
+ .model = ALPX_VARIANT_DEAD,
+ .mixername = "Alp DEAD_Mix",
+ .features = 0,
+ .control_descriptors = NULL,
+ .control_descriptors_count = 0,
+
+ .capture_hw = NULL, /* no audio streaming on a DEAD card */
+ .playback_hw = NULL,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = 0,
+ .inputs_reg_offset = 0,
+ .inputs_qty = 0,
+ .outputs_reg_offset = 0,
+ .outputs_qty = 0,
+ },
+
+ .flash_partitions.partitions = alpx_dead_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_dead_mtd_partitions), /* .qty_for_fw_update left implicit (0): presumably no user FW update on a DEAD card -- confirm */
+};
+
+#endif /* _ALPX_VARIANTS_DEAD_H_ */
diff --git a/snd-alpx/alpx_variants_madi.h b/snd-alpx/alpx_variants_madi.h
new file mode 100644
index 0000000..d269e9f
--- /dev/null
+++ b/snd-alpx/alpx_variants_madi.h
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_MADI_H_
+#define _ALPX_VARIANTS_MADI_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+/* MADI: 64 in / 64 out, continuous rates 8 kHz .. 192 kHz */
+static struct snd_pcm_hardware alpmadi_hardware_specs = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 64,
+ .channels_max = 64,
+ .buffer_bytes_max = SZ_1M * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* min latency 1ms */
+ .period_bytes_max = SZ_1M, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* Alp MADI Variant. NOTE(review): .features and .control_descriptors are left implicit (0/NULL) -- confirm MADI exposes no controls */
+static struct alpx_variant alpx_madi_variant __attribute__((unused)) = {
+ .shortname = "AlpMADI",
+ .longname = "Alp MADI",
+ .model = ALPX_VARIANT_MODEL_MADI,
+ .mixername = "AlpX-MADI_Mix",
+ .capture_hw = &alpmadi_hardware_specs,
+ .playback_hw = &alpmadi_hardware_specs,
+ .gpios = {
+ .base = 0,
+ .inputs_reg_offset = 0,
+ .inputs_qty = 0,
+ .outputs_reg_offset = 0,
+ .outputs_qty = 0,
+ },
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+/* Alp MADI Loopback: same PCM caps as MADI; .features and .control_descriptors left implicit (0/NULL) */
+
+static struct alpx_variant alpx_madi_loopback_variant __attribute__((unused)) = {
+ .shortname = "AlpLoopback",
+ .longname = "Alp Loopback",
+ .model = ALPX_VARIANT_MODEL_MADI_LOOPBACK,
+ .mixername = "AlpX-MADI-Loopback_Mix",
+ .capture_hw = &alpmadi_hardware_specs,
+ .playback_hw = &alpmadi_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = 0,
+ .inputs_reg_offset = 0,
+ .inputs_qty = 0,
+ .outputs_reg_offset = 0,
+ .outputs_qty = 0,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+#endif /* _ALPX_VARIANTS_MADI_H_ */
diff --git a/snd-alpx/alpx_variants_mc.h b/snd-alpx/alpx_variants_mc.h
new file mode 100644
index 0000000..aa32035
--- /dev/null
+++ b/snd-alpx/alpx_variants_mc.h
@@ -0,0 +1,1475 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_MC_H_
+#define _ALPX_VARIANTS_MC_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+
+/* 882 */
+/* Alp882 Commons */
+/*
+ * User-visible labels for the Alp882 clock-source enum control.
+ * Index i pairs with alp882_control_choice_clk_src_entries_values[i].
+ */
+static const char *alp882_control_choice_clk_src_entries[ALP882_CLK_MANAGER_CONFIG_CLK_SRC_QTY] = {
+ "Internal",
+ "SIC",
+ "Word Clk",
+ "AES Syn",
+ "AES Aud 1/2",
+ "AES Aud 3/4",
+ "AES Aud 5/6",
+ "AES Aud 7/8",
+};
+
+/* Register values in the same order as the label entries above. */
+static u32 alp882_control_choice_clk_src_entries_values[ALP882_CLK_MANAGER_CONFIG_CLK_SRC_QTY] = {
+ ALPMC_CLK_MANAGER_SOURCE_INTERNAL,
+ ALPMC_CLK_MANAGER_SOURCE_SIC,
+ ALPMC_CLK_MANAGER_SOURCE_WCLK_IN,
+ ALPMC_CLK_MANAGER_SOURCE_AES_SYNC,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_12,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_34,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_56,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_78,
+};
+
+/*
+ * PCM capabilities shared by capture and playback on the Alp882:
+ * 16 fixed channels, 32-bit little-endian samples, 8 kHz - 192 kHz.
+ */
+static struct snd_pcm_hardware alp882_hardware_specs = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 16,
+ .channels_max = 16,
+ .buffer_bytes_max = 256 * SZ_1K * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* min latency 1ms */
+ .period_bytes_max = 256 * SZ_1K, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* Alp 882 LINE Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alpmc_line_control_codec_gains_scale, ALPMC_LINE_ANALOG_GAIN_MIN_cdB, ALPMC_LINE_ANALOG_GAIN_MAX_cdB);
+
+/*
+ * ALSA control table for the Alp882 LINE variant.
+ * WARNING: the order of entries determines the control IDs seen by user
+ * space; RESERVED placeholders exist only to keep that order stable.
+ */
+static struct alpx_control_descriptor alp882_line_control_descriptors[] = {
+ /* Gain */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP882_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ /* VOLATILE: value tracks the hardware clock and may change on its own. */
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED to keep Ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Auto AES SRC disable */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 5/6 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_5_6_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_5_6_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 7/8 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_7_8_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_7_8_POS,
+ },
+ },
+ /* Codec Input Gain for LINE Option*/
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alpmc_line_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_LINE_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_LINE_ANALOG_GAIN_MAX_REG,
+ .lines_count = 8,
+ },
+ },
+};
+
+/*
+ * Board description for the Alp882e LINE variant (GPIOs enabled).
+ * Marked unused because this header is included by files that may not
+ * reference every variant.
+ */
+static struct alpx_variant alp882_line_variant __attribute__((unused)) = {
+ .shortname = "Alp882e",
+ .longname = "Alp 882e",
+ .model = ALPX_VARIANT_MODEL_ALP882,
+ .mixername = "Alp882e_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp882_line_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp882_line_control_descriptors),
+
+ /* Capture and playback share the same PCM capabilities. */
+ .capture_hw = &alp882_hardware_specs,
+ .playback_hw = &alp882_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ /* Only the first partition is touched by a firmware update. */
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+/* Alp 882 - MIC Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alpmc_mic_control_codec_gains_scale, ALPMC_MIC_ANALOG_GAIN_MIN_cdB, ALPMC_MIC_ANALOG_GAIN_MAX_cdB);
+
+/*
+ * ALSA control table for the Alp882 MIC variant. Identical to the LINE
+ * table except for the codec input-gain scale/range and the trailing
+ * per-channel phantom-power flags.
+ * WARNING: the order of entries determines the control IDs seen by user
+ * space; RESERVED placeholders exist only to keep that order stable.
+ */
+static struct alpx_control_descriptor alp882_mic_control_descriptors[] = {
+ /* Gain */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP882_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP882_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP882_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ /* VOLATILE: value tracks the hardware clock and may change on its own. */
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED now to keep ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp882_control_choice_clk_src_entries,
+ .entries_values = alp882_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp882_control_choice_clk_src_entries),
+ },
+ },
+ /* Auto AES SRC disable */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 5/6 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_5_6_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_5_6_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 7/8 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_7_8_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_7_8_POS,
+ },
+ },
+ /* Codec Input Gain for MIC Option*/
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec MIC Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alpmc_mic_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_MIC_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_MIC_ANALOG_GAIN_MAX_REG,
+ .lines_count = 8,
+ },
+ },
+ /** MIC Phantoms **/
+ /*
+ * NOTE(review): these entries use ALPX_CONTROL_TYPE_FLAG but initialise
+ * .data.mic_flags rather than .data.flag — presumably the two union
+ * members share a layout; confirm against the union in alpx.h.
+ */
+ /* MIC Phantom 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_1_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_1_POS,
+ },
+ },
+ /* MIC Phantom 2 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_2_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_2_POS,
+ },
+ },
+ /* MIC Phantom 3 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_3_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_3_POS,
+ },
+ },
+ /* MIC Phantom 4 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh4",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_4_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_4_POS,
+ },
+ },
+ /* MIC Phantom 5 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh5",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_5_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_5_POS,
+ },
+ },
+ /* MIC Phantom 6 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh6",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_6_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_6_POS,
+ },
+ },
+ /* MIC Phantom 7 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh7",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_7_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_7_POS,
+ },
+ },
+ /* MIC Phantom 8 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh8",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_8_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_8_POS,
+ },
+ },
+};
+
+
+/*
+ * Board description for the Alp882e MIC variant (GPIOs enabled).
+ * Marked unused because this header is included by files that may not
+ * reference every variant.
+ */
+static struct alpx_variant alp882_mic_variant __attribute__((unused)) = {
+ .shortname = "Alp882e-MIC",
+ .longname = "Alp 882e MIC",
+ .model = ALPX_VARIANT_MODEL_ALP882_MIC,
+ .mixername = "Alp882e_MIC_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp882_mic_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp882_mic_control_descriptors),
+
+ /* Capture and playback share the same PCM capabilities. */
+ .capture_hw = &alp882_hardware_specs,
+ .playback_hw = &alp882_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ /* Only the first partition is touched by a firmware update. */
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+/* 442 */
+/* Common clock sources : WARNING : Same as 882 for the moment */
+/*
+ * User-visible labels for the Alp442 clock-source enum control.
+ * Index i pairs with alp442_control_choice_clk_src_entries_values[i].
+ */
+static const char *alp442_control_choice_clk_src_entries[ALP442_CLK_MANAGER_CONFIG_CLK_SRC_QTY] = {
+ "Internal",
+ "SIC",
+ "Word Clk",
+ "AES Syn",
+ "AES Aud 1/2",
+ "AES Aud 3/4",
+};
+
+/* Register values in the same order as the label entries above. */
+static u32 alp442_control_choice_clk_src_entries_values[ALP442_CLK_MANAGER_CONFIG_CLK_SRC_QTY] = {
+ ALPMC_CLK_MANAGER_SOURCE_INTERNAL,
+ ALPMC_CLK_MANAGER_SOURCE_SIC,
+ ALPMC_CLK_MANAGER_SOURCE_WCLK_IN,
+ ALPMC_CLK_MANAGER_SOURCE_AES_SYNC,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_12,
+ ALPMC_CLK_MANAGER_SOURCE_AES_AUDIO_34, /* trailing comma added for consistency with the 882 table */
+};
+
+/*
+ * PCM capabilities shared by capture and playback on the Alp442:
+ * fixed channel count (DAW channel macro), 32-bit LE, 8 kHz - 192 kHz.
+ */
+static struct snd_pcm_hardware alp442_hardware_specs = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = ALP442_CHANNELS_DAW_COUNT,
+ .channels_max = ALP442_CHANNELS_DAW_COUNT,
+ .buffer_bytes_max = 128 * SZ_1K * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* min latency 1ms */
+ .period_bytes_max = 128 * SZ_1K, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* Alp 442 LINE Variant definition */
+
+
+static struct alpx_control_descriptor alp442_line_control_descriptors[] = {
+ /* Gain */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP442_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED to keep Ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Auto AES SRC disable */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ /* Codec Input Gain for LINE Option*/
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alpmc_line_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_LINE_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_LINE_ANALOG_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+};
+
+static struct alpx_variant alp442_line_variant __attribute__((unused)) = {
+ .shortname = "Alp442e",
+ .longname = "Alp 442e",
+ .model = ALPX_VARIANT_MODEL_ALP442,
+ .mixername = "Alp442e_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp442_line_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp442_line_control_descriptors),
+
+ .capture_hw = &alp442_hardware_specs,
+ .playback_hw = &alp442_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {/*Same as Alp882*/
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+/* Alp 442 - MIC Variant definition */
+
+static struct alpx_control_descriptor alp442_mic_control_descriptors[] = {
+ /* Gain */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_DAW_OFFSET),
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_DAW_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_ANALOG_OFFSET),
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_IN_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_IN_AES_OFFSET),
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_AES_COUNT,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_ANALOG_OFFSET),
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ {/*a 882 with halved QTY*/
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_AES_OFFSET),
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_AES_COUNT,
+ },
+ },
+ {/*a 882 with halved QTY*/
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALPMC_AMPLI_OUT_BASE +
+ ALPMC_GAIN_TABLE_BASE(ALP442_CHANNELS_OUT_DAW_OFFSET),
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_DAW_COUNT,
+ },
+ },
+ /* Mixer a 882 with halved QTY Mixer : square*/
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALPMC_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP442_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+ /* Clock sources Read Only As 882 except for AES due to 4 entries only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_SRC_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Current Clock Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALPMC_CLK_MANAGER_CONFIG_REG,
+ .mask = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALPMC_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+
+ /* Current Clock factor values Read Only, RESERVED now to keep ids order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALPMC_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk Src Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp442_control_choice_clk_src_entries,
+ .entries_values = alp442_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp442_control_choice_clk_src_entries),
+ },
+ },
+ /* Auto AES SRC disable SAME as Alp882 except for qty*/
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 1/2 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_1_2_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_1_2_POS,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "AES SRC 3/4 Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALPMC_CODEC_AES_SRC_CONTROL_REG,
+ .mask = ALPMC_CODEC_AES_SRC_3_4_MASK,
+ .pos = ALPMC_CODEC_AES_SRC_3_4_POS,
+ },
+ },
+ /* Codec Input Gain for MIC Option SAME as 882 execpt for qty*/
+ {
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "Codec MIC Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALPMC_CODEC_PGA_REGS,
+ .gains_scale = alpmc_mic_control_codec_gains_scale,
+ .reg_gain_min = ALPMC_MIC_ANALOG_GAIN_MIN_REG,
+ .reg_gain_max = ALPMC_MIC_ANALOG_GAIN_MAX_REG,
+ .lines_count = ALP442_CHANNELS_ANALOG_COUNT,
+ },
+ },
+ /** MIC Phantoms SAME as 882 except for qty **/
+ /* MIC Phantom 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_1_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_1_POS,
+ },
+ },
+ /* MIC Phantom 2 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_2_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_2_POS,
+ },
+ },
+ /* MIC Phantom 3 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_3_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_3_POS,
+ },
+ },
+ /* MIC Phantom 4 */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALPMC_CODEC_CTRL_BASE,
+ .prefix = "McPh4",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALPMC_INPUT_PARAMS_REG,
+ .mask = ALPMC_MIC_INPUT_PH_4_MASK,
+ .pos = ALPMC_MIC_INPUT_PH_4_POS,
+ },
+ },
+};
+
+
+static struct alpx_variant alp442_mic_variant __attribute__((unused)) = {
+ .shortname = "Alp442e-MIC",
+ .longname = "Alp 442e MIC",
+ .model = ALPX_VARIANT_MODEL_ALP442_MIC,
+ .mixername = "Alp442e_MIC_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp442_mic_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp442_mic_control_descriptors),
+
+ .capture_hw = &alp442_hardware_specs,
+ .playback_hw = &alp442_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {/* Same as 882 */
+ .base = ALPMC_GPIO_BASE,
+ .inputs_reg_offset = ALPMC_GPIO_INPUT_REG,
+ .inputs_qty = ALPMC_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALPMC_GPIO_OUTPUT_REG,
+ .outputs_qty = ALPMC_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+#endif /* _ALPX_VARIANTS_MC_H_ */
diff --git a/snd-alpx/alpx_variants_stereo.h b/snd-alpx/alpx_variants_stereo.h
new file mode 100644
index 0000000..aae4120
--- /dev/null
+++ b/snd-alpx/alpx_variants_stereo.h
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_STEREO_H_
+#define _ALPX_VARIANTS_STEREO_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+/* Specifications of stereo cards */
+
+/* Alp222 clock controls */
+static const char *alp222_control_choice_clk_src_entries[ALP222_CLK_MANAGER_CLK_SRC_QTY] = {
+ "Internal","SIC","Word Clk", "AES Syn", "AES Aud",
+};
+
+/* Same order than the constants values */
+static u32 alp222_control_choice_clk_src_entries_values[ALP222_CLK_MANAGER_CLK_SRC_QTY] = {
+ ALP222_CLK_MANAGER_CLK_SRC_INTERNAL,
+ ALP222_CLK_MANAGER_CLK_SRC_SIC,
+ ALP222_CLK_MANAGER_CLK_SRC_WCLK,
+ ALP222_CLK_MANAGER_CLK_SRC_AES_SYNC,
+ ALP222_CLK_MANAGER_CLK_SRC_AES_AUDIO,
+};
+
+
+/** 222 **/
+static struct snd_pcm_hardware alp222_hardware_specs = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 4,
+ .channels_max = 4,
+ .buffer_bytes_max = 64 * SZ_1K * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* min latency 1ms */
+ .period_bytes_max = 64 * SZ_1K, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* Alp 222e LINE */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp222_line_analog_gains_scale,
+ ALP222_ANALOG_EQ_GAIN_MIN_cdB,
+ ALP222_ANALOG_EQ_GAIN_MAX_cdB);
+
+/* Note : use ALPX_GAIN_REG(0) to jump to the actual parameters's registers */
+static struct alpx_control_descriptor alp222_control_descriptors[] = {
+ /* INPUT Amplification */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_CAPTURE_AMPLI_BASE,
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_CAPTURE_AMPLI_BASE,
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_PLAYBACK_AMPLI_BASE,
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+ /* OUTPUT Amplification*/
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_PLAYBACK_AMPLI_BASE,
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_PLAYBACK_AMPLI_BASE,
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_CAPTURE_AMPLI_BASE ,
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+
+ /* Analog Eq */
+{
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALP222_CODEC_PGA_REGS,
+ .gains_scale = alp222_line_analog_gains_scale,
+ .reg_gain_min = ALP222_ANALOG_EQ_GAIN_MIN_REG,
+ .reg_gain_max = ALP222_ANALOG_EQ_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+},
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALP222_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE|
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP222_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CLK_SRC_MASK,
+ .pos = ALP222_CLK_MANAGER_CLK_SRC_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+ /* Current Clock factor values Read Only : RESERVED to keep id order*/
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* AES SRC */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "AES SRC Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP222_CODEC_CTRL_ASRC_REG,
+ .mask = ALP222_CODEC_CTRL_ASRC_MASK,
+ .pos = ALP222_CODEC_CTRL_ASRC_POS,
+ },
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3 Bottom*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+};
+
+static struct alpx_variant alp222_variant __attribute__((unused)) = {
+ .shortname = "Alp222e",
+ .longname = "Alp 222e",
+ .model = ALPX_VARIANT_MODEL_ALP222,
+ .mixername = "Alp222e_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp222_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp222_control_descriptors),
+
+ .capture_hw = &alp222_hardware_specs,
+ .playback_hw = &alp222_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALP222_GPIO_BASE,
+ .inputs_reg_offset = ALP222_GPIO_INPUT_REG,
+ .inputs_qty = ALP222_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALP222_GPIO_OUTPUT_REG,
+ .outputs_qty = ALP222_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+/* Alp 222e-MIC Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp222_mic_control_scale,
+ ALP222_MIC_GAIN_MIN_cdB,
+ ALP222_MIC_GAIN_MAX_cdB);
+
+/* Alp222 MIC controls : superset of Alp222 controls. Don't know how to embed an array into antother as first items ? */
+/* So I've copied the items !! */
+static struct alpx_control_descriptor alp222_mic_control_descriptors[] = {
+ /* INPUT Amplification */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_CAPTURE_AMPLI_BASE,
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_CAPTURE_AMPLI_BASE,
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_PLAYBACK_AMPLI_BASE,
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+ /* OUTPUT Amplification*/
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_PLAYBACK_AMPLI_BASE,
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_PLAYBACK_AMPLI_BASE,
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_CAPTURE_AMPLI_BASE ,
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+
+ /* Analog Eq */
+{
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALP222_CODEC_PGA_REGS,
+ .gains_scale = alp222_line_analog_gains_scale,
+ .reg_gain_min = ALP222_ANALOG_EQ_GAIN_MIN_REG,
+ .reg_gain_max = ALP222_ANALOG_EQ_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+},
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALP222_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP222_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CLK_SRC_MASK,
+ .pos = ALP222_CLK_MANAGER_CLK_SRC_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+ /* Current Clock factor values Read Only RESERVED to keep id order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* AES SRC */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "AES SRC",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP222_CODEC_CTRL_ASRC_REG,
+ .mask = ALP222_CODEC_CTRL_ASRC_MASK,
+ .pos = ALP222_CODEC_CTRL_ASRC_POS,
+ },
+ },
+
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+ /* Clock Failover priority 3 Bottom*/
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp222_control_choice_clk_src_entries,
+ .entries_values = alp222_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_control_choice_clk_src_entries),
+ },
+ },
+/* MIC INPUT Amplification (the gains MUST BE KEPT FIRST, control initialization)*/
+ {
+ .type = ALPX_CONTROL_TYPE_GAINS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McGaL Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mic_gains = {
+ .min = ALP222_MIC_GAINS_MIN_REG_VAL,
+ .max = ALP222_MIC_GAINS_MAX_REG_VAL,
+ .lines_count = 1, /* Only one line per mic side on 222e*/
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_GAIN_L_MASK,
+ .pos = ALP222_MIC_GAIN_L_POS,
+ .width = ALP222_MIC_GAIN_WIDTH,
+ .gains_scale = alp222_mic_control_scale,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_GAINS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McGaR Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mic_gains = {
+ .min = ALP222_MIC_GAINS_MIN_REG_VAL,
+ .max = ALP222_MIC_GAINS_MAX_REG_VAL,
+ .lines_count = 1, /* Only one line per mic side on 222e*/
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_GAIN_R_MASK,
+ .pos = ALP222_MIC_GAIN_R_POS,
+ .width = ALP222_MIC_GAIN_WIDTH,
+ .gains_scale = alp222_mic_control_scale,
+ },
+ },
+ /* MIC Phantom */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McPhL",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_PH_L_MASK,
+ .pos = ALP222_MIC_PH_L_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McPhR",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_PH_R_MASK,
+ .pos = ALP222_MIC_PH_R_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ /* MIC Enable*/
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McEnL",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_EN_L_MASK,
+ .pos = ALP222_MIC_EN_L_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McEnR",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_EN_R_MASK,
+ .pos = ALP222_MIC_EN_R_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+};
+
+
+static struct alpx_variant alp222_mic_variant __attribute__((unused)) = {
+ .shortname = "Alp222e-MIC",
+ .longname = "Alp 222e-MIC",
+ .model = ALPX_VARIANT_MODEL_ALP222_MIC,
+ .mixername = "Alp222e_MIC_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp222_mic_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp222_mic_control_descriptors),
+
+ .capture_hw = &alp222_hardware_specs,
+ .playback_hw = &alp222_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALP222_GPIO_BASE,
+ .inputs_reg_offset = ALP222_GPIO_INPUT_REG,
+ .inputs_qty = ALP222_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALP222_GPIO_OUTPUT_REG,
+ .outputs_qty = ALP222_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+#endif /* _ALPX_VARIANTS_STEREO_H_ */
diff --git a/snd-alpx/alpx_variants_stereo_apps_preFW283.h b/snd-alpx/alpx_variants_stereo_apps_preFW283.h
new file mode 100644
index 0000000..d5019f9
--- /dev/null
+++ b/snd-alpx/alpx_variants_stereo_apps_preFW283.h
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#ifndef _ALPX_VARIANTS_STEREO_APP_PREFW283_H_
+#define _ALPX_VARIANTS_STEREO_APP_PREFW283_H_
+
+#include "alpx.h"
+#include "alpx_reg.h"
+
+#include <sound/tlv.h>
+#include "alpx_variants_common.h"
+
+/* Specifications of stereo cards */
+/** Pre FW V283 for compatibility with OLD applications Translation tables FOR CLOCK sources !!*/
+
+/* Alp222 clock controls */
+static const char *alp222_app_preFW283_control_choice_clk_src_entries[ALP222_APPS_PREFW283_CLK_MANAGER_CLK_SRC_QTY] = {
+ "Word Clk", "AES Syn", "AES Aud", "Internal",
+};
+
+/* Index is the application-side index, items are the register ones */
+static u32 alp222_app_preFW283_control_choice_clk_src_entries_values[ALP222_APPS_PREFW283_CLK_MANAGER_CLK_SRC_QTY] = {
+ ALP222_CLK_MANAGER_CLK_SRC_WCLK,
+ ALP222_CLK_MANAGER_CLK_SRC_AES_SYNC,
+ ALP222_CLK_MANAGER_CLK_SRC_AES_AUDIO,
+ ALP222_CLK_MANAGER_CLK_SRC_INTERNAL,
+};
+
+/* Indexed by the register => application side value */
+static u32 alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values [ALP222_CLK_MANAGER_CLK_SRC_QTY] = {
+ ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_INTERNAL,
+ ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_INTERNAL, /* FORBIDDEN VALUE => INTERNAL !!*/
+ ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_WCLK,
+ ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_AES_SYNC,
+ ALP222_CLK_MANAGER_APPSPREFW283_CLK_SRC_AES_AUDIO,
+};
+
+/** 222 **/
+static struct snd_pcm_hardware alp222_app_preFW283_hardware_specs = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 4,
+ .channels_max = 4,
+ .buffer_bytes_max = 64 * SZ_1K * 4, /* period_bytes_max * periods_max */
+ .period_bytes_min = 48, /* min latency 1ms */
+ .period_bytes_max = 64 * SZ_1K, /* 20ms at 192kHz * nchans * 4B, rounded at 2^n */
+ .periods_min = 1,
+ .periods_max = 4,
+};
+
+
+/* Alp 222e LINE */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp222_app_preFW283_line_analog_gains_scale,
+ ALP222_ANALOG_EQ_GAIN_MIN_cdB,
+ ALP222_ANALOG_EQ_GAIN_MAX_cdB);
+
+/* Note: use ALPX_GAIN_REG(0) to jump to the actual parameters' registers */
+static struct alpx_control_descriptor alp222_app_preFW283_line_control_descriptors[] = {
+ /* INPUT Amplification */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_CAPTURE_AMPLI_BASE,
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_CAPTURE_AMPLI_BASE,
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_PLAYBACK_AMPLI_BASE,
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+ /* OUTPUT Amplification*/
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_PLAYBACK_AMPLI_BASE,
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_PLAYBACK_AMPLI_BASE,
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_CAPTURE_AMPLI_BASE ,
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+
+ /* Analog Eq */
+{
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALP222_CODEC_PGA_REGS,
+ .gains_scale = alp222_app_preFW283_line_analog_gains_scale,
+ .reg_gain_min = ALP222_ANALOG_EQ_GAIN_MIN_REG,
+ .reg_gain_max = ALP222_ANALOG_EQ_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+},
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALP222_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE|
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP222_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.translated_choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CLK_SRC_MASK,
+ .pos = ALP222_CLK_MANAGER_CLK_SRC_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+ /* Current Clock factor values Read Only : RESERVED to keep id order*/
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* AES SRC */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "AES SRC Disable",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP222_CODEC_CTRL_ASRC_REG,
+ .mask = ALP222_CODEC_CTRL_ASRC_MASK,
+ .pos = ALP222_CODEC_CTRL_ASRC_POS,
+ },
+ },
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 3 Bottom*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+};
+
+static struct alpx_variant alp222_app_preFW283_variant __attribute__((unused)) = {
+ .shortname = "Alp222e",
+ .longname = "Alp 222e-comp",
+ .model = ALPX_VARIANT_MODEL_ALP222,
+ .mixername = "Alp222e_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp222_app_preFW283_line_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp222_app_preFW283_line_control_descriptors),
+
+ .capture_hw = &alp222_app_preFW283_hardware_specs,
+ .playback_hw = &alp222_app_preFW283_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALP222_GPIO_BASE,
+ .inputs_reg_offset = ALP222_GPIO_INPUT_REG,
+ .inputs_qty = ALP222_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALP222_GPIO_OUTPUT_REG,
+ .outputs_qty = ALP222_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+
+/* Alp 222e-MIC Variant definition */
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(alp222_app_preFW283_mic_control_scale,
+ ALP222_MIC_GAIN_MIN_cdB,
+ ALP222_MIC_GAIN_MAX_cdB);
+
+/* Alp222 MIC controls: superset of Alp222 controls. Don't know how to embed an array into another as first items? */
+/* So I've copied the items !! */
+static struct alpx_control_descriptor alp222_app_preFW283_mic_control_descriptors[] = {
+ /* INPUT Amplification */
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_CAPTURE_AMPLI_BASE,
+ .prefix = "Ana Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_CAPTURE_AMPLI_BASE,
+ .prefix = "AES Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_PLAYBACK_AMPLI_BASE,
+ .prefix = "DAW Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+ /* OUTPUT Amplification*/
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_ANALOG_PLAYBACK_AMPLI_BASE,
+ .prefix = "Ana Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_AES3_PLAYBACK_AMPLI_BASE,
+ .prefix = "AES Playback",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_AES3_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_AMPLIFIER,
+ .base = ALP222_DAW_CAPTURE_AMPLI_BASE ,
+ .prefix = "DAW Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.ampli = {
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ .lines_count = ALP222_DAW_QTY,
+ },
+ },
+
+ /* Analog Eq */
+{
+ .type = ALPX_CONTROL_TYPE_ANALOG_EQ,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "Codec Analog Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.codec = {
+ .offset = ALP222_CODEC_PGA_REGS,
+ .gains_scale = alp222_app_preFW283_line_analog_gains_scale,
+ .reg_gain_min = ALP222_ANALOG_EQ_GAIN_MIN_REG,
+ .reg_gain_max = ALP222_ANALOG_EQ_GAIN_MAX_REG,
+ .lines_count = ALP222_ANALOG_QTY,
+ },
+},
+ /* Mixer */
+ {
+ .type = ALPX_CONTROL_TYPE_MIXER,
+ .base = ALP222_MIXER_BASE,
+ .prefix = "Mxr",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mixer = {
+ .lines_count = ALP222_MIXER_SIZE,
+ .gains_scale = alpxxx_line_digital_gains_scale,
+ .reg_gain_min = ALP_AMPLIFIER_GAIN_MIN_REG,
+ .reg_gain_max = ALP_AMPLIFIER_GAIN_MAX_REG,
+ },
+ },
+
+ /* Clock sources Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Src",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .data.translated_choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CLK_SRC_MASK,
+ .pos = ALP222_CLK_MANAGER_CLK_SRC_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Current Clock BASE Value Read Only */
+ {
+ .type = ALPX_CONTROL_TYPE_CHOICE,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "Clk Eff",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .data.choice = {
+ .offset = ALP222_CLK_MANAGER_CONFIG_REG,
+ .mask = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_MASK,
+ .pos = ALP222_CLK_MANAGER_CONFIG_CLK_CURRENT_VALUE_POS,
+ .entries = alpxxx_control_choice_current_clk_values_entries,
+ .entries_values = alpxxx_control_choice_current_clk_values_entries_values,
+ .entries_count = ARRAY_SIZE(alpxxx_control_choice_current_clk_values_entries),
+ },
+ },
+ /* Current Clock factor values Read Only RESERVED to keep id order */
+ {
+ .type = ALPX_CONTROL_RESERVED,
+ .base = ALP222_CLK_MANAGER_BASE,
+ .prefix = "RESERVED",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ },
+ /* AES SRC */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP222_CODEC_CTRL_BASE,
+ .prefix = "AES SRC",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP222_CODEC_CTRL_ASRC_REG,
+ .mask = ALP222_CODEC_CTRL_ASRC_MASK,
+ .pos = ALP222_CODEC_CTRL_ASRC_POS,
+ },
+ },
+
+ /* Clock UP on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Up",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_UP_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_UP_POS,
+ },
+ },
+ /* Clock DOWN on fail over enabled flag */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAG,
+ .base = ALP_PROC_BASE,
+ .prefix = "CkSc Down",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.flag = {
+ .offset = ALP_PROC_FWCONFIG_REG,
+ .mask = ALP_PROC_FWCONFIG_CLKSRC_DOWN_MASK,
+ .pos = ALP_PROC_FWCONFIG_CLKSRC_DOWN_POS,
+ },
+ },
+ /* Clock Failover priority 0 TOP*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P0",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO0_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO0_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 1 */
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P1",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO1_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO1_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 2*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P2",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO2_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO2_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+ /* Clock Failover priority 3 Bottom*/
+ {
+ .type = ALPX_CONTROL_TYPE_TRANSLATED_CHOICE,
+ .base = ALP_PROC_BASE,
+ .prefix = "Clk P3",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.translated_choice = {
+ .offset = ALP_PROC_CLK_SOURCE_PRIO_REG,
+ .mask = ALP_PROC_CLK_SOURCE_PRIO3_MASK,
+ .pos = ALP_PROC_CLK_SOURCE_PRIO3_POS,
+ .entries = alp222_app_preFW283_control_choice_clk_src_entries,
+ .entries_values = alp222_app_preFW283_control_choice_clk_src_entries_values,
+ .entries_count = ARRAY_SIZE(alp222_app_preFW283_control_choice_clk_src_entries),
+ .card_entries_values = alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values,
+ .card_entries_count = ARRAY_SIZE(alp222_app_preFW283_to_apps_control_choice_clk_src_entries_values)
+ },
+ },
+/* MIC INPUT Amplification (the gains MUST BE KEPT FIRST, control initialization)*/
+ {
+ .type = ALPX_CONTROL_TYPE_GAINS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McGaL Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mic_gains = {
+ .min = ALP222_MIC_GAINS_MIN_REG_VAL,
+ .max = ALP222_MIC_GAINS_MAX_REG_VAL,
+ .lines_count = 1, /* Only one line per mic side on 222e*/
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_GAIN_L_MASK,
+ .pos = ALP222_MIC_GAIN_L_POS,
+ .width = ALP222_MIC_GAIN_WIDTH,
+ .gains_scale = alp222_app_preFW283_mic_control_scale,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_GAINS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McGaR Capture",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .data.mic_gains = {
+ .min = ALP222_MIC_GAINS_MIN_REG_VAL,
+ .max = ALP222_MIC_GAINS_MAX_REG_VAL,
+ .lines_count = 1, /* Only one line per mic side on 222e*/
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_GAIN_R_MASK,
+ .pos = ALP222_MIC_GAIN_R_POS,
+ .width = ALP222_MIC_GAIN_WIDTH,
+ .gains_scale = alp222_app_preFW283_mic_control_scale,
+ },
+ },
+ /* MIC Phantom */
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McPhL",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_PH_L_MASK,
+ .pos = ALP222_MIC_PH_L_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McPhR",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_PH_R_MASK,
+ .pos = ALP222_MIC_PH_R_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ /* MIC Enable*/
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McEnL",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_EN_L_MASK,
+ .pos = ALP222_MIC_EN_L_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+ {
+ .type = ALPX_CONTROL_TYPE_FLAGS_EMBEDDED,
+ .base = ALP222_CONTROL_BASE,
+ .prefix = "McEnR",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .data.mic_flags = {
+ .offset = ALP222_MIC_CONTROL_REG,
+ .mask = ALP222_MIC_EN_R_MASK,
+ .pos = ALP222_MIC_EN_R_POS,
+ .lines_count = 1, // Should be if control items[i] was Ok : ALP222_ANALOG_QTY,
+ },
+ },
+};
+
+
+static struct alpx_variant alp222_app_preFW283_mic_variant __attribute__((unused)) = {
+ .shortname = "Alp222e-MIC",
+ .longname = "Alp 222e-MIC-comp",
+ .model = ALPX_VARIANT_MODEL_ALP222_MIC,
+ .mixername = "Alp222e_MIC_Mix",
+ .features = ALPX_VARIANT_FEATURE_GPIOS,
+ .control_descriptors = alp222_app_preFW283_mic_control_descriptors,
+ .control_descriptors_count = ARRAY_SIZE(alp222_app_preFW283_mic_control_descriptors),
+
+ .capture_hw = &alp222_app_preFW283_hardware_specs,
+ .playback_hw = &alp222_app_preFW283_hardware_specs,
+
+ .flash_golden_production_base = ALPxxx_FLASH_GOLDEN_PRODUCTION_BASE,
+
+ .gpios = {
+ .base = ALP222_GPIO_BASE,
+ .inputs_reg_offset = ALP222_GPIO_INPUT_REG,
+ .inputs_qty = ALP222_GPIO_INPUT_QTY,
+ .outputs_reg_offset = ALP222_GPIO_OUTPUT_REG,
+ .outputs_qty = ALP222_GPIO_OUTPUT_QTY,
+ },
+
+ .flash_partitions.partitions = alpx_mtd_partitions,
+ .flash_partitions.qty = ARRAY_SIZE(alpx_mtd_partitions),
+ .flash_partitions.qty_for_fw_update = 1,
+};
+
+#endif /* _ALPX_VARIANTS_STEREO_APP_PREFW283_H_ */
diff --git a/snd-alpx/alpx_version.h b/snd-alpx/alpx_version.h
new file mode 100644
index 0000000..511e092
--- /dev/null
+++ b/snd-alpx/alpx_version.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+/* Driver version definition */
+#define ALPX_MODULE_VERSION "3.4.3"
diff --git a/snd-alpx/alpx_xdma.c b/snd-alpx/alpx_xdma.c
new file mode 100644
index 0000000..c6b4bf2
--- /dev/null
+++ b/snd-alpx/alpx_xdma.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include <linux/version.h>
+
+#include "alpx_xdma.h"
+
+
+#if KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE
+#include "core/generic/6.3/amd_xdma.h"
+#include "include/6.3/amd_xdma.h"
+#else
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/dma/amd_xdma.h>
+#endif
+
+
+static struct xdma_chan_info alpx_xdma_chan_info[2] = {
+ [0].dir = DMA_MEM_TO_DEV,
+ [1].dir = DMA_DEV_TO_MEM,
+};
+
+struct alpx_xdma_platform_data {
+ struct resource resources[2];
+ struct dma_slave_map map[2];
+ struct xdma_platdata platdata;
+};
+
+struct platform_device *alpx_xdma_register(struct pci_dev *pci_dev)
+{
+ struct alpx_xdma_platform_data *data;
+ struct platform_device *xdma_pdev;
+ int nvectors, vector, ret;
+
+ data = devm_kzalloc(&pci_dev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ xdma_pdev = platform_device_alloc("xdma", PLATFORM_DEVID_AUTO);
+ if (!xdma_pdev)
+ return NULL;
+
+ data->resources[0].start = pci_resource_start(pci_dev, 2);
+ data->resources[0].end = pci_resource_end(pci_dev, 2);
+ data->resources[0].flags = IORESOURCE_MEM;
+
+ nvectors = pci_alloc_irq_vectors(pci_dev, 2, 2,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvectors < 0)
+ goto put_pdev;
+
+ vector = pci_irq_vector(pci_dev, 0);
+ data->resources[1].start = vector;
+ data->resources[1].end = vector + nvectors - 1;
+ data->resources[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(xdma_pdev, data->resources,
+ ARRAY_SIZE(data->resources));
+ if (ret)
+ goto free_irq_vectors;
+
+ data->map[0].devname = pci_name(pci_dev);
+ data->map[0].slave = "h2c-0";
+ data->map[0].param = &alpx_xdma_chan_info[0];
+
+ data->map[1].devname = pci_name(pci_dev);
+ data->map[1].slave = "c2h-0";
+ data->map[1].param = &alpx_xdma_chan_info[1];
+
+ data->platdata.max_dma_channels = 4;
+ data->platdata.device_map = data->map;
+ data->platdata.device_map_cnt = ARRAY_SIZE(data->map);
+
+ ret = platform_device_add_data(xdma_pdev, &data->platdata, sizeof(data->platdata));
+ if (ret)
+ goto free_irq_vectors;
+
+ xdma_pdev->dev.parent = &pci_dev->dev;
+
+ ret = platform_device_add(xdma_pdev);
+ if (ret)
+ goto free_irq_vectors;
+
+ return xdma_pdev;
+
+free_irq_vectors:
+ pci_free_irq_vectors(pci_dev);
+put_pdev:
+ platform_device_put(xdma_pdev);
+
+ return NULL;
+}
+
+void alpx_xdma_unregister(struct pci_dev *pci_dev,
+ struct platform_device *xdma_pdev)
+{
+ platform_device_unregister(xdma_pdev);
+ pci_free_irq_vectors(pci_dev);
+}
diff --git a/snd-alpx/alpx_xdma.h b/snd-alpx/alpx_xdma.h
new file mode 100644
index 0000000..3bb34c5
--- /dev/null
+++ b/snd-alpx/alpx_xdma.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+#ifndef _ALPX_XDMA_H_
+#define _ALPX_XDMA_H_
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+
+struct platform_device *alpx_xdma_register(struct pci_dev *pci_dev);
+
+void alpx_xdma_unregister(struct pci_dev *pci_dev,
+ struct platform_device *xdma_pdev);
+
+#endif /*_ALPX_XDMA_H_*/
diff --git a/snd-alpx/alsa_conf/asound-stereo_alp882.conf b/snd-alpx/alsa_conf/asound-stereo_alp882.conf
new file mode 100644
index 0000000..3fee8af
--- /dev/null
+++ b/snd-alpx/alsa_conf/asound-stereo_alp882.conf
@@ -0,0 +1,154 @@
+pcm_slave.ins {
+ pcm "hw:CARD=Alp882e,DEV=0"
+ channels 16
+}
+
+pcm.mic0 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 0
+ bindings.1 1
+ hint.description "Emulated analog Alp222 #0 in"
+}
+
+pcm.mic1 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 2
+ bindings.1 3
+ hint.description "Emulated analog Alp222 #1 in"
+}
+
+pcm.mic2 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 4
+ bindings.1 5
+ hint.description "Emulated analog Alp222 #2 in"
+}
+
+pcm.mic3 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 6
+ bindings.1 7
+ hint.description "Emulated analog Alp222 #3 in"
+}
+
+pcm.mic4 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 8
+ bindings.1 9
+ hint.description "Emulated digital Alp222 #0 in"
+}
+
+pcm.mic5 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 10
+ bindings.1 11
+ hint.description "Emulated digital Alp222 #1 in"
+}
+
+pcm.mic6 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 12
+ bindings.1 13
+ hint.description "Emulated digital Alp222 #2 in"
+}
+
+pcm.mic7 {
+ type dsnoop
+ ipc_key 0x414c5043
+ slave ins
+ bindings.0 14
+ bindings.1 15
+ hint.description "Emulated digital Alp222 #3 in"
+}
+
+pcm_slave.outs {
+ pcm "hw:CARD=Alp882e,DEV=0"
+ channels 16
+}
+
+
+pcm.out0 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 0
+ bindings.1 1
+ hint.description "Emulated analog Alp222 #0 out"
+}
+
+pcm.out1 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 2
+ bindings.1 3
+ hint.description "Emulated analog Alp222 #1 out"
+}
+
+pcm.out2 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 4
+ bindings.1 5
+ hint.description "Emulated analog Alp222 #2 out"
+}
+
+pcm.out3 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 6
+ bindings.1 7
+ hint.description "Emulated analog Alp222 #3 out"
+}
+
+pcm.out4 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 8
+ bindings.1 9
+ hint.description "Emulated digital Alp222 #0 out"
+}
+
+pcm.out5 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 10
+ bindings.1 11
+ hint.description "Emulated digital Alp222 #1 out"
+}
+
+pcm.out6 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 12
+ bindings.1 13
+ hint.description "Emulated digital Alp222 #2 out"
+}
+
+pcm.out7 {
+ type dshare
+ ipc_key 0x414c5050
+ slave outs
+ bindings.0 14
+ bindings.1 15
+ hint.description "Emulated digital Alp222 #3 out"
+}
diff --git a/snd-alpx/cdev_sgdma.h b/snd-alpx/cdev_sgdma.h
new file mode 100644
index 0000000..f1437ed
--- /dev/null
+++ b/snd-alpx/cdev_sgdma.h
@@ -0,0 +1,72 @@
+/*
+ * This file is part of the Xilinx DMA IP Core driver for Linux
+ *
+ * Copyright (c) 2016-present, Xilinx, Inc.
+ * All rights reserved.
+ *
+ * This source code is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef _XDMA_IOCALLS_POSIX_H_
+#define _XDMA_IOCALLS_POSIX_H_
+
+#include <linux/ioctl.h>
+
+
+#define IOCTL_XDMA_PERF_V1 (1)
+#define XDMA_ADDRMODE_MEMORY (0)
+#define XDMA_ADDRMODE_FIXED (1)
+
+/*
+ * S means "Set" through a ptr,
+ * T means "Tell" directly with the argument value
+ * G means "Get": reply by setting through a pointer
+ * Q means "Query": response is on the return value
+ * X means "eXchange": switch G and S atomically
+ * H means "sHift": switch T and Q atomically
+ *
+ * _IO(type,nr) no arguments
+ * _IOR(type,nr,datatype) read data from driver
+ * _IOW(type,nr,datatype) write data to driver
+ * _IOWR(type,nr,datatype) read/write data
+ *
+ * _IOC_DIR(nr) returns direction
+ * _IOC_TYPE(nr) returns magic
+ * _IOC_NR(nr) returns number
+ * _IOC_SIZE(nr) returns size
+ */
+
+struct xdma_performance_ioctl {
+ /* IOCTL_XDMA_IOCTL_Vx */
+ uint32_t version;
+ uint32_t transfer_size;
+ /* measurement */
+ uint32_t stopped;
+ uint32_t iterations;
+ uint64_t clock_cycle_count;
+ uint64_t data_cycle_count;
+ uint64_t pending_count;
+};
+
+
+
+/* IOCTL codes */
+
+#define IOCTL_XDMA_PERF_START _IOW('q', 1, struct xdma_performance_ioctl *)
+#define IOCTL_XDMA_PERF_STOP _IOW('q', 2, struct xdma_performance_ioctl *)
+#define IOCTL_XDMA_PERF_GET _IOR('q', 3, struct xdma_performance_ioctl *)
+#define IOCTL_XDMA_ADDRMODE_SET _IOW('q', 4, int)
+#define IOCTL_XDMA_ADDRMODE_GET _IOR('q', 5, int)
+#define IOCTL_XDMA_ALIGN_GET _IOR('q', 6, int)
+
+#endif /* _XDMA_IOCALLS_POSIX_H_ */
diff --git a/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h b/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h
new file mode 100755
index 0000000..2df54cf
--- /dev/null
+++ b/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ *
+ * Return: DMA transfer direction
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data {
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
+#endif
diff --git a/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c b/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c
new file mode 100755
index 0000000..494ec0c
--- /dev/null
+++ b/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+
+ unsigned int pos;
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width
+ * fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->peripheral_config = dma_data->peripheral_config;
+ slave_config->peripheral_size = dma_data->peripheral_size;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ unsigned int new_pos;
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream);
+ if (new_pos >= snd_pcm_lib_buffer_bytes(substream))
+ new_pos = 0;
+ prtd->pos = new_pos;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ *
+ * Return: PCM position in frames
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ *
+ * Return: PCM position in frames
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes);
+ }
+
+ return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * This function requests a DMA channel for use with dmaengine PCM.
+ *
+ * Return: NULL or the requested DMA channel
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM
+ * substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ pcm_for_each_format(i) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h b/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h
new file mode 100755
index 0000000..2df54cf
--- /dev/null
+++ b/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ *
+ * Return: DMA transfer direction
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data {
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
+#endif
diff --git a/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c b/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c
new file mode 100755
index 0000000..494ec0c
--- /dev/null
+++ b/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/* Per-substream state stored in substream->runtime->private_data. */
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan; /* channel driving this substream */
+ dma_cookie_t cookie; /* cookie of the submitted cyclic descriptor */
+
+ unsigned int pos; /* current hardware position, in bytes */
+};
+
+/* Fetch the runtime data installed by snd_dmaengine_pcm_open(). */
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+/**
+ * snd_dmaengine_pcm_get_chan - Get the DMA channel of a PCM substream
+ * @substream: PCM substream
+ *
+ * Return: the DMA channel that was attached in snd_dmaengine_pcm_open()
+ */
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ /* Map the physical sample width to the nearest DMA bus width. */
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ /* Playback streams write to the device, capture streams read from it. */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ /* The DMA controller, not the peripheral, acts as flow controller. */
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width
+ * fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ /* PACK leaves the width undefined; an explicit addr_width wins. */
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ /* Pass through controller-specific configuration, if any. */
+ slave_config->peripheral_config = dma_data->peripheral_config;
+ slave_config->peripheral_size = dma_data->peripheral_size;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+/*
+ * Cyclic-transfer completion callback: advance the software position by
+ * one period, wrap at the buffer end, and notify ALSA of the elapsed
+ * period. Runs in the dmaengine driver's callback context.
+ */
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ unsigned int new_pos;
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream);
+ if (new_pos >= snd_pcm_lib_buffer_bytes(substream))
+ new_pos = 0;
+ prtd->pos = new_pos;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+/*
+ * Build and submit the cyclic DMA descriptor covering the whole audio
+ * buffer, one transfer per period. The descriptor is not started here;
+ * the trigger callback issues it via dma_async_issue_pending().
+ */
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ /* Only request per-period interrupts if ALSA wants period wakeups. */
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ /* Submit the cyclic descriptor, then kick the channel. */
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /* Pause keeps the transfer resumable; otherwise tear it down. */
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* Async terminate; close paths synchronize before freeing. */
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ *
+ * Return: PCM position in frames
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ /* Position tracked purely from completion callbacks, no HW residue. */
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ *
+ * Return: PCM position in frames
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ /* Derive position from the residue; out-of-range values map to 0. */
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+
+ /* Report data already queued in the controller as extra delay. */
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes);
+ }
+
+ return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * This function request a DMA channel for usage with dmaengine PCM.
+ *
+ * Return: NULL or the requested DMA channel
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ /* PCM needs a slave channel capable of cyclic transfers. */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ /* Cyclic DMA requires a whole number of periods per buffer. */
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ /* Claimed for the dmaengine layer; freed in the close helpers. */
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ /* open() returns -ENXIO if the channel request yielded NULL. */
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ /* Wait for any async terminate/callbacks before freeing prtd. */
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM
+ * substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ *
+ * Return: zero if successful, or a negative error code
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ /* Quiesce outstanding work, then hand the channel back to dmaengine. */
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ /* Default widths used when the DMA driver reports no slave caps. */
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ /* NB: the whole pcm_for_each_format loop is the (unbraced)
+ * body of the if above. */
+ pcm_for_each_format(i) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/4.19/dmaengine_pcm.h b/snd-alpx/core/generic/4.19/dmaengine_pcm.h
new file mode 100755
index 0000000..2c4cfaa
--- /dev/null
+++ b/snd-alpx/core/generic/4.19/dmaengine_pcm.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ *
+ * Return: DMA_MEM_TO_DEV for playback, DMA_DEV_TO_MEM for capture
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible to disable any unsupported formats in it's
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words(note: words, as in units of the
+ * src_addr_width member, not bytes) that can be send to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ */
+struct snd_dmaengine_dai_dma_data {
+ /* Fields mirror the corresponding dma_slave_config members. */
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ unsigned int slave_id;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+/* NOTE(review): BIT(2) is intentionally skipped here — presumably a
+ * retired flag upstream; do not reuse without checking mainline. */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+/*
+ * The PCM streams have custom channel names specified.
+ */
+#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+/* Register/unregister a dmaengine PCM component for @dev. */
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+/* Device-managed variant: unregisters automatically on driver detach. */
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+/* Generic prepare_slave_config implementation usable as the callback above. */
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+#endif
diff --git a/snd-alpx/core/generic/4.19/internal.h b/snd-alpx/core/generic/4.19/internal.h
new file mode 100644
index 0000000..a6bf34d
--- /dev/null
+++ b/snd-alpx/core/generic/4.19/internal.h
@@ -0,0 +1,297 @@
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct regmap;
+struct regcache_ops;
+
+/* Cached mapping of debugfs file offsets to register numbers. */
+struct regmap_debugfs_off_cache {
+ struct list_head list;
+ off_t min; /* first file offset covered by this entry */
+ off_t max; /* last file offset covered by this entry */
+ unsigned int base_reg;
+ unsigned int max_reg;
+};
+
+/* How register addresses and values are marshalled for the bus. */
+struct regmap_format {
+ size_t buf_size;
+ size_t reg_bytes;
+ size_t pad_bytes;
+ size_t val_bytes;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+ void (*format_val)(void *buf, unsigned int val, unsigned int shift);
+ unsigned int (*parse_val)(const void *buf);
+ void (*parse_inplace)(void *buf);
+};
+
+/* Bookkeeping for one in-flight asynchronous bus transfer. */
+struct regmap_async {
+ struct list_head list;
+ struct regmap *map;
+ void *work_buf;
+};
+
+/*
+ * Core state for one register map instance. Exactly one member of the
+ * leading union is in use, selected by the locking scheme chosen at
+ * initialisation (lock/unlock below point at the matching helpers).
+ */
+struct regmap {
+ union {
+ struct mutex mutex;
+ struct {
+ spinlock_t spinlock;
+ unsigned long spinlock_flags;
+ };
+ };
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg; /* This is passed to lock/unlock functions */
+ gfp_t alloc_flags;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+ void *bus_context;
+ const char *name;
+
+ /* async transfer bookkeeping */
+ bool async;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ struct list_head async_free;
+ int async_ret;
+
+#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
+ struct dentry *debugfs;
+ const char *debugfs_name;
+
+ unsigned int debugfs_reg_len;
+ unsigned int debugfs_val_len;
+ unsigned int debugfs_tot_len;
+
+ struct list_head debugfs_off_cache;
+ struct mutex cache_lock;
+#endif
+
+ /* register access policy: callbacks take priority over the tables */
+ unsigned int max_register;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+
+ bool defer_caching;
+
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
+
+ /* number of bits to (left) shift the reg value when formatting*/
+ int reg_shift;
+ int reg_stride;
+ int reg_stride_order;
+
+ /* regcache specific members */
+ const struct regcache_ops *cache_ops;
+ enum regcache_type cache_type;
+
+ /* number of bytes in reg_defaults_raw */
+ unsigned int cache_size_raw;
+ /* number of bytes per word in reg_defaults_raw */
+ unsigned int cache_word_size;
+ /* number of entries in reg_defaults */
+ unsigned int num_reg_defaults;
+ /* number of entries in reg_defaults_raw */
+ unsigned int num_reg_defaults_raw;
+
+ /* if set, only the cache is modified not the HW */
+ bool cache_only;
+ /* if set, only the HW is modified not the cache */
+ bool cache_bypass;
+ /* if set, remember to free reg_defaults_raw */
+ bool cache_free;
+
+ struct reg_default *reg_defaults;
+ const void *reg_defaults_raw;
+ void *cache;
+ /* if set, the cache contains newer data than the HW */
+ bool cache_dirty;
+ /* if set, the HW registers are known to match map->reg_defaults */
+ bool no_sync_defaults;
+
+ struct reg_sequence *patch;
+ int patch_regs;
+
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+ /* if set, converts bulk read to single read */
+ bool use_single_write;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
+
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
+ struct rb_root range_tree;
+ void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
+};
+
+/* Operations implemented by each register-cache backend (rbtree/lzo/flat). */
+struct regcache_ops {
+ const char *name;
+ enum regcache_type type;
+ int (*init)(struct regmap *map);
+ int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+ void (*debugfs_init)(struct regmap *map);
+#endif
+ int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+ int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+ int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+ int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
+};
+
+/* Access-policy queries, resolving callbacks/tables held in the map. */
+bool regmap_cached(struct regmap *map, unsigned int reg);
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
+
+/* Low-level write; caller must hold the map lock. */
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+/* One indirectly-addressed register range, kept in map->range_tree. */
+struct regmap_range_node {
+ struct rb_node node;
+ const char *name;
+ struct regmap *map;
+
+ unsigned int range_min; /* virtual range covered by this node */
+ unsigned int range_max;
+
+ /* page-selector register programming for this range */
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ unsigned int window_start; /* physical window the range maps onto */
+ unsigned int window_len;
+};
+
+/* A bitfield view (mask/shift) onto one register of a map. */
+struct regmap_field {
+ struct regmap *regmap;
+ unsigned int mask;
+ /* lsb */
+ unsigned int shift;
+ unsigned int reg;
+
+ unsigned int id_size; /* stride info for regmap_fields_*() port arrays */
+ unsigned int id_offset;
+};
+
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map, const char *name);
+extern void regmap_debugfs_exit(struct regmap *map);
+
+/* Suppress debugfs node creation for this map (call before init). */
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
+#else
+/* No-op stubs so callers need no CONFIG_DEBUG_FS conditionals. */
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map, const struct regmap_config *config);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base, unsigned int start,
+ unsigned int end);
+
+/* Address of the idx'th cached value inside a raw cache block. */
+static inline const void *regcache_get_val_addr(struct regmap *map,
+ const void *base,
+ unsigned int idx)
+{
+ return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+
+/* Raw buffer write; caller must hold the map lock. */
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+
+/* Called by bus drivers when an async transfer finishes. */
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config);
+
+/* Cache backend implementations selectable via regcache_type. */
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
+
+/* Human-readable identifier for log messages: device name, else map name. */
+static inline const char *regmap_name(const struct regmap *map)
+{
+ if (map->dev)
+ return dev_name(map->dev);
+
+ return map->name;
+}
+
+/* Convert a register index to its address offset, honouring the stride. */
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+ unsigned int index)
+{
+ /* Power-of-two strides use the precomputed shift; others multiply. */
+ if (map->reg_stride_order >= 0)
+ return index << map->reg_stride_order;
+ else
+ return index * map->reg_stride;
+}
+
+/* Inverse of the shift above; valid only for power-of-two strides. */
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+ unsigned int reg)
+{
+ return reg >> map->reg_stride_order;
+}
+
+#endif
diff --git a/snd-alpx/core/generic/4.19/pcm_dmaengine.c b/snd-alpx/core/generic/4.19/pcm_dmaengine.c
new file mode 100755
index 0000000..6f6da11
--- /dev/null
+++ b/snd-alpx/core/generic/4.19/pcm_dmaengine.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+
+ unsigned int pos;
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and
+ * slave_id fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->slave_id = dma_data->slave_id;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ unsigned int new_pos;
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream);
+ if (new_pos >= snd_pcm_lib_buffer_bytes(substream))
+ new_pos = 0;
+ prtd->pos = new_pos;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+ }
+
+ return bytes_to_frames(substream->runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/4.19/regmap-mmio.c b/snd-alpx/core/generic/4.19/regmap-mmio.c
new file mode 100644
index 0000000..d524815
--- /dev/null
+++ b/snd-alpx/core/generic/4.19/regmap-mmio.c
@@ -0,0 +1,399 @@
+/*
+ * Register map access API - MMIO support
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Simple patch to embed this file in its user module */
+/* DISABLE the EXPORT_SYMBOL() macros */
+#undef CONFIG_MODULES
+#include <linux/export.h>
+/* Then re-enable module support so that module.h can be included */
+#define CONFIG_MODULES
+
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_mmio_context {
+ void __iomem *regs;
+ unsigned val_bytes;
+
+ bool attached_clk;
+ struct clk *clk;
+
+ void (*reg_write)(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val);
+ unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+ unsigned int reg);
+};
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+#ifdef CONFIG_64BIT
+ case 64:
+#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+ int min_stride;
+
+ switch (val_bits) {
+ case 8:
+ /* The core treats 0 as 1 */
+ min_stride = 0;
+ return 0;
+ case 16:
+ min_stride = 2;
+ break;
+ case 32:
+ min_stride = 4;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ min_stride = 8;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return min_stride;
+}
+
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite32be(val, ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeq(val, ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ ctx->reg_write(ctx, reg, val);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32be(ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readq(ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ *val = ctx->reg_read(ctx, reg);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+static void regmap_mmio_free_context(void *context)
+{
+ struct regmap_mmio_context *ctx = context;
+
+ if (!IS_ERR(ctx->clk)) {
+ clk_unprepare(ctx->clk);
+ if (!ctx->attached_clk)
+ clk_put(ctx->clk);
+ }
+ kfree(context);
+}
+
+static const struct regmap_bus regmap_mmio = {
+ .fast_io = true,
+ .reg_write = regmap_mmio_write,
+ .reg_read = regmap_mmio_read,
+ .free_context = regmap_mmio_free_context,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+ int min_stride;
+ int ret;
+
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (config->pad_bits)
+ return ERR_PTR(-EINVAL);
+
+ min_stride = regmap_mmio_get_min_stride(config->val_bits);
+ if (min_stride < 0)
+ return ERR_PTR(min_stride);
+
+ if (config->reg_stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->regs = regs;
+ ctx->val_bytes = config->val_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV);
+
+ switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16le;
+ ctx->reg_write = regmap_mmio_write16le;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32le;
+ ctx->reg_write = regmap_mmio_write32le;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ ctx->reg_read = regmap_mmio_read64le;
+ ctx->reg_write = regmap_mmio_write64le;
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (clk_id == NULL)
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk);
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
+}
+
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ ctx->clk = clk;
+ ctx->attached_clk = true;
+
+ return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ clk_unprepare(ctx->clk);
+
+ ctx->attached_clk = false;
+ ctx->clk = NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+
+MODULE_LICENSE("GPL v2");
diff --git a/snd-alpx/core/generic/5.12/dmaengine_pcm.h b/snd-alpx/core/generic/5.12/dmaengine_pcm.h
new file mode 100644
index 0000000..96666ef
--- /dev/null
+++ b/snd-alpx/core/generic/5.12/dmaengine_pcm.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and for catching corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data {
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ unsigned int slave_id;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
+#endif
diff --git a/snd-alpx/core/generic/5.12/pcm_dmaengine.c b/snd-alpx/core/generic/5.12/pcm_dmaengine.c
new file mode 100644
index 0000000..1fc2fa0
--- /dev/null
+++ b/snd-alpx/core/generic/5.12/pcm_dmaengine.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/* Per-substream state, stored in runtime->private_data by _open(). */
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan; /* channel servicing this substream */
+ dma_cookie_t cookie; /* cookie of the submitted cyclic descriptor */
+
+ unsigned int pos; /* software position in bytes; wraps at buffer size */
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+/* Return the DMA channel attached to @substream by snd_dmaengine_pcm_open(). */
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ *
+ * Returns 0 on success, -EINVAL if the physical sample width is unsupported.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32) /* e.g. 18/20/24-in-32-bit formats round up to 4 bytes */
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else /* 33..64 bits */
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ /* Only the fields for the substream's own direction are set. */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false; /* flow control, if any, is done by the DMA controller */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and
+ * slave_id fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->slave_id = dma_data->slave_id; /* NOTE(review): slave_id was removed from dma_slave_config in later kernels — confirm against the minimum supported kernel for this 5.12 compat copy */
+ slave_config->peripheral_config = dma_data->peripheral_config;
+ slave_config->peripheral_size = dma_data->peripheral_size;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+/*
+ * Cyclic-descriptor completion callback: advance the software position by
+ * one period (wrapping at the buffer size) and notify ALSA. Invoked by the
+ * dmaengine driver's completion path — presumably atomic context, so no
+ * sleeping here.
+ */
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ prtd->pos += snd_pcm_lib_period_bytes(substream);
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
+ prtd->pos = 0;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+/*
+ * Build and submit the cyclic DMA descriptor covering the whole PCM buffer,
+ * one transfer per period. The descriptor is only queued; the caller must
+ * still issue it (dma_async_issue_pending) to start the transfer.
+ * Returns 0 on success, -ENOMEM if descriptor preparation failed.
+ */
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ /* Skip per-period interrupts when userspace asked for no wakeups. */
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /* Pause keeps the transfer resumable; otherwise tear it down. */
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* Async terminate; _close()/_close_release_chan() synchronize. */
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ /* pos only advances in the completion callback, so granularity is one period */
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ /* Derive the hardware position from the controller-reported residue. */
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+
+ /* Report bytes accepted by the controller but not yet played. */
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes);
+ }
+
+ return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function request a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ /* Cyclic transfers on a slave channel are required for PCM streaming. */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ /* Cyclic DMA needs the buffer to be a whole number of periods. */
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ /* A NULL channel from the request is rejected by _open() with -ENXIO. */
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ *
+ * Waits for any pending terminate/callbacks to finish before freeing the
+ * per-substream state. The DMA channel itself is NOT released; pair with
+ * snd_dmaengine_pcm_open(). Always returns 0.
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM
+ * substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ *
+ * Pair with snd_dmaengine_pcm_open_request_chan(). Always returns 0.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ /* Quiesce outstanding callbacks before giving the channel back. */
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ /* Default used when the DMA driver does not report slave caps. */
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH; /* pointer only period-accurate */
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ pcm_for_each_format(i) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ /* bits/8 bytes maps onto the DMA_SLAVE_BUSWIDTH_* bit index */
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/5.14/internal.h b/snd-alpx/core/generic/5.14/internal.h
new file mode 100644
index 0000000..37dfbdf
--- /dev/null
+++ b/snd-alpx/core/generic/5.14/internal.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct regmap;
+struct regcache_ops;
+
+/* Maps a debugfs file-offset range onto the register range it displays. */
+struct regmap_debugfs_off_cache {
+ struct list_head list;
+ off_t min; /* first file offset covered by this entry */
+ off_t max; /* last file offset covered by this entry */
+ unsigned int base_reg; /* first register in the range */
+ unsigned int max_reg; /* last register in the range */
+};
+
+/* How register addresses and values are marshalled into the I/O buffer. */
+struct regmap_format {
+ size_t buf_size; /* reg_bytes + pad_bytes + val_bytes */
+ size_t reg_bytes;
+ size_t pad_bytes;
+ size_t val_bytes;
+ s8 reg_shift;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+ void (*format_val)(void *buf, unsigned int val, unsigned int shift);
+ unsigned int (*parse_val)(const void *buf);
+ void (*parse_inplace)(void *buf);
+};
+
+/* Tracking for one in-flight asynchronous bus transaction. */
+struct regmap_async {
+ struct list_head list; /* on map->async_list or map->async_free */
+ struct regmap *map;
+ void *work_buf;
+};
+
+/* Core state for one register map instance. */
+struct regmap {
+ /* Exactly one locking scheme is used, selected at init time. */
+ union {
+ struct mutex mutex;
+ struct {
+ spinlock_t spinlock;
+ unsigned long spinlock_flags;
+ };
+ struct {
+ raw_spinlock_t raw_spinlock;
+ unsigned long raw_spinlock_flags;
+ };
+ };
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg; /* This is passed to lock/unlock functions */
+ gfp_t alloc_flags;
+ unsigned int reg_base;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+ void *bus_context;
+ const char *name;
+
+ /* Async (non-blocking) write bookkeeping. */
+ bool async;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ struct list_head async_free;
+ int async_ret;
+
+#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
+ struct dentry *debugfs;
+ const char *debugfs_name;
+
+ unsigned int debugfs_reg_len;
+ unsigned int debugfs_val_len;
+ unsigned int debugfs_tot_len;
+
+ struct list_head debugfs_off_cache;
+ struct mutex cache_lock;
+#endif
+
+ /* Register access policy: callbacks take precedence over tables. */
+ unsigned int max_register;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
+ const struct regmap_access_table *rd_noinc_table;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+
+ bool defer_caching;
+
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
+
+ /* number of bits to (left) shift the reg value when formatting*/
+ int reg_shift;
+ int reg_stride;
+ int reg_stride_order; /* log2(reg_stride), or negative if not a power of two */
+
+ /* regcache specific members */
+ const struct regcache_ops *cache_ops;
+ enum regcache_type cache_type;
+
+ /* number of bytes in reg_defaults_raw */
+ unsigned int cache_size_raw;
+ /* number of bytes per word in reg_defaults_raw */
+ unsigned int cache_word_size;
+ /* number of entries in reg_defaults */
+ unsigned int num_reg_defaults;
+ /* number of entries in reg_defaults_raw */
+ unsigned int num_reg_defaults_raw;
+
+ /* if set, only the cache is modified not the HW */
+ bool cache_only;
+ /* if set, only the HW is modified not the cache */
+ bool cache_bypass;
+ /* if set, remember to free reg_defaults_raw */
+ bool cache_free;
+
+ struct reg_default *reg_defaults;
+ const void *reg_defaults_raw;
+ void *cache;
+ /* if set, the cache contains newer data than the HW */
+ bool cache_dirty;
+ /* if set, the HW registers are known to match map->reg_defaults */
+ bool no_sync_defaults;
+
+ struct reg_sequence *patch;
+ int patch_regs;
+
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+ /* if set, converts bulk write to single write */
+ bool use_single_write;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
+
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
+ struct rb_root range_tree; /* indirect-access (paged) register ranges */
+ void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
+
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
+};
+
+/* Backend operations implemented by each cache type (rbtree/maple/flat). */
+struct regcache_ops {
+ const char *name;
+ enum regcache_type type;
+ int (*init)(struct regmap *map);
+ int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+ void (*debugfs_init)(struct regmap *map);
+#endif
+ int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+ int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+ int (*sync)(struct regmap *map, unsigned int min, unsigned int max); /* write dirty values back to HW */
+ int (*drop)(struct regmap *map, unsigned int min, unsigned int max); /* discard cached values in range */
+};
+
+/* Per-register access predicates, evaluated from callbacks/tables/max_register. */
+bool regmap_cached(struct regmap *map, unsigned int reg);
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_writeable_noinc(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
+
+/* Internal single-register write; caller must hold the map lock. */
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+/* One indirectly-accessed (paged) register range in map->range_tree. */
+struct regmap_range_node {
+ struct rb_node node;
+ const char *name;
+ struct regmap *map;
+
+ unsigned int range_min; /* virtual range in the map's address space */
+ unsigned int range_max;
+
+ unsigned int selector_reg; /* page-select register and field */
+ unsigned int selector_mask;
+ int selector_shift;
+
+ unsigned int window_start; /* physical window the range is accessed through */
+ unsigned int window_len;
+};
+
+/* A bitfield within one register (optionally replicated id_size apart). */
+struct regmap_field {
+ struct regmap *regmap;
+ unsigned int mask;
+ /* lsb */
+ unsigned int shift;
+ unsigned int reg;
+
+ unsigned int id_size; /* stride between replicated fields */
+ unsigned int id_offset; /* register offset of the first replica */
+};
+
+/* Debugfs hooks; compiled to empty stubs when CONFIG_DEBUG_FS is off. */
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
+#else
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map, const struct regmap_config *config);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base, unsigned int start,
+ unsigned int end);
+bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+/* Address of cache word @idx within a raw cache block @base. */
+static inline const void *regcache_get_val_addr(struct regmap *map,
+ const void *base,
+ unsigned int idx)
+{
+ return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val);
+
+/* Internal raw write; caller must hold the map lock. */
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool noinc);
+
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config);
+
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_maple_ops;
+extern struct regcache_ops regcache_flat_ops;
+
+/* Human-readable identifier for log messages. */
+static inline const char *regmap_name(const struct regmap *map)
+{
+ if (map->dev)
+ return dev_name(map->dev);
+
+ return map->name;
+}
+
+/* Byte offset of register @index, honouring the configured stride. */
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+ unsigned int index)
+{
+ if (map->reg_stride_order >= 0)
+ return index << map->reg_stride_order; /* power-of-two stride: shift is cheaper */
+ else
+ return index * map->reg_stride;
+}
+
+/* Inverse of regmap_get_offset(); valid only for power-of-two strides. */
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+ unsigned int reg)
+{
+ return reg >> map->reg_stride_order;
+}
+
+/* Backing store and access tracking for the RAM-backed test regmaps. */
+struct regmap_ram_data {
+ unsigned int *vals; /* Allocated by caller */
+ bool *read; /* per-register "was read" flags */
+ bool *written; /* per-register "was written" flags */
+ enum regmap_endian reg_endian;
+};
+
+/*
+ * Create a test register map with data stored in RAM, not intended
+ * for practical use.
+ */
+struct regmap *__regmap_init_ram(const struct regmap_config *config,
+ struct regmap_ram_data *data,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+#define regmap_init_ram(config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+
+struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+ struct regmap_ram_data *data,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+#define regmap_init_raw_ram(config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+#endif
diff --git a/snd-alpx/core/generic/5.14/regmap-mmio.c b/snd-alpx/core/generic/5.14/regmap-mmio.c
new file mode 100644
index 0000000..3ccdd86
--- /dev/null
+++ b/snd-alpx/core/generic/5.14/regmap-mmio.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include "internal.h"
+
+/* Bus context for one MMIO regmap; passed to every bus callback. */
+struct regmap_mmio_context {
+ void __iomem *regs; /* base of the mapped register window */
+ unsigned int val_bytes; /* register value width in bytes */
+ bool big_endian;
+
+ bool attached_clk; /* clk supplied by caller, do not clk_put() it */
+ struct clk *clk; /* may hold an ERR_PTR when no clock is used */
+
+ /* Width-specific accessors selected at map creation time. */
+ void (*reg_write)(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val);
+ unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+ unsigned int reg);
+};
+
+/* Only 8/16/32-bit register addresses are supported; -EINVAL otherwise. */
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Minimum register stride (in bytes) an MMIO map must use for the given
+ * value width, so accesses stay naturally aligned. Returns -EINVAL for
+ * unsupported widths.
+ */
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+ int min_stride;
+
+ switch (val_bits) {
+ case 8:
+ /* The core treats 0 as 1 */
+ min_stride = 0;
+ break;
+ case 16:
+ min_stride = 2;
+ break;
+ case 32:
+ min_stride = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return min_stride;
+}
+
+/*
+ * Per-width reg_write implementations. One of these is installed as
+ * ctx->reg_write at map creation time according to value width,
+ * endianness and accessor family; the "_relaxed" variants use the
+ * barrier-free writeX_relaxed() accessors, the "iowrite" variants use
+ * the ioreadX/iowriteX family.
+ */
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite8(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(swab16(val), ctx->regs + reg); /* swab: writew is little-endian */
+}
+
+static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(swab32(val), ctx->regs + reg); /* swab: writel is little-endian */
+}
+
+static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32be(val, ctx->regs + reg);
+}
+
+/*
+ * regmap_bus .reg_write: gate the access with the optional clock and
+ * dispatch to the width-specific writer selected at map creation.
+ */
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ /* ctx->clk holds an ERR_PTR when no clock was requested. */
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ ctx->reg_write(ctx, reg, val);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+/*
+ * regmap_bus .reg_noinc_write: stream @val_count values to a single
+ * (non-incrementing, FIFO-style) register. Returns 0 on success or a
+ * negative error code (-EINVAL for unsupported value widths).
+ */
+static int regmap_mmio_noinc_write(void *context, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+ int i;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * There are no native, assembly-optimized write single register
+ * operations for big endian, so fall back to emulation if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ {
+ const u16 *valp = (const u16 *)val;
+ for (i = 0; i < val_count; i++)
+ writew(swab16(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+ case 4:
+ {
+ const u32 *valp = (const u32 *)val;
+ for (i = 0; i < val_count; i++)
+ writel(swab32(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#ifdef CONFIG_64BIT
+ case 8:
+ {
+ const u64 *valp = (const u64 *)val;
+ for (i = 0; i < val_count; i++)
+ writeq(swab64(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+ }
+
+ /* Little-endian fast path: repeated-store string accessors. */
+ switch (ctx->val_bytes) {
+ case 1:
+ writesb(ctx->regs + reg, (const u8 *)val, val_count);
+ break;
+ case 2:
+ writesw(ctx->regs + reg, (const u16 *)val, val_count);
+ break;
+ case 4:
+ writesl(ctx->regs + reg, (const u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writesq(ctx->regs + reg, (const u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
+/*
+ * Per-width reg_read implementations, mirroring the write helpers above:
+ * one is installed as ctx->reg_read at map creation time based on value
+ * width, endianness and accessor family.
+ */
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readb_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread8(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return swab16(readw(ctx->regs + reg)); /* swab: readw is little-endian */
+}
+
+static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return swab32(readl(ctx->regs + reg)); /* swab: readl is little-endian */
+}
+
+static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32be(ctx->regs + reg);
+}
+
+/*
+ * regmap_bus .reg_read: gate the access with the optional clock and
+ * dispatch to the width-specific reader selected at map creation.
+ */
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ *val = ctx->reg_read(ctx, reg);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+/*
+ * regmap_bus .reg_noinc_read: stream @val_count values from a single
+ * (non-incrementing, FIFO-style) register into @val, byte-swapping the
+ * result in place for big-endian maps.
+ * NOTE(review): swab16_array()/swab32_array()/swab64_array() are recent
+ * kernel helpers — confirm they exist on the minimum kernel this 5.14
+ * compat copy is built against.
+ */
+static int regmap_mmio_noinc_read(void *context, unsigned int reg,
+ void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ readsb(ctx->regs + reg, (u8 *)val, val_count);
+ break;
+ case 2:
+ readsw(ctx->regs + reg, (u16 *)val, val_count);
+ break;
+ case 4:
+ readsl(ctx->regs + reg, (u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ readsq(ctx->regs + reg, (u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ /*
+ * There are no native, assembly-optimized write single register
+ * operations for big endian, so fall back to emulation if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ swab16_array(val, val_count);
+ break;
+ case 4:
+ swab32_array(val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ swab64_array(val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
+
+static void regmap_mmio_free_context(void *context)
+{
+ struct regmap_mmio_context *ctx = context;
+
+ if (!IS_ERR(ctx->clk)) {
+ clk_unprepare(ctx->clk);
+ if (!ctx->attached_clk) /* attached clocks stay owned by the caller; only put ones we clk_get() */
+ clk_put(ctx->clk);
+ }
+ kfree(context);
+}
+
+static const struct regmap_bus regmap_mmio = { /* regmap bus backend for memory-mapped I/O */
+ .fast_io = true,
+ .reg_write = regmap_mmio_write,
+ .reg_read = regmap_mmio_read,
+ .reg_noinc_write = regmap_mmio_noinc_write,
+ .reg_noinc_read = regmap_mmio_noinc_read,
+ .free_context = regmap_mmio_free_context,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+ int min_stride;
+ int ret;
+
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (config->pad_bits) /* padding between register and value is not supported for MMIO */
+ return ERR_PTR(-EINVAL);
+
+ min_stride = regmap_mmio_get_min_stride(config->val_bits);
+ if (min_stride < 0)
+ return ERR_PTR(min_stride);
+
+ if (config->reg_stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ if (config->use_relaxed_mmio && config->io_port) /* the two access styles are mutually exclusive */
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->regs = regs;
+ ctx->val_bytes = config->val_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV); /* "no clock" sentinel, tested everywhere with IS_ERR() */
+
+ switch (regmap_get_val_endian(dev, &regmap_mmio, config)) { /* pick accessor pair per endian+width+style */
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read8_relaxed;
+ ctx->reg_write = regmap_mmio_write8_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
+ break;
+ case 16:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16le;
+ ctx->reg_write = regmap_mmio_iowrite16le;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read16le_relaxed;
+ ctx->reg_write = regmap_mmio_write16le_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read16le;
+ ctx->reg_write = regmap_mmio_write16le;
+ }
+ break;
+ case 32:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32le;
+ ctx->reg_write = regmap_mmio_iowrite32le;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read32le_relaxed;
+ ctx->reg_write = regmap_mmio_write32le_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read32le;
+ ctx->reg_write = regmap_mmio_write32le;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ ctx->big_endian = true; /* also makes noinc paths byte-swap bulk data */
+ switch (config->val_bits) {
+ case 8:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
+ break;
+ case 16:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16be;
+ ctx->reg_write = regmap_mmio_iowrite16be;
+ } else {
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ }
+ break;
+ case 32:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32be;
+ ctx->reg_write = regmap_mmio_iowrite32be;
+ } else {
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (clk_id == NULL) /* no clock requested: context is complete */
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk); /* prepare once here; enable/disable per access */
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
+}
+
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); /* validates config, picks accessors, grabs clock */
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); /* same setup as non-devm variant */
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name); /* device-managed: freed automatically on driver detach */
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ ctx->clk = clk;
+ ctx->attached_clk = true; /* caller retains ownership; free_context will not clk_put() */
+
+ return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ clk_unprepare(ctx->clk);
+
+ ctx->attached_clk = false;
+ ctx->clk = NULL; /* NOTE(review): NULL is not the IS_ERR() sentinel used elsewhere — reads after detach would enable a NULL clk */
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+
+MODULE_LICENSE("GPL v2");
diff --git a/snd-alpx/core/generic/5.17/dmaengine_pcm.h b/snd-alpx/core/generic/5.17/dmaengine_pcm.h
new file mode 100644
index 0000000..38ea046
--- /dev/null
+++ b/snd-alpx/core/generic/5.17/dmaengine_pcm.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV; /* playback: memory -> device */
+ else
+ return DMA_DEV_TO_MEM; /* capture: device -> memory */
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible to disable any unsupported formats in it's
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words(note: words, as in units of the
+ * src_addr_width member, not bytes) that can be send to or received from the
+ * DAI in one burst.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data { /* fields documented in the kerneldoc block above */
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags; /* SND_DMAENGINE_PCM_DAI_FLAG_* */
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config { /* fields documented in the kerneldoc block above */
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; /* indexed by stream direction */
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+struct dmaengine_pcm { /* per-component state for the generic dmaengine PCM */
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; /* one DMA channel per stream direction */
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component; /* embedded; see soc_component_to_pcm() */
+ unsigned int flags; /* SND_DMAENGINE_PCM_FLAG_* */
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component); /* valid because component is embedded above */
+}
+#endif
diff --git a/snd-alpx/core/generic/5.17/pcm_dmaengine.c b/snd-alpx/core/generic/5.17/pcm_dmaengine.c
new file mode 100644
index 0000000..af6f717
--- /dev/null
+++ b/snd-alpx/core/generic/5.17/pcm_dmaengine.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+struct dmaengine_pcm_runtime_data { /* per-substream state stored in runtime->private_data */
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie; /* cookie of the submitted cyclic descriptor */
+
+ unsigned int pos; /* byte position tracked by period callbacks (no-residue pointer) */
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data; /* set by snd_dmaengine_pcm_open() */
+}
+
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan; /* channel stored at open time */
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params); /* physical sample width in bits */
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth; /* device side is the destination on playback */
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth; /* device side is the source on capture */
+ }
+
+ slave_config->device_fc = false; /* DMA controller, not the peripheral, is flow controller */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width
+ * fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED; /* packed: let the DMA driver pick */
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width; /* explicit width takes priority over PACK */
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->peripheral_config = dma_data->peripheral_config;
+ slave_config->peripheral_size = dma_data->peripheral_size;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+static void dmaengine_pcm_dma_complete(void *arg) /* per-period callback of the cyclic descriptor */
+{
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ prtd->pos += snd_pcm_lib_period_bytes(substream);
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
+ prtd->pos = 0; /* wrap at the end of the ring buffer */
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT; /* request per-period callbacks unless wakeups are disabled */
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags); /* one cyclic descriptor covering the whole ring */
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc); /* queued; started by dma_async_issue_pending() */
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan); /* actually kick off the queued cyclic transfer */
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan); /* pause if the channel supports it ... */
+ else
+ dmaengine_terminate_async(prtd->dma_chan); /* ... otherwise stop outright */
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ dmaengine_terminate_async(prtd->dma_chan); /* async; close/release paths synchronize */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos); /* period-callback counter, not true hw position */
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue; /* residue counts down from buffer end */
+
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes); /* bytes queued in the DMA but not yet transferred */
+ }
+
+ return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function request a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask); /* cyclic capability is required for the ring-buffer transfer */
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS); /* buffer must be a whole number of periods for cyclic DMA */
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd; /* claimed for the dmaengine layer; see kerneldoc */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); /* open() rejects a NULL channel with -ENXIO */
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan); /* wait out async terminate/callbacks before freeing prtd */
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM
+ * substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan); /* wait out async terminate/callbacks before freeing prtd */
+ dma_release_channel(prtd->dma_chan); /* counterpart of open_request_chan() */
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); /* default assumed widths if slave_caps is unsupported */
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH; /* coarse residue: position only advances per period */
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ pcm_for_each_format(i) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8))) /* DMA_SLAVE_BUSWIDTH_* values are byte counts used as bit positions */
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/5.2/dmaengine_pcm.h b/snd-alpx/core/generic/5.2/dmaengine_pcm.h
new file mode 100644
index 0000000..c679f61
--- /dev/null
+++ b/snd-alpx/core/generic/5.2/dmaengine_pcm.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV; /* playback: memory -> device */
+ else
+ return DMA_DEV_TO_MEM; /* capture: device -> memory */
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible to disable any unsupported formats in it's
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words(note: words, as in units of the
+ * src_addr_width member, not bytes) that can be send to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ */
+struct snd_dmaengine_dai_dma_data { /* fields documented in the kerneldoc block above */
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ unsigned int slave_id; /* pre-5.17 variant still carries slave_id */
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags; /* SND_DMAENGINE_PCM_DAI_FLAG_* */
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+#endif
diff --git a/snd-alpx/core/generic/5.2/pcm_dmaengine.c b/snd-alpx/core/generic/5.2/pcm_dmaengine.c
new file mode 100644
index 0000000..89a0592
--- /dev/null
+++ b/snd-alpx/core/generic/5.2/pcm_dmaengine.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+
+ unsigned int pos;
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and
+ * slave_id fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->slave_id = dma_data->slave_id;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ prtd->pos += snd_pcm_lib_period_bytes(substream);
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
+ prtd->pos = 0;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+ }
+
+ return bytes_to_frames(substream->runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/5.5/dmaengine_pcm.h b/snd-alpx/core/generic/5.5/dmaengine_pcm.h
new file mode 100644
index 0000000..b652206
--- /dev/null
+++ b/snd-alpx/core/generic/5.5/dmaengine_pcm.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words(note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ */
+struct snd_dmaengine_dai_dma_data {
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ unsigned int slave_id;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+#endif
diff --git a/snd-alpx/core/generic/5.5/pcm_dmaengine.c b/snd-alpx/core/generic/5.5/pcm_dmaengine.c
new file mode 100644
index 0000000..5749a8a
--- /dev/null
+++ b/snd-alpx/core/generic/5.5/pcm_dmaengine.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+
+ unsigned int pos;
+};
+
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and
+ * slave_id fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->slave_id = dma_data->slave_id;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ prtd->pos += snd_pcm_lib_period_bytes(substream);
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
+ prtd->pos = 0;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+ }
+
+ return bytes_to_frames(substream->runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/5.7/dmaengine_pcm.h b/snd-alpx/core/generic/5.7/dmaengine_pcm.h
new file mode 100644
index 0000000..b652206
--- /dev/null
+++ b/snd-alpx/core/generic/5.7/dmaengine_pcm.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+ /* Playback moves data memory -> device; capture is the reverse. */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return DMA_MEM_TO_DEV;
+ else
+ return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and set the DMA transfer size to undefined.
+ * The DAI driver is responsible to disable any unsupported formats in it's
+ * configuration and catch corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words(note: words, as in units of the
+ * src_addr_width member, not bytes) that can be send to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ */
+/*
+ * Filled in by the DAI driver and consumed by the generic dmaengine PCM
+ * code (see snd_dmaengine_pcm_set_config_from_dai_data()); member
+ * semantics are described in the kerneldoc block above.
+ */
+struct snd_dmaengine_dai_dma_data {
+ dma_addr_t addr;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ unsigned int slave_id;
+ void *filter_data;
+ const char *chan_name;
+ unsigned int fifo_size;
+ unsigned int flags;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM drivers hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */
+/*
+ * Passed to snd_dmaengine_pcm_register(); per-member semantics are in the
+ * kerneldoc block above. NOTE(review): members look optional (users appear
+ * to fall back to defaults when NULL) — confirm against the soc-generic
+ * dmaengine PCM users before relying on that.
+ */
+struct snd_dmaengine_pcm_config {
+ int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+ struct dma_chan *(*compat_request_channel)(
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_substream *substream);
+ int (*process)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+ dma_filter_fn compat_filter_fn;
+ struct device *dma_dev;
+ const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
+
+ const struct snd_pcm_hardware *pcm_hardware;
+ unsigned int prealloc_buffer_size;
+};
+
+int snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+void snd_dmaengine_pcm_unregister(struct device *dev);
+
+int devm_snd_dmaengine_pcm_register(struct device *dev,
+ const struct snd_dmaengine_pcm_config *config,
+ unsigned int flags);
+
+int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config);
+
+#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+
+#endif
diff --git a/snd-alpx/core/generic/5.7/pcm_dmaengine.c b/snd-alpx/core/generic/5.7/pcm_dmaengine.c
new file mode 100644
index 0000000..4d059ff
--- /dev/null
+++ b/snd-alpx/core/generic/5.7/pcm_dmaengine.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on:
+ * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/* Per-substream state, stored in substream->runtime->private_data. */
+struct dmaengine_pcm_runtime_data {
+ struct dma_chan *dma_chan; /* channel used for this substream */
+ dma_cookie_t cookie; /* cookie of the submitted cyclic descriptor */
+
+ unsigned int pos; /* byte position in buffer (period-granular fallback) */
+};
+
+/* Recover the dmaengine runtime state stashed in runtime->private_data. */
+static inline struct dmaengine_pcm_runtime_data *substream_to_prtd(
+ const struct snd_pcm_substream *substream)
+{
+ return substream->runtime->private_data;
+}
+
+/* Return the DMA channel bound to this substream by snd_dmaengine_pcm_open(). */
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ return prtd->dma_chan;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
+
+/**
+ * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config
+ * @substream: PCM substream
+ * @params: hw_params
+ * @slave_config: DMA slave config
+ *
+ * This function can be used to initialize a dma_slave_config from a substream
+ * and hw_params in a dmaengine based PCM driver implementation.
+ */
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+ const struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+{
+ enum dma_slave_buswidth buswidth;
+ int bits;
+
+ /*
+ * Map the sample's physical width (in bits) to the smallest DMA bus
+ * width that holds it; widths outside 8..64 bits are rejected.
+ */
+ bits = params_physical_width(params);
+ if (bits < 8 || bits > 64)
+ return -EINVAL;
+ else if (bits == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (bits == 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else if (bits == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (bits <= 32)
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ /* Only the device-side width is set; the FIFO address comes from
+ * snd_dmaengine_pcm_set_config_from_dai_data(). */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->direction = DMA_MEM_TO_DEV;
+ slave_config->dst_addr_width = buswidth;
+ } else {
+ slave_config->direction = DMA_DEV_TO_MEM;
+ slave_config->src_addr_width = buswidth;
+ }
+
+ slave_config->device_fc = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config);
+
+/**
+ * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config
+ * using DAI DMA data.
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @slave_config: DMA slave configuration
+ *
+ * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and
+ * slave_id fields of the DMA slave config from the same fields of the DAI DMA
+ * data struct. The src and dst fields will be initialized depending on the
+ * direction of the substream. If the substream is a playback stream the dst
+ * fields will be initialized, if it is a capture stream the src fields will be
+ * initialized. The {dst,src}_addr_width field will only be initialized if the
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
+ */
+void snd_dmaengine_pcm_set_config_from_dai_data(
+ const struct snd_pcm_substream *substream,
+ const struct snd_dmaengine_dai_dma_data *dma_data,
+ struct dma_slave_config *slave_config)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config->dst_addr = dma_data->addr;
+ slave_config->dst_maxburst = dma_data->maxburst;
+ /* PACK sets UNDEFINED first; an explicit addr_width then wins. */
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->dst_addr_width = dma_data->addr_width;
+ } else {
+ slave_config->src_addr = dma_data->addr;
+ slave_config->src_maxburst = dma_data->maxburst;
+ if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+ slave_config->src_addr_width =
+ DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ slave_config->src_addr_width = dma_data->addr_width;
+ }
+
+ slave_config->slave_id = dma_data->slave_id;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
+
+/*
+ * Per-period completion callback of the cyclic descriptor: advance the
+ * software position by one period (wrapping at the buffer size) and notify
+ * ALSA. This position is only used by the no-residue pointer fallback.
+ */
+static void dmaengine_pcm_dma_complete(void *arg)
+{
+ struct snd_pcm_substream *substream = arg;
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ prtd->pos += snd_pcm_lib_period_bytes(substream);
+ if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
+ prtd->pos = 0;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+/*
+ * Build and submit the cyclic descriptor covering the whole DMA buffer,
+ * with one transfer chunk per ALSA period. Returns 0 or -ENOMEM if the
+ * dmaengine driver could not prepare the descriptor.
+ */
+static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct dma_chan *chan = prtd->dma_chan;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction direction;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ direction = snd_pcm_substream_to_dma_direction(substream);
+
+ /* Skip per-period interrupts when userspace asked for no wakeups. */
+ if (!substream->runtime->no_period_wakeup)
+ flags |= DMA_PREP_INTERRUPT;
+
+ prtd->pos = 0;
+ desc = dmaengine_prep_dma_cyclic(chan,
+ substream->runtime->dma_addr,
+ snd_pcm_lib_buffer_bytes(substream),
+ snd_pcm_lib_period_bytes(substream), direction, flags);
+
+ if (!desc)
+ return -ENOMEM;
+
+ desc->callback = dmaengine_pcm_dma_complete;
+ desc->callback_param = substream;
+ prtd->cookie = dmaengine_submit(desc);
+
+ return 0;
+}
+
+/**
+ * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation
+ * @substream: PCM substream
+ * @cmd: Trigger command
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function can be used as the PCM trigger callback for dmaengine based PCM
+ * driver implementations.
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = dmaengine_pcm_prepare_and_submit(substream);
+ if (ret)
+ return ret;
+ dma_async_issue_pending(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dmaengine_resume(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /*
+ * Pause on suspend only if the channel supports it (PAUSE was
+ * advertised in hw info); otherwise the transfer is torn down
+ * and must be resubmitted on resume.
+ */
+ if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+ dmaengine_pause(prtd->dma_chan);
+ else
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dmaengine_pause(prtd->dma_chan);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* Async terminate; snd_dmaengine_pcm_close() synchronizes. */
+ dmaengine_terminate_async(prtd->dma_chan);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ /* prtd->pos only advances per period, so this is period-granular. */
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ /*
+ * Derive the hardware position from the residue of the cyclic
+ * transfer; a residue of 0 or out of range falls back to position 0.
+ */
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+
+ /* Bytes queued in the DMA FIFO are reported as extra delay. */
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes);
+ }
+
+ return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function request a DMA channel for usage with dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+ void *filter_data)
+{
+ dma_cap_mask_t mask;
+
+ /* A PCM channel must support both slave and cyclic transfers. */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use private_data field of the substream's runtime. So it
+ * is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan)
+{
+ struct dmaengine_pcm_runtime_data *prtd;
+ int ret;
+
+ if (!chan)
+ return -ENXIO;
+
+ /* Cyclic DMA needs the buffer to be a whole number of periods. */
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->dma_chan = chan;
+
+ /* Claimed for this substream; freed in snd_dmaengine_pcm_close(). */
+ substream->runtime->private_data = prtd;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data)
+{
+ /* Convenience wrapper: request a slave+cyclic channel, then open. */
+ return snd_dmaengine_pcm_open(substream,
+ snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ /* Wait for any terminate_async() issued by trigger() to finish
+ * before freeing the runtime data. The channel itself is not
+ * released here — see snd_dmaengine_pcm_close_release_chan(). */
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_release_chan_close - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+ /* As snd_dmaengine_pcm_close(), but also gives the channel back. */
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
+ kfree(prtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ /* Default widths if the dmaengine driver has no slave_caps. */
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+ /* NOTE(review): a dma_get_slave_caps() error is still returned to the
+ * caller even though the defaults above were applied — callers are
+ * expected to tolerate that; confirm against users of this helper. */
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ pcm_for_each_format(i) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
+
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/6.2/amd_xdma.h b/snd-alpx/core/generic/6.2/amd_xdma.h
new file mode 100644
index 0000000..b5e23e1
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/amd_xdma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match channel when request dma channel
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir; /* matched when requesting a channel */
+};
+
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt; /* number of entries in @device_map (not in kerneldoc above) */
+ struct dma_slave_map *device_map; /* slave channel lookup table */
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
diff --git a/snd-alpx/core/generic/6.2/dmaengine.c b/snd-alpx/core/generic/6.2/dmaengine.c
new file mode 100644
index 0000000..8a6e6b6
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/dmaengine.c
@@ -0,0 +1,1652 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * This code implements the DMA subsystem. It provides a HW-neutral interface
+ * for other kernel code to use asynchronous memory copy capabilities,
+ * if present, and allows different HW DMA drivers to register as providing
+ * this capability.
+ *
+ * Due to the fact we are accelerating what is already a relatively fast
+ * operation, the code goes to great lengths to avoid additional overhead,
+ * such as locking.
+ *
+ * LOCKING:
+ *
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it has need for an exclusive channel it can call
+ * dma_request_channel(). Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
+ *
+ * Each device has a channels list, which runs unlocked but is never modified
+ * once the device is registered, it's just setup by the driver.
+ *
+ * See Documentation/driver-api/dmaengine for more details
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/rculist.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+#include <linux/of_dma.h>
+#include <linux/mempool.h>
+#include <linux/numa.h>
+
+#include "dmaengine.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDA(dma_ida);
+static LIST_HEAD(dma_device_list);
+static long dmaengine_ref_count;
+
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+/* Create the per-device debugfs directory; failure is non-fatal. */
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+ dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+ rootdir);
+ if (IS_ERR(dma_dev->dbg_dev_root))
+ dma_dev->dbg_dev_root = NULL;
+}
+
+/* Remove the per-device debugfs directory created at registration. */
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+ debugfs_remove_recursive(dma_dev->dbg_dev_root);
+ dma_dev->dbg_dev_root = NULL;
+}
+
+/*
+ * Default per-device summary: one line per in-use channel, with the
+ * client name when one was recorded and the router it goes through, if any.
+ */
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+ struct dma_device *dma_dev)
+{
+ struct dma_chan *chan;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+ chan->dbg_client_name ?: "in-use");
+
+ if (chan->router)
+ seq_printf(s, " (via router: %s)\n",
+ dev_name(chan->router->dev));
+ else
+ seq_puts(s, "\n");
+ }
+ }
+}
+
+/*
+ * Back-end of /sys/kernel/debug/dmaengine/summary: walk all registered
+ * DMA devices under dma_list_mutex, letting a device override the default
+ * per-channel listing via its dbg_summary_show hook.
+ */
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+ struct dma_device *dma_dev = NULL;
+
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+ seq_printf(s, "dma%d (%s): number of channels: %u\n",
+ dma_dev->dev_id, dev_name(dma_dev->dev),
+ dma_dev->chancnt);
+
+ if (dma_dev->dbg_summary_show)
+ dma_dev->dbg_summary_show(s, dma_dev);
+ else
+ dmaengine_dbg_summary_show(s, dma_dev);
+
+ if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+/* Create the debugfs root and its read-only "summary" file. */
+static void __init dmaengine_debugfs_init(void)
+{
+ rootdir = debugfs_create_dir("dmaengine", NULL);
+
+ /* /sys/kernel/debug/dmaengine/summary */
+ debugfs_create_file("summary", 0444, rootdir, NULL,
+ &dmaengine_summary_fops);
+}
+#else
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static inline void dmaengine_debugfs_init(void) { }
+/* NOTE(review): this stub returns int while the debugfs variant above
+ * returns void; harmless as long as no caller uses the return value —
+ * confirm at call sites. */
+static inline int dmaengine_debug_register(struct dma_device *dma_dev)
+{
+ return 0;
+}
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif /* DEBUG_FS */
+
+/* --- sysfs implementation --- */
+
+#define DMA_SLAVE_NAME "slave"
+
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev: device node
+ *
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+ struct dma_chan_dev *chan_dev;
+
+ /* The device is embedded in dma_chan_dev, which carries the chan. */
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ return chan_dev->chan;
+}
+
+/* sysfs: total memcpy operations on this channel, summed over all CPUs. */
+static ssize_t memcpy_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dma_chan *chan;
+ unsigned long count = 0;
+ int i;
+ int err;
+
+ /* dma_list_mutex guards against the channel vanishing mid-read. */
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->memcpy_count;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+
+ return err;
+}
+static DEVICE_ATTR_RO(memcpy_count);
+
+/* sysfs: total bytes moved through this channel, summed over all CPUs. */
+static ssize_t bytes_transferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dma_chan *chan;
+ unsigned long count = 0;
+ int i;
+ int err;
+
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+
+ return err;
+}
+static DEVICE_ATTR_RO(bytes_transferred);
+
+/* sysfs: current number of clients holding this channel. */
+static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dma_chan *chan;
+ int err;
+
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan)
+ err = sprintf(buf, "%d\n", chan->client_count);
+ else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+
+ return err;
+}
+static DEVICE_ATTR_RO(in_use);
+
+/* Default sysfs attributes exposed for every DMA channel device. */
+static struct attribute *dma_dev_attrs[] = {
+ &dev_attr_memcpy_count.attr,
+ &dev_attr_bytes_transferred.attr,
+ &dev_attr_in_use.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dma_dev);
+
+/* Device release hook: free the dma_chan_dev wrapper embedding @dev. */
+static void chan_dev_release(struct device *dev)
+{
+ struct dma_chan_dev *chan_dev;
+
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ kfree(chan_dev);
+}
+
+/* The "dma" device class channels are registered under. */
+static struct class dma_devclass = {
+ .name = "dma",
+ .dev_groups = dma_dev_groups,
+ .dev_release = chan_dev_release,
+};
+
+/* --- client and device registration --- */
+
+/* enable iteration over all operation types */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan: associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan; /* channel assigned to this cpu/op slot */
+};
+
+/* percpu lookup table for memory-to-memory offload providers */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+/*
+ * Allocate the percpu channel lookup table for every operation type.
+ * On any allocation failure, everything allocated so far is freed and
+ * the error is returned.
+ */
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ /* free_percpu(NULL) is a no-op, so freeing all slots is safe */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan: DMA channel to test
+ * @cpu: CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ /* Devices with no NUMA affinity count as local to every CPU. */
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap: capability to match
+ * @cpu: CPU index which the channel should be close to
+ *
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
+ * reference count is taken into account.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ /* Skip devices lacking the capability and private devices. */
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ /* Only channels already held by a client qualify. */
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ /* NUMA-local wins over the global least-loaded channel. */
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ /* reset per-channel allocation counters (private devices excluded) */
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+/* Return non-zero iff every capability in @want is present in @device. */
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
+{
+ return bitmap_subset(want->bits, device->cap_mask.bits,
+ DMA_TX_TYPE_END);
+}
+
+/* Resolve the driver module that owns the channel's parent device. */
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+ struct dma_device *device = chan->device;
+
+ return device->owner;
+}
+
+/**
+ * balance_ref_count - catch up the channel reference count
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
+ *
+ * Takes one module reference per added client reference so module
+ * lifetime matches the client count.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void balance_ref_count(struct dma_chan *chan)
+{
+ struct module *owner = dma_chan_to_owner(chan);
+
+ while (chan->client_count < dmaengine_ref_count) {
+ __module_get(owner);
+ chan->client_count++;
+ }
+}
+
+/*
+ * kref release callback: unlink the device from the global list,
+ * rebalance the channel table, then hand control to the driver's
+ * optional device_release() hook. Runs with dma_list_mutex held
+ * (callers drop the last reference via dma_device_put()).
+ */
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+/* Drop a device reference; must be called under dma_list_mutex. */
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
+/**
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan: channel to grab
+ *
+ * Returns 0 on success or -ENODEV if the driver module or device is
+ * going away; may also return the error from the driver's
+ * device_alloc_chan_resources() callback.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+ struct module *owner = dma_chan_to_owner(chan);
+ int ret;
+
+ /* The channel is already in use, update client count */
+ if (chan->client_count) {
+ __module_get(owner);
+ chan->client_count++;
+ return 0;
+ }
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ /* device ref may already be on its way to zero during teardown */
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
+ /* allocate upon first client reference */
+ if (chan->device->device_alloc_chan_resources) {
+ ret = chan->device->device_alloc_chan_resources(chan);
+ if (ret < 0)
+ goto err_out;
+ }
+
+ chan->client_count++;
+
+ /* public channels track the global dmaengine reference count */
+ if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+ balance_ref_count(chan);
+
+ return 0;
+
+err_out:
+ dma_device_put(chan->device);
+module_put_out:
+ module_put(owner);
+ return ret;
+}
+
+/**
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan: channel to release
+ *
+ * Undoes dma_chan_get(): frees channel resources on the last client
+ * reference, releases any DMA router mapping, and drops the device and
+ * module references.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+ /* This channel is not in use, bail out */
+ if (!chan->client_count)
+ return;
+
+ chan->client_count--;
+
+ /* This channel is not in use anymore, free it */
+ if (!chan->client_count && chan->device->device_free_chan_resources) {
+ /* Make sure all operations have completed */
+ dmaengine_synchronize(chan);
+ chan->device->device_free_chan_resources(chan);
+ }
+
+ /* If the channel is used via a DMA request router, free the mapping */
+ if (chan->router && chan->router->route_free) {
+ chan->router->route_free(chan->router->dev, chan->route_data);
+ chan->router = NULL;
+ chan->route_data = NULL;
+ }
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
+}
+
+/*
+ * dma_sync_wait - busy-wait for a transaction identified by @cookie to
+ * complete on @chan. Gives up with DMA_ERROR after a 5 second timeout.
+ * This spins with cpu_relax(), so it is only suitable for contexts that
+ * may poll.
+ */
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ enum dma_status status;
+ unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+ dma_async_issue_pending(chan);
+ do {
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+ dev_err(chan->device->dev, "%s: timeout!\n", __func__);
+ return DMA_ERROR;
+ }
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
+
+ return status;
+}
+EXPORT_SYMBOL(dma_sync_wait);
+
+/**
+ * dma_find_channel - find a channel to carry out the operation
+ * @tx_type: transaction type
+ *
+ * Lockless per-cpu lookup in the channel table populated by
+ * dma_channel_rebalance(); may return NULL when no client references
+ * exist or no channel offers @tx_type.
+ */
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+ return this_cpu_read(channel_table[tx_type]->chan);
+}
+EXPORT_SYMBOL(dma_find_channel);
+
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ *
+ * Walks the device list under RCU (no mutex needed) and issues pending
+ * work on every public channel that has at least one client.
+ */
+void dma_issue_pending_all(void)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ if (chan->client_count)
+ device->device_issue_pending(chan);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
+/*
+ * dma_get_slave_caps - fill @caps with the slave capabilities of @chan.
+ * Returns -EINVAL on NULL arguments and -ENXIO when the device does not
+ * support slave/cyclic transfers or does not report generic slave caps.
+ */
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+ struct dma_device *device;
+
+ if (!chan || !caps)
+ return -EINVAL;
+
+ device = chan->device;
+
+ /* check if the channel supports slave transactions */
+ if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
+ test_bit(DMA_CYCLIC, device->cap_mask.bits)))
+ return -ENXIO;
+
+ /*
+ * Check whether it reports it uses the generic slave
+ * capabilities, if not, that means it doesn't support any
+ * kind of slave capabilities reporting.
+ */
+ if (!device->directions)
+ return -ENXIO;
+
+ caps->src_addr_widths = device->src_addr_widths;
+ caps->dst_addr_widths = device->dst_addr_widths;
+ caps->directions = device->directions;
+ caps->min_burst = device->min_burst;
+ caps->max_burst = device->max_burst;
+ caps->max_sg_burst = device->max_sg_burst;
+ caps->residue_granularity = device->residue_granularity;
+ caps->descriptor_reuse = device->descriptor_reuse;
+ caps->cmd_pause = !!device->device_pause;
+ caps->cmd_resume = !!device->device_resume;
+ caps->cmd_terminate = !!device->device_terminate_all;
+
+ /*
+ * DMA engine device might be configured with non-uniformly
+ * distributed slave capabilities per device channels. In this
+ * case the corresponding driver may provide the device_caps
+ * callback to override the generic capabilities with
+ * channel-specific ones.
+ */
+ if (device->device_caps)
+ device->device_caps(chan, caps);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_caps);
+
+/*
+ * private_candidate - pick an unused channel on @dev that satisfies
+ * @mask and the optional filter @fn. Returns NULL when the device lacks
+ * the capabilities, would mix public/private channels, or no channel
+ * passes the filter.
+ */
+static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
+ struct dma_device *dev,
+ dma_filter_fn fn, void *fn_param)
+{
+ struct dma_chan *chan;
+
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
+ dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
+ return NULL;
+ }
+ /* devices with multiple channels need special handling as we need to
+ * ensure that all channels are either private or public.
+ */
+ if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
+ list_for_each_entry(chan, &dev->channels, device_node) {
+ /* some channels are already publicly allocated */
+ if (chan->client_count)
+ return NULL;
+ }
+
+ list_for_each_entry(chan, &dev->channels, device_node) {
+ if (chan->client_count) {
+ dev_dbg(dev->dev, "%s: %s busy\n",
+ __func__, dma_chan_name(chan));
+ continue;
+ }
+ if (fn && !fn(chan, fn_param)) {
+ dev_dbg(dev->dev, "%s: %s filter said false\n",
+ __func__, dma_chan_name(chan));
+ continue;
+ }
+ return chan;
+ }
+
+ return NULL;
+}
+
+/*
+ * find_candidate - find and grab a matching channel on @device.
+ * Returns the channel on success, ERR_PTR(err) when grabbing failed, or
+ * ERR_PTR(-EPROBE_DEFER) when no candidate exists yet.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *find_candidate(struct dma_device *device,
+ const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
+{
+ struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+ int err;
+
+ if (chan) {
+ /* Found a suitable channel, try to grab, prep, and return it.
+ * We first set DMA_PRIVATE to disable balance_ref_count as this
+ * channel will not be published in the general-purpose
+ * allocator
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
+ err = dma_chan_get(chan);
+
+ if (err) {
+ if (err == -ENODEV) {
+ dev_dbg(device->dev, "%s: %s module removed\n",
+ __func__, dma_chan_name(chan));
+ list_del_rcu(&device->global_node);
+ } else
+ dev_dbg(device->dev,
+ "%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+
+ /* drop DMA_PRIVATE again if we were the only private user */
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+ chan = ERR_PTR(err);
+ }
+ }
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ *
+ * Returns @chan marked DMA_PRIVATE on success, NULL when the channel is
+ * already in use or cannot be grabbed.
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ struct dma_device *device = chan->device;
+ int err;
+
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
+ err = dma_chan_get(chan);
+ if (err) {
+ dev_dbg(chan->device->dev,
+ "%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ chan = NULL;
+ /* undo DMA_PRIVATE if we were the only private user */
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+ }
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/*
+ * dma_get_any_slave_channel - grab any free DMA_SLAVE-capable channel
+ * on @device. Returns the channel or NULL (error pointers from
+ * find_candidate() are flattened to NULL here).
+ */
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ chan = find_candidate(device, &mask, NULL, NULL);
+
+ mutex_unlock(&dma_list_mutex);
+
+ return IS_ERR(chan) ? NULL : chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn()
+ * @np: device node to look for DMA channels
+ *
+ * Returns pointer to appropriate DMA channel on success or NULL.
+ */
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np)
+{
+ struct dma_device *device, *_d;
+ struct dma_chan *chan = NULL;
+
+ /* Find a channel */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ /* Finds a DMA controller with matching device node */
+ if (np && device->dev->of_node && np != device->dev->of_node)
+ continue;
+
+ chan = find_candidate(device, mask, fn, fn_param);
+ if (!IS_ERR(chan))
+ break;
+
+ chan = NULL;
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ /* NOTE(review): NULL for a %s argument relies on printk rendering
+ * it as "(null)" — fine in-kernel, but worth keeping in mind. */
+ pr_debug("%s: %s (%s)\n",
+ __func__,
+ chan ? "success" : "fail",
+ chan ? dma_chan_name(chan) : NULL);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(__dma_request_channel);
+
+/*
+ * dma_filter_match - look up the slave map entry for (@dev, @name) in
+ * @device's filter table. Returns the matching map or NULL.
+ */
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+ const char *name,
+ struct device *dev)
+{
+ int i;
+
+ if (!device->filter.mapcnt)
+ return NULL;
+
+ for (i = 0; i < device->filter.mapcnt; i++) {
+ const struct dma_slave_map *map = &device->filter.map[i];
+
+ /* match on both the client device name and the slave name */
+ if (!strcmp(map->devname, dev_name(dev)) &&
+ !strcmp(map->slave, name))
+ return map;
+ }
+
+ return NULL;
+}
+
+/**
+ * dma_request_chan - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Resolution order: device-tree, then ACPI, then the DMA filter maps.
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
+{
+ struct dma_device *d, *_d;
+ struct dma_chan *chan = NULL;
+
+ /* If device-tree is present get slave info from here */
+ if (dev->of_node)
+ chan = of_dma_request_slave_channel(dev->of_node, name);
+
+ /* If device was enumerated by ACPI get slave info from here */
+ if (has_acpi_companion(dev) && !chan)
+ chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+ /* propagate probe deferral so the client can retry later */
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
+
+ /* Try to find the channel via the DMA filter map(s) */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+ dma_cap_mask_t mask;
+ const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+ if (!map)
+ continue;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = find_candidate(d, &mask, d->filter.fn, map->param);
+ if (!IS_ERR(chan))
+ break;
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ if (IS_ERR(chan))
+ return chan;
+ if (!chan)
+ return ERR_PTR(-EPROBE_DEFER);
+
+found:
+#ifdef CONFIG_DEBUG_FS
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
+ name);
+#endif
+
+ /* on allocation failure the channel is still usable, just unnamed */
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return chan;
+ chan->slave = dev;
+
+ /* sysfs links are best-effort; failure only warns */
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan);
+
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer:
+ * -EPROBE_DEFER while no DMA devices are registered yet, -ENODEV otherwise.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+ struct dma_chan *chan;
+
+ if (!mask)
+ return ERR_PTR(-ENODEV);
+
+ chan = __dma_request_channel(mask, NULL, NULL, NULL);
+ if (!chan) {
+ mutex_lock(&dma_list_mutex);
+ if (list_empty(&dma_device_list))
+ chan = ERR_PTR(-EPROBE_DEFER);
+ else
+ chan = ERR_PTR(-ENODEV);
+ mutex_unlock(&dma_list_mutex);
+ }
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
+/*
+ * dma_release_channel - release a privately requested channel: drop the
+ * client reference, clear DMA_PRIVATE when the last private user goes
+ * away, and tear down the slave sysfs links/names created by
+ * dma_request_chan().
+ */
+void dma_release_channel(struct dma_chan *chan)
+{
+ mutex_lock(&dma_list_mutex);
+ WARN_ONCE(chan->client_count != 1,
+ "chan reference count %d != 1\n", chan->client_count);
+ dma_chan_put(chan);
+ /* drop PRIVATE cap enabled by __dma_request_channel() */
+ if (--chan->device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
+ if (chan->slave) {
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ kfree(chan->dbg_client_name);
+ chan->dbg_client_name = NULL;
+#endif
+ mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL_GPL(dma_release_channel);
+
+/**
+ * dmaengine_get - register interest in dma_channels
+ *
+ * Bumps the global client count and takes a reference on every public
+ * channel; devices whose module vanished are dropped from the list.
+ */
+void dmaengine_get(void)
+{
+ struct dma_device *device, *_d;
+ struct dma_chan *chan;
+ int err;
+
+ mutex_lock(&dma_list_mutex);
+ dmaengine_ref_count++;
+
+ /* try to grab channels */
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ err = dma_chan_get(chan);
+ if (err == -ENODEV) {
+ /* module removed before we could use it */
+ list_del_rcu(&device->global_node);
+ break;
+ } else if (err)
+ dev_dbg(chan->device->dev,
+ "%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ }
+ }
+
+ /* if this is the first reference and there were channels
+ * waiting we need to rebalance to get those channels
+ * incorporated into the channel table
+ */
+ if (dmaengine_ref_count == 1)
+ dma_channel_rebalance();
+ mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL(dmaengine_get);
+
+/**
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
+ *
+ * Counterpart of dmaengine_get(): drops the global client count and one
+ * reference per public channel.
+ */
+void dmaengine_put(void)
+{
+ struct dma_device *device, *_d;
+ struct dma_chan *chan;
+
+ mutex_lock(&dma_list_mutex);
+ dmaengine_ref_count--;
+ BUG_ON(dmaengine_ref_count < 0);
+ /* drop channel references */
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ dma_chan_put(chan);
+ }
+ mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL(dmaengine_put);
+
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+ /* A device that satisfies this test has channels that will never cause
+ * an async_tx channel switch event as all possible operation types can
+ * be handled.
+ */
+ #ifdef CONFIG_ASYNC_TX_DMA
+ if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+ return false;
+ #endif
+
+ #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
+ if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+ return false;
+ #endif
+
+ #if IS_ENABLED(CONFIG_ASYNC_XOR)
+ if (!dma_has_cap(DMA_XOR, device->cap_mask))
+ return false;
+
+ #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+ if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+ return false;
+ #endif
+ #endif
+
+ #if IS_ENABLED(CONFIG_ASYNC_PQ)
+ if (!dma_has_cap(DMA_PQ, device->cap_mask))
+ return false;
+
+ #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+ if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+ return false;
+ #endif
+ #endif
+
+ /* every async_tx-relevant capability enabled in this config is present */
+ return true;
+}
+
+/* Allocate a unique device id into @device->dev_id; 0 or -errno. */
+static int get_dma_id(struct dma_device *device)
+{
+ int rc = ida_alloc(&dma_ida, GFP_KERNEL);
+
+ if (rc < 0)
+ return rc;
+ device->dev_id = rc;
+ return 0;
+}
+
+/*
+ * __dma_async_device_channel_register - allocate the per-cpu state,
+ * sysfs device and chan_id for @chan and register it under @device.
+ * Returns 0 or -errno; on failure all partially-created state is undone.
+ */
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local)
+ return -ENOMEM;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ rc = -ENOMEM;
+ goto err_free_local;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+ * the channel. Otherwise we are static enumerating.
+ */
+ chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+ if (chan->chan_id < 0) {
+ pr_err("%s: unable to alloc ida for chan: %d\n",
+ __func__, chan->chan_id);
+ rc = chan->chan_id;
+ goto err_free_dev;
+ }
+
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->dev_id = device->dev_id;
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out_ida;
+ chan->client_count = 0;
+ device->chancnt++;
+
+ return 0;
+
+ err_out_ida:
+ ida_free(&device->chan_ida, chan->chan_id);
+ err_free_dev:
+ kfree(chan->dev);
+ err_free_local:
+ free_percpu(chan->local);
+ chan->local = NULL;
+ return rc;
+}
+
+/*
+ * dma_async_device_channel_register - public wrapper that registers a
+ * channel and then rebalances the per-cpu channel table.
+ */
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+/*
+ * __dma_async_device_channel_unregister - detach @chan from @device and
+ * release its id, sysfs device and per-cpu state. Warns if clients still
+ * hold references and the driver has no device_release() to fall back on.
+ */
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ ida_free(&device->chan_ida, chan->chan_id);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+/* Public wrapper: unregister @chan, then rebalance the channel table. */
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
+/**
+ * dma_async_device_register - registers DMA devices found
+ * @device: pointer to &struct dma_device
+ *
+ * Validates that every advertised capability has its prep callback,
+ * registers all channels in sysfs, then publishes the device on
+ * dma_device_list and rebalances the channel table.
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
+ */
+int dma_async_device_register(struct dma_device *device)
+{
+ int rc;
+ struct dma_chan* chan;
+
+ if (!device)
+ return -ENODEV;
+
+ /* validate device routines */
+ if (!device->dev) {
+ pr_err("DMAdevice must have dev\n");
+ return -EIO;
+ }
+
+ device->owner = device->dev->driver->owner;
+
+ /* each capability bit must come with its matching prep callback */
+ if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMCPY");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_XOR");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_XOR_VAL");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_PQ");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_PQ_VAL");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMSET");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_INTERRUPT");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_CYCLIC");
+ return -EIO;
+ }
+
+ if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_INTERLEAVE");
+ return -EIO;
+ }
+
+
+ if (!device->device_tx_status) {
+ dev_err(device->dev, "Device tx_status is not defined\n");
+ return -EIO;
+ }
+
+
+ if (!device->device_issue_pending) {
+ dev_err(device->dev, "Device issue_pending is not defined\n");
+ return -EIO;
+ }
+
+ if (!device->device_release)
+ dev_dbg(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
+ /* note: this only matters in the
+ * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
+ */
+ if (device_has_all_tx_types(device))
+ dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
+ rc = get_dma_id(device);
+ if (rc != 0)
+ return rc;
+
+ ida_init(&device->chan_ida);
+
+ /* represent channels in sysfs. Probably want devs too */
+ list_for_each_entry(chan, &device->channels, device_node) {
+ rc = __dma_async_device_channel_register(device, chan);
+ if (rc < 0)
+ goto err_out;
+ }
+
+ mutex_lock(&dma_list_mutex);
+ /* take references on public channels */
+ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ list_for_each_entry(chan, &device->channels, device_node) {
+ /* if clients are already waiting for channels we need
+ * to take references on their behalf
+ */
+ if (dma_chan_get(chan) == -ENODEV) {
+ /* note we can only get here for the first
+ * channel as the remaining channels are
+ * guaranteed to get a reference
+ */
+ rc = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+ goto err_out;
+ }
+ }
+ list_add_tail_rcu(&device->global_node, &dma_device_list);
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ device->privatecnt++; /* Always private */
+ dma_channel_rebalance();
+ mutex_unlock(&dma_list_mutex);
+
+ dmaengine_debug_register(device);
+
+ return 0;
+
+err_out:
+ /* if we never registered a channel just release the idr */
+ if (!device->chancnt) {
+ ida_free(&dma_ida, device->dev_id);
+ return rc;
+ }
+
+ /* otherwise unwind the channels that did register */
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (chan->local == NULL)
+ continue;
+ mutex_lock(&dma_list_mutex);
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(dma_async_device_register);
+
+/**
+ * dma_async_device_unregister - unregister a DMA device
+ * @device: pointer to &struct dma_device
+ *
+ * This routine is called by dma driver exit routines, dmaengine holds module
+ * references to prevent it being called while channels are in use.
+ */
+void dma_async_device_unregister(struct dma_device *device)
+{
+ struct dma_chan *chan, *n;
+
+ dmaengine_debug_unregister(device);
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
+
+ mutex_lock(&dma_list_mutex);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ dma_channel_rebalance();
+ ida_free(&dma_ida, device->dev_id);
+ dma_device_put(device); /* may trigger dma_device_release() */
+ mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL(dma_async_device_unregister);
+
+/* devres destructor: unregister the managed DMA device on driver detach. */
+static void dmam_device_release(struct device *dev, void *res)
+{
+ struct dma_device *device;
+
+ device = *(struct dma_device **)res;
+ dma_async_device_unregister(device);
+}
+
+/**
+ * dmaenginem_async_device_register - registers DMA devices found
+ * @device: pointer to &struct dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+ void *p;
+ int ret;
+
+ /* reserve the devres node first so registration can't outlive it */
+ p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = dma_async_device_register(device);
+ if (!ret) {
+ *(struct dma_device **)p = device;
+ devres_add(device->dev, p);
+ } else {
+ devres_free(p);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
+
+/* One mempool of dmaengine_unmap_data objects per address-count bucket. */
+struct dmaengine_unmap_pool {
+ struct kmem_cache *cache;
+ const char *name;
+ mempool_t *pool;
+ size_t size; /* number of dma_addr_t slots per object */
+};
+
+#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
+/* bucket sizes must stay in sync with __get_unmap_pool() below */
+static struct dmaengine_unmap_pool unmap_pool[] = {
+ __UNMAP_POOL(2),
+ #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ __UNMAP_POOL(16),
+ __UNMAP_POOL(128),
+ __UNMAP_POOL(256),
+ #endif
+};
+
+/* Map an address count @nr to the smallest pool bucket that fits it. */
+static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+{
+ int order = get_count_order(nr);
+
+ switch (order) {
+ case 0 ... 1: /* nr <= 2 */
+ return &unmap_pool[0];
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ case 2 ... 4: /* nr <= 16 */
+ return &unmap_pool[1];
+ case 5 ... 7: /* nr <= 128 */
+ return &unmap_pool[2];
+ case 8: /* nr <= 256 */
+ return &unmap_pool[3];
+#endif
+ default:
+ BUG();
+ return NULL;
+ }
+}
+
+/*
+ * kref release callback: unmap all recorded pages. Addresses are laid
+ * out in order: to_cnt TO_DEVICE entries, then from_cnt FROM_DEVICE
+ * entries, then bidi_cnt BIDIRECTIONAL entries (zero entries skipped).
+ */
+static void dmaengine_unmap(struct kref *kref)
+{
+ struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
+ struct device *dev = unmap->dev;
+ int cnt, i;
+
+ cnt = unmap->to_cnt;
+ for (i = 0; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_TO_DEVICE);
+ cnt += unmap->from_cnt;
+ for (; i < cnt; i++)
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_FROM_DEVICE);
+ cnt += unmap->bidi_cnt;
+ for (; i < cnt; i++) {
+ if (unmap->addr[i] == 0)
+ continue;
+ dma_unmap_page(dev, unmap->addr[i], unmap->len,
+ DMA_BIDIRECTIONAL);
+ }
+ cnt = unmap->map_cnt;
+ mempool_free(unmap, __get_unmap_pool(cnt)->pool);
+}
+
+/* Drop one reference on @unmap; NULL is tolerated. */
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+ if (unmap)
+ kref_put(&unmap->kref, dmaengine_unmap);
+}
+EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
+
+/* Tear down all unmap pools; safe to call on partially-initialized state. */
+static void dmaengine_destroy_unmap_pool(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+
+ mempool_destroy(p->pool);
+ p->pool = NULL;
+ kmem_cache_destroy(p->cache);
+ p->cache = NULL;
+ }
+}
+
+/*
+ * Create the slab caches and mempools backing dmaengine_get_unmap_data().
+ * On any allocation failure everything created so far is destroyed and
+ * -ENOMEM is returned.
+ */
+static int __init dmaengine_init_unmap_pool(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+ struct dmaengine_unmap_pool *p = &unmap_pool[i];
+ size_t size;
+
+ /* header plus p->size dma_addr_t slots (flexible array) */
+ size = sizeof(struct dmaengine_unmap_data) +
+ sizeof(dma_addr_t) * p->size;
+
+ p->cache = kmem_cache_create(p->name, size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!p->cache)
+ break;
+ p->pool = mempool_create_slab_pool(1, p->cache);
+ if (!p->pool)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(unmap_pool))
+ return 0;
+
+ dmaengine_destroy_unmap_pool();
+ return -ENOMEM;
+}
+
+/*
+ * Allocate a zeroed dmaengine_unmap_data with room for @nr addresses
+ * from the matching pool; the caller owns one kref. Returns NULL on
+ * allocation failure.
+ */
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ struct dmaengine_unmap_data *unmap;
+
+ unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
+ if (!unmap)
+ return NULL;
+
+ memset(unmap, 0, sizeof(*unmap));
+ kref_init(&unmap->kref);
+ unmap->dev = dev;
+ unmap->map_cnt = nr; /* remembered for pool selection on free */
+
+ return unmap;
+}
+EXPORT_SYMBOL(dmaengine_get_unmap_data);
+
+/* Bind a descriptor to its channel; the lock only exists for channel switch. */
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+ struct dma_chan *chan)
+{
+ tx->chan = chan;
+ #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ spin_lock_init(&tx->lock);
+ #endif
+}
+EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+
+/*
+ * Latch the metadata mode on first use and reject later attempts to mix
+ * modes: -ENOTSUPP if the channel can't do @mode, -EINVAL on a mismatch.
+ */
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Attach client-provided metadata to @desc (DESC_METADATA_CLIENT mode).
+ * Returns -EINVAL on NULL descriptor or mode mismatch, -ENOTSUPP when
+ * the driver provides no attach op.
+ */
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+/*
+ * Get a pointer into the engine-owned metadata buffer of @desc
+ * (DESC_METADATA_ENGINE mode). Returns the pointer or an ERR_PTR.
+ */
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+/*
+ * Set the valid payload length inside the engine-owned metadata buffer
+ * of @desc (DESC_METADATA_ENGINE mode). Returns 0 or -errno.
+ */
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * First spins until the descriptor has actually been submitted (cookie
+ * leaves -EBUSY), then delegates to dma_sync_wait(). 5 second timeout.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+ unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+ if (!tx)
+ return DMA_COMPLETE;
+
+ while (tx->cookie == -EBUSY) {
+ if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+ dev_err(tx->chan->device->dev,
+ "%s timeout waiting for descriptor submission\n",
+ __func__);
+ return DMA_ERROR;
+ }
+ cpu_relax();
+ }
+ return dma_sync_wait(tx->chan, tx->cookie);
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx: transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_async_tx_descriptor *dep = txd_next(tx);
+ struct dma_async_tx_descriptor *dep_next;
+ struct dma_chan *chan;
+
+ if (!dep)
+ return;
+
+ /* we'll submit tx->next now, so clear the link */
+ txd_clear_next(tx);
+ chan = dep->chan;
+
+ /* keep submitting up until a channel switch is detected
+ * in that case we will be called again as a result of
+ * processing the interrupt from async_tx_channel_switch
+ */
+ for (; dep; dep = dep_next) {
+ txd_lock(dep);
+ txd_clear_parent(dep);
+ dep_next = txd_next(dep);
+ if (dep_next && dep_next->chan == chan)
+ txd_clear_next(dep); /* ->next will be submitted */
+ else
+ dep_next = NULL; /* submit current dep and terminate */
+ txd_unlock(dep);
+
+ dep->tx_submit(dep);
+ }
+
+ /* kick the channel once after the whole same-channel run is queued */
+ chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
+/* Subsystem init: unmap pools, the dma class, and debugfs. */
+static int __init dma_bus_init(void)
+{
+ int err = dmaengine_init_unmap_pool();
+
+ if (err)
+ return err;
+
+ err = class_register(&dma_devclass);
+ if (!err)
+ dmaengine_debugfs_init();
+
+ return err;
+}
+arch_initcall(dma_bus_init);
diff --git a/snd-alpx/core/generic/6.2/dmaengine.h b/snd-alpx/core/generic/6.2/dmaengine.h
new file mode 100644
index 0000000..53f16d3
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/dmaengine.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and are not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channel's completed
+ * cookie marker. Zero the descriptor's cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ state->in_flight_bytes = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return cb->callback || cb->callback_result;
+}
+
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
+ return dma_dev->dbg_dev_root;
+}
+#else
+struct dentry;
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif
diff --git a/snd-alpx/core/generic/6.2/virt-dma.c b/snd-alpx/core/generic/6.2/virt-dma.c
new file mode 100644
index 0000000..a6f4265
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/virt-dma.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct virt_dma_desc, tx);
+}
+
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
+ vc, vd, cookie);
+
+ return cookie;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
+
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
+ * transfer.
+ *
+ * Returns 0 upon success
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+ vc, vd, vd->tx.cookie);
+ vc->desc_free(vd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd;
+
+ list_for_each_entry(vd, &vc->desc_issued, node)
+ if (vd->tx.cookie == cookie)
+ return vd;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_find_desc);
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void vchan_complete(struct tasklet_struct *t)
+{
+ struct virt_dma_chan *vc = from_tasklet(vc, t, task);
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+ dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+ vchan_vdesc_fini(vd);
+ }
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
+{
+ struct virt_dma_desc *vd, *_vd;
+
+ list_for_each_entry_safe(vd, _vd, head, node) {
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
+ }
+}
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
+
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
+{
+ dma_cookie_init(&vc->chan);
+
+ spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_allocated);
+ INIT_LIST_HEAD(&vc->desc_submitted);
+ INIT_LIST_HEAD(&vc->desc_issued);
+ INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
+
+ tasklet_setup(&vc->task, vchan_complete);
+
+ vc->chan.device = dmadev;
+ list_add_tail(&vc->chan.device_node, &dmadev->channels);
+}
+EXPORT_SYMBOL_GPL(vchan_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/6.2/virt-dma.h b/snd-alpx/core/generic/6.2/virt-dma.h
new file mode 100644
index 0000000..e9f5250
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/virt-dma.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ struct dmaengine_result tx_result;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_allocated;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+ struct list_head desc_terminated;
+
+ struct virt_dma_desc *cyclic;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ unsigned long flags;
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ vd->tx_result.result = DMA_TRANS_NOERROR;
+ vd->tx_result.residue = 0;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * @vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * @vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+
+ cookie = vd->tx.cookie;
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
+ vc->desc_free(vd);
+ }
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * @vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ list_add_tail(&vd->node, &vc->desc_terminated);
+
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * @vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_allocated, head);
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ LIST_HEAD(head);
+ unsigned long flags;
+
+ tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
diff --git a/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h b/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h
new file mode 100644
index 0000000..4ee96de
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DMA_XDMA_REGS_H
+#define __DMA_XDMA_REGS_H
+
+/* The length of register space exposed to host */
+#define XDMA_REG_SPACE_LEN 65536
+
+/*
+ * maximum number of DMA channels for each direction:
+ * Host to Card (H2C) or Card to Host (C2H)
+ */
+#define XDMA_MAX_CHANNELS 4
+
+/*
+ * macros to define the number of descriptor blocks that can be used in one
+ * DMA transfer request.
+ * The DMA engine uses a linked list of descriptor blocks that specify the
+ * source, destination, and length of the DMA transfers.
+ */
+#define XDMA_DESC_BLOCK_NUM BIT(7)
+#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1)
+
+/* descriptor definitions */
+#define XDMA_DESC_ADJACENT 32
+#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1)
+#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8)
+#define XDMA_DESC_MAGIC 0xad4bUL
+#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16)
+#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0)
+#define XDMA_DESC_STOPPED BIT(0)
+#define XDMA_DESC_COMPLETED BIT(1)
+#define XDMA_DESC_BLEN_BITS 28
+#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
+
+/* macros to construct the descriptor control word */
+#define XDMA_DESC_CONTROL(adjacent, flag) \
+ (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \
+ FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \
+ FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
+#define XDMA_DESC_CONTROL_LAST \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+#define XDMA_DESC_CONTROL_CYCLIC \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED)
+
+/*
+ * Descriptor for a single contiguous memory block transfer.
+ *
+ * Multiple descriptors are linked by means of the next pointer. An additional
+ * extra adjacent number gives the amount of extra contiguous descriptors.
+ *
+ * The descriptors are in root complex memory, and the bytes in the 32-bit
+ * words must be in little-endian byte ordering.
+ */
+struct xdma_hw_desc {
+ __le32 control;
+ __le32 bytes;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le64 next_desc;
+};
+
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 32
+#define XDMA_DESC_BLOCK_BOUNDARY 4096
+
+/*
+ * Channel registers
+ */
+#define XDMA_CHAN_IDENTIFIER 0x0
+#define XDMA_CHAN_CONTROL 0x4
+#define XDMA_CHAN_CONTROL_W1S 0x8
+#define XDMA_CHAN_CONTROL_W1C 0xc
+#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_STATUS_RC 0x44
+#define XDMA_CHAN_COMPLETED_DESC 0x48
+#define XDMA_CHAN_ALIGNMENTS 0x4c
+#define XDMA_CHAN_INTR_ENABLE 0x90
+#define XDMA_CHAN_INTR_ENABLE_W1S 0x94
+#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c
+
+#define XDMA_CHAN_STRIDE 0x100
+#define XDMA_CHAN_H2C_OFFSET 0x0
+#define XDMA_CHAN_C2H_OFFSET 0x1000
+#define XDMA_CHAN_H2C_TARGET 0x0
+#define XDMA_CHAN_C2H_TARGET 0x1
+
+/* macro to check if channel is available */
+#define XDMA_CHAN_MAGIC 0x1fc0
+#define XDMA_CHAN_CHECK_TARGET(id, target) \
+ (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target))
+
+/* bits of the channel control register */
+#define CHAN_CTRL_RUN_STOP BIT(0)
+#define CHAN_CTRL_IE_DESC_STOPPED BIT(1)
+#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2)
+#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
+#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
+#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
+#define CHAN_CTRL_POLL_MODE_WB BIT(26)
+#define CHAN_CTRL_TRANSFER_INFO_WB BIT(27)
+
+#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \
+ CHAN_CTRL_IE_DESC_STOPPED | \
+ CHAN_CTRL_IE_DESC_COMPLETED | \
+ CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+/* bits of the channel interrupt enable mask */
+#define CHAN_IM_DESC_ERROR BIT(19)
+#define CHAN_IM_READ_ERROR BIT(9)
+#define CHAN_IM_IDLE_STOPPED BIT(6)
+#define CHAN_IM_MAGIC_STOPPED BIT(4)
+#define CHAN_IM_DESC_COMPLETED BIT(2)
+#define CHAN_IM_DESC_STOPPED BIT(1)
+
+#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \
+ CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \
+ CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED)
+
+/*
+ * Channel SGDMA registers
+ */
+#define XDMA_SGDMA_IDENTIFIER 0x4000
+#define XDMA_SGDMA_DESC_LO 0x4080
+#define XDMA_SGDMA_DESC_HI 0x4084
+#define XDMA_SGDMA_DESC_ADJ 0x4088
+#define XDMA_SGDMA_DESC_CREDIT 0x408c
+
+/*
+ * interrupt registers
+ */
+#define XDMA_IRQ_IDENTIFIER 0x2000
+#define XDMA_IRQ_USER_INT_EN 0x2004
+#define XDMA_IRQ_USER_INT_EN_W1S 0x2008
+#define XDMA_IRQ_USER_INT_EN_W1C 0x200c
+#define XDMA_IRQ_CHAN_INT_EN 0x2010
+#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014
+#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018
+#define XDMA_IRQ_USER_INT_REQ 0x2040
+#define XDMA_IRQ_CHAN_INT_REQ 0x2044
+#define XDMA_IRQ_USER_INT_PEND 0x2048
+#define XDMA_IRQ_CHAN_INT_PEND 0x204c
+#define XDMA_IRQ_USER_VEC_NUM 0x2080
+#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0
+
+#define XDMA_IRQ_VEC_SHIFT 8
+
+#endif /* __DMA_XDMA_REGS_H */
diff --git a/snd-alpx/core/generic/6.2/xilinx/xdma.c b/snd-alpx/core/generic/6.2/xilinx/xdma.c
new file mode 100644
index 0000000..6c89145
--- /dev/null
+++ b/snd-alpx/core/generic/6.2/xilinx/xdma.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be both in
+ * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+
+#include <linux/regmap.h>
+
+#if defined (CONFIG_KERNEL_REDHAT)
+ #warning REDHAT Kernel
+ #if KERNEL_VERSION(5, 19, 0) <= LINUX_VERSION_CODE
+ /* Use Generic include */
+ #include "../../../../include/5.6/virt-dma.h"
+ #elif KERNEL_VERSION(4, 18, 0) <= LINUX_VERSION_CODE
+ /* Use Generic include : files equal !! */
+ #warning ReadHat 4.18 at least
+ #include "../../../../include/5.6/virt-dma.h"
+ #else
+ #error Redhat kernel NOT Supported
+ #endif
+
+#else
+ /* Generic Kernels */
+ #warning "Generic Kernels"
+ #if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE
+ #include "../virt-dma.h"
+ #elif KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+ #include "../../../../include/5.6/virt-dma.h"
+ #elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+ #include "../../../../include/5.3/virt-dma.h"
+ #else
+ #include "../../../../include/4.16/virt-dma.h"
+ #endif
+
+#endif
+
+
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include <linux/dmaengine.h>
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/dmaengine.h"
+#else
+#include "../../../../include/4.16/dmaengine.h"
+#endif
+
+#include "../../../../include/6.2/amd_xdma.h"
+#include <linux/platform_device.h>
+#include "../amd_xdma.h"
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_c2h_write_back - Write-back block, written by the XDMA.
+ * @magic_status_bit: magic value (0x52B4) once written
+ * @length: effective transfer length (in bytes)
+ * @padding_1: padding to align the block on 32 bytes
+ * @dma_addr: associated DMA address
+ */
+struct xdma_c2h_write_back {
+ __le32 magic_status_bit;
+ __le32 length;
+ u32 padding_1[6];
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ * @write_back: C2H metadata write-back block
+ */
+struct xdma_chan {
+ struct virt_dma_chan vchan;
+ void *xdev_hdl;
+ u32 base;
+ struct dma_pool *desc_pool;
+ bool busy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ u32 irq;
+ struct xdma_c2h_write_back* write_back;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
+ */
+struct xdma_desc {
+ struct virt_dma_desc vdesc;
+ struct xdma_chan *chan;
+ enum dma_transfer_direction dir;
+ struct xdma_desc_block *desc_blocks;
+ u32 dblk_num;
+ u32 desc_num;
+ u32 completed_desc_num;
+ bool cyclic;
+ bool interleaved_dma;
+ u32 periods;
+ u32 period_size;
+ u32 frames_left;
+ bool error;
+};
+
+#define XDMA_DEV_STATUS_REG_DMA BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *rmap;
+ struct xdma_chan *h2c_chans;
+ struct xdma_chan *c2h_chans;
+ u32 h2c_chan_num;
+ u32 c2h_chan_num;
+ u32 irq_start;
+ u32 irq_num;
+ u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...) \
+ dev_err(&(xdev)->pdev->dev, fmt, ##args)
+#define XDMA_CHAN_NUM(_xd) ({ \
+ typeof(_xd) (xd) = (_xd); \
+ ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
+
+/* Get the last desc in a desc block */
+static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
+{
+ return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
+}
+
+/**
+ * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ u32 last_blk_desc, desc_control;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
+ for (i = 1; i < sw_desc->dblk_num; i++) {
+ block = &sw_desc->desc_blocks[i - 1];
+ desc = xdma_blk_last_desc(block);
+
+ if (!(i & XDMA_DESC_BLOCK_MASK)) {
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+ continue;
+ }
+ desc->control = cpu_to_le32(desc_control);
+ desc->next_desc = cpu_to_le64(block[1].dma_addr);
+ }
+
+ /* update the last block */
+ last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
+ desc = xdma_blk_last_desc(block);
+ desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
+ desc->control = cpu_to_le32(desc_control);
+ }
+
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
+ desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+}
+
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ block = sw_desc->desc_blocks;
+ for (i = 0; i < sw_desc->desc_num - 1; i++) {
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+ }
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
+static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xdma_chan, vchan.chan);
+}
+
+static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct xdma_desc, vdesc);
+}
+
+/**
+ * xdma_channel_init - Initialize DMA channel registers
+ * @chan: DMA channel pointer
+ */
+static int xdma_channel_init(struct xdma_chan *chan)
+{
+ struct xdma_device *xdev = chan->xdev_hdl;
+ int ret;
+ unsigned int reg_ctrl = 0;
+
+ regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, &reg_ctrl);
+ dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl);
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_NON_INCR_ADDR | CHAN_CTRL_TRANSFER_INFO_WB);
+ if (ret)
+ return ret;
+
+ regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, &reg_ctrl);
+ dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl);
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
+ CHAN_IM_ALL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_free_desc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void xdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct xdma_desc *sw_desc;
+ int i;
+
+ sw_desc = to_xdma_desc(vdesc);
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ if (!sw_desc->desc_blocks[i].virt_addr)
+ break;
+ dma_pool_free(sw_desc->chan->desc_pool,
+ sw_desc->desc_blocks[i].virt_addr,
+ sw_desc->desc_blocks[i].dma_addr);
+ }
+ kfree(sw_desc->desc_blocks);
+ kfree(sw_desc);
+}
+
+/**
+ * xdma_alloc_desc - Allocate descriptor
+ * @chan: DMA channel pointer
+ * @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
+ *
+ * Allocates the software descriptor plus enough hardware descriptor blocks
+ * (XDMA_DESC_ADJACENT descriptors per block) to hold @desc_num entries,
+ * pre-programs each hardware descriptor's control word, and links the
+ * blocks for either cyclic or scatter-gather operation.  Uses GFP_NOWAIT
+ * allocations throughout (prep callbacks may run in atomic context —
+ * NOTE(review): assumed, confirm against callers).  Returns NULL on any
+ * allocation failure.
+ */
+static struct xdma_desc *
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
+{
+ struct xdma_desc *sw_desc;
+ struct xdma_hw_desc *desc;
+ dma_addr_t dma_addr;
+ u32 dblk_num;
+ u32 control;
+ void *addr;
+ int i, j;
+
+ sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->chan = chan;
+ sw_desc->desc_num = desc_num;
+ sw_desc->cyclic = cyclic;
+ sw_desc->error = false;
+ dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
+ sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
+ GFP_NOWAIT);
+ if (!sw_desc->desc_blocks)
+ goto failed;
+
+ if (cyclic)
+ control = XDMA_DESC_CONTROL_CYCLIC;
+ else
+ control = XDMA_DESC_CONTROL(1, 0);
+
+ /*
+ * dblk_num is only set after desc_blocks is allocated, so the failure
+ * path below can hand a partially built descriptor to xdma_free_desc()
+ * without it walking an unallocated array.
+ */
+ sw_desc->dblk_num = dblk_num;
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
+ if (!addr)
+ goto failed;
+
+ sw_desc->desc_blocks[i].virt_addr = addr;
+ sw_desc->desc_blocks[i].dma_addr = dma_addr;
+ /* pre-set the control word of every descriptor in the block */
+ for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
+ desc[j].control = cpu_to_le32(control);
+ }
+
+ if (cyclic)
+ xdma_link_cyclic_desc_blocks(sw_desc);
+ else
+ xdma_link_sg_desc_blocks(sw_desc);
+
+ return sw_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_xfer_start - Start DMA transfer
+ * @xchan: DMA channel pointer
+ *
+ * Programs the engine with the first uncompleted descriptor block of the
+ * next issued descriptor and kicks off the transfer.  The vchan lock must
+ * be held by the caller.
+ *
+ * Return: 0 on success; -EINVAL if there is no pending descriptor, the
+ * channel is busy, or the descriptor direction does not match the channel;
+ * otherwise a negative regmap error code.
+ */
+static int xdma_xfer_start(struct xdma_chan *xchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct xdma_desc_block *block;
+ u32 val, completed_blocks;
+ struct xdma_desc *desc;
+ int ret;
+
+ /*
+ * check if there is not any submitted descriptor or channel is busy.
+ * vchan lock should be held where this function is called.
+ */
+ if (!vd || xchan->busy)
+ return -EINVAL;
+
+ /* clear run stop bit to get ready for transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ desc = to_xdma_desc(vd);
+ if (desc->dir != xchan->dir) {
+ xdma_err(xdev, "incorrect request direction");
+ return -EINVAL;
+ }
+
+ /* set DMA engine to the first descriptor block */
+ completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+ block = &desc->desc_blocks[completed_blocks];
+ val = lower_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+ if (ret)
+ return ret;
+
+ val = upper_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+ if (ret)
+ return ret;
+
+ /* number of adjacent descriptors in the first (possibly last) block */
+ if (completed_blocks + 1 == desc->dblk_num)
+ val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ else
+ val = XDMA_DESC_ADJACENT - 1;
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Kick off the DMA transfer: clear then set START through the W1C/W1S
+ * registers so the engine sees a 0->1 transition even if the bit was
+ * already set.  Both writes are checked — a failed clear would leave
+ * the engine un-restarted with busy never set.
+ */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1S,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ xchan->busy = true;
+
+ return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ *
+ * Clears the RUN_STOP control bit and drains the channel status register.
+ * Returns 0 on success or a negative regmap error code.
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ int ret;
+ u32 val;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* clear run stop bit to prevent any further auto-triggering */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear the channel status register.  The _RC register variant is
+ * clear-on-read, so the value itself is discarded — the read is the
+ * side effect.
+ */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ *
+ * Probes up to pdata->max_dma_channels register slots in the given
+ * direction (H2C or C2H) in two passes: the first pass counts slots whose
+ * identifier matches the direction's target, the second allocates and
+ * initializes one xdma_chan per matching slot and registers it with the
+ * virt-dma framework.  Returns 0 on success or a negative error code.
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+ enum dma_transfer_direction dir)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+ struct xdma_chan **chans, *xchan;
+ u32 base, identifier, target;
+ u32 *chan_num;
+ int i, j, ret;
+
+ /* select per-direction register base and bookkeeping fields */
+ if (dir == DMA_MEM_TO_DEV) {
+ base = XDMA_CHAN_H2C_OFFSET;
+ target = XDMA_CHAN_H2C_TARGET;
+ chans = &xdev->h2c_chans;
+ chan_num = &xdev->h2c_chan_num;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ base = XDMA_CHAN_C2H_OFFSET;
+ target = XDMA_CHAN_C2H_TARGET;
+ chans = &xdev->c2h_chans;
+ chan_num = &xdev->c2h_chan_num;
+ } else {
+ xdma_err(xdev, "invalid direction specified");
+ return -EINVAL;
+ }
+
+ /* detect number of available DMA channels */
+ for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ /* check if it is available DMA channel */
+ if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+ (*chan_num)++;
+ }
+
+ if (!*chan_num) {
+ xdma_err(xdev, "does not probe any channel");
+ return -EINVAL;
+ }
+
+ *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+ GFP_KERNEL);
+ if (!*chans)
+ return -ENOMEM;
+
+ /* second pass: bind each matching slot to the next xdma_chan entry */
+ for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+ continue;
+
+ /* guards against the identifier set changing between passes */
+ if (j == *chan_num) {
+ xdma_err(xdev, "invalid channel number");
+ return -EIO;
+ }
+
+ /* init channel structure and hardware */
+ xchan = &(*chans)[j];
+ xchan->xdev_hdl = xdev;
+ xchan->base = base + i * XDMA_CHAN_STRIDE;
+ xchan->dir = dir;
+
+ ret = xdma_channel_init(xchan);
+ if (ret)
+ return ret;
+ xchan->vchan.desc_free = xdma_free_desc;
+ vchan_init(&xchan->vchan, &xdev->dma_dev);
+
+ j++;
+ }
+
+ dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
+ (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
+
+ return 0;
+}
+
+/**
+ * xdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ *
+ * Moves submitted descriptors onto the issued list and, if any were
+ * pending, starts the hardware.  Both steps run under the vchan lock.
+ */
+static void xdma_issue_pending(struct dma_chan *chan)
+{
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&xchan->vchan.lock, irq_flags);
+ if (vchan_issue_pending(&xchan->vchan))
+ xdma_xfer_start(xchan);
+ spin_unlock_irqrestore(&xchan->vchan.lock, irq_flags);
+}
+
+/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ *
+ * Stops the hardware, completes and terminates the currently active
+ * descriptor (if any), and moves every remaining descriptor onto the
+ * terminated list for later reclamation by xdma_synchronize().
+ * Always returns 0.
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ /* halt the engine before touching the descriptor lists */
+ xdma_xfer_stop(xdma_chan);
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ xdma_chan->busy = false;
+ /* the head of desc_issued is the in-flight descriptor, if any */
+ vd = vchan_next_desc(&xdma_chan->vchan);
+ if (vd) {
+ list_del(&vd->node);
+ dma_cookie_complete(&vd->tx);
+ vchan_terminate_vdesc(vd);
+ }
+ vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+ list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ *
+ * Waits for outstanding callbacks and reclaims descriptors left on the
+ * terminated list by xdma_terminate_all().
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(&to_xdma_chan(chan)->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
+ *
+ * Return: the number of hardware descriptors consumed for this chunk.
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
+ /* locate the block and the in-block slot of the starting descriptor */
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+
+ /* NOTE(review): dev_dbg() with a NULL device loses the dev prefix */
+ dev_dbg(NULL, "desc[%u]:%p {src:0x%llx, dst: 0x%llx, length: %u}",
+ desc_num,
+ desc,
+ src_addr,
+ dst_addr,
+ len);
+
+ /* advance to the next block when the current one is exhausted */
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: the prepared tx descriptor, or NULL on allocation failure.
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc *sw_desc;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
+ struct scatterlist *sg;
+
+ /* one hardware descriptor per XDMA_DESC_BLEN_MAX chunk per segment */
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
+
+ /*
+ * One side of the copy is the fixed device address; the other tracks
+ * the scatterlist.  For C2H with no configured source address, fall
+ * back to the channel's write-back block.
+ */
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ /*
+ * Fixed here: the old debug print dereferenced 'sg' after for_each_sg
+ * had advanced it past the end of the list (stale/NULL pointer), and
+ * the direction labels were swapped (MEM_TO_DEV is host-to-card, H2C).
+ */
+ dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %u}",
+ dir == DMA_MEM_TO_DEV ? "H2C" : "C2H",
+ sw_desc,
+ src,
+ dst,
+ sg_dma_len(sgl));
+
+ desc_num = 0;
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+ dev_addr += sg_dma_len(sg);
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ *
+ * Return: the prepared tx descriptor, or NULL if the period size/count
+ * exceeds one descriptor block or allocation fails.
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+ size_t size, size_t period_size,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ unsigned int periods = size / period_size;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc *sw_desc;
+ u64 addr, dev_addr, *src, *dst;
+ u32 desc_num = 0;
+ unsigned int i;
+
+ /*
+ * Simplify the whole logic by preventing an abnormally high number of
+ * periods and periods size.
+ */
+ if (period_size > XDMA_DESC_BLEN_MAX) {
+ xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+ return NULL;
+ }
+
+ if (periods > XDMA_DESC_ADJACENT) {
+ xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+ return NULL;
+ }
+
+ /* one hardware descriptor per period, linked cyclically */
+ sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->periods = periods;
+ sw_desc->period_size = period_size;
+ sw_desc->dir = dir;
+ sw_desc->interleaved_dma = false;
+
+ addr = address;
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ /* fall back to the write-back block when no source is configured */
+ dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ /* fixed here: direction labels were swapped (MEM_TO_DEV is H2C) */
+ dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %lu}",
+ dir == DMA_MEM_TO_DEV ? "H2C" : "C2H",
+ sw_desc,
+ src,
+ dst,
+ period_size);
+
+ for (i = 0; i < periods; i++) {
+ xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num++);
+ addr += period_size;
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ *
+ * Builds one hardware descriptor chunk per template row, honouring the
+ * inter-chunk gaps and the src/dst increment flags.  DMA_PREP_REPEAT in
+ * @flags makes the transfer cyclic.  Returns NULL on allocation failure.
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ int i;
+ u32 desc_num = 0, period_size = 0;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ struct xdma_desc *sw_desc;
+ u64 src_addr, dst_addr;
+
+ /* count hardware descriptors needed across all rows of one frame */
+ for (i = 0; i < xt->frame_size; ++i)
+ desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = xt->dir;
+ sw_desc->interleaved_dma = true;
+ /* DMA_PREP_REPEAT turns the interleaved transfer into a cyclic one */
+ sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+ sw_desc->frames_left = xt->numf;
+ sw_desc->periods = xt->numf;
+
+ desc_num = 0;
+ src_addr = xt->src_start;
+ dst_addr = xt->dst_start;
+ for (i = 0; i < xt->frame_size; ++i) {
+ desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+ /* advance by the inter-chunk gap plus chunk size when incrementing */
+ src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+ xt->sgl[i].size : 0);
+ dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+ xt->sgl[i].size : 0);
+ period_size += xt->sgl[i].size;
+ }
+ /* one period == one full frame worth of data */
+ sw_desc->period_size = period_size;
+
+ tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+ if (tx_desc)
+ return tx_desc;
+
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_device_config - Configure the DMA channel
+ * @chan: DMA channel
+ * @cfg: channel configuration
+ *
+ * Stores a copy of @cfg on the channel for later use by the prep
+ * callbacks.  Always returns 0.
+ */
+static int xdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+
+ /* struct assignment copies the full slave config, as memcpy did */
+ xchan->cfg = *cfg;
+
+ return 0;
+}
+
+/**
+ * xdma_free_chan_resources - Free channel resources
+ * @chan: DMA channel
+ *
+ * Releases all virt-dma descriptors, returns the C2H write-back block to
+ * the descriptor pool and destroys the pool.  Assumes write_back was
+ * successfully allocated in xdma_alloc_chan_resources() — NOTE(review):
+ * confirm this callback cannot run after a failed alloc.
+ */
+static void xdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ vchan_free_chan_resources(&xdma_chan->vchan);
+ /* the write-back block lives in the pool; free it before destroy */
+ dma_pool_free(xdma_chan->desc_pool,
+ xdma_chan->write_back,
+ xdma_chan->write_back->dma_addr);
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+}
+
+/**
+ * xdma_alloc_chan_resources - Allocate channel resources
+ * @chan: DMA channel
+ *
+ * Creates the per-channel descriptor DMA pool (sized to also hold the C2H
+ * write-back block) on the underlying PCI device and allocates the
+ * write-back block out of it.
+ *
+ * Return: 0 on success, -EINVAL if no PCI ancestor device is found,
+ * -ENOMEM on allocation failure.
+ */
+static int xdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct device *dev = xdev->dma_dev.dev;
+ dma_addr_t write_back_addr;
+
+ /* walk up to the PCI device that actually does the DMA mapping */
+ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+ if (!dev) {
+ xdma_err(xdev, "unable to find pci device");
+ return -EINVAL;
+ }
+
+ /* Allocate the pool WITH the H2C write back */
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE +
+ sizeof(struct xdma_c2h_write_back),
+ XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
+ if (!xdma_chan->desc_pool) {
+ xdma_err(xdev, "unable to allocate descriptor pool");
+ return -ENOMEM;
+ }
+
+ /* Allocate the C2H write back out of the pool */
+ xdma_chan->write_back = dma_pool_alloc(xdma_chan->desc_pool, GFP_NOWAIT, &write_back_addr);
+ if (!xdma_chan->write_back) {
+ xdma_err(xdev, "unable to allocate C2H write back block");
+ /* fixed here: the pool was leaked on this error path */
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+ return -ENOMEM;
+ }
+
+ /* %pad is the kernel's printk specifier for dma_addr_t */
+ dev_dbg(dev, "C2H write_back : %p, dma_addr: %pad", xdma_chan->write_back, &write_back_addr);
+
+ xdma_chan->write_back->dma_addr = write_back_addr;
+
+ return 0;
+}
+
+/*
+ * xdma_tx_status - Report cookie status and, for cyclic transfers, the
+ * residue (bytes remaining in the current cycle).  Errors detected by the
+ * ISR are reported as DMA_ERROR.
+ */
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_desc *desc = NULL;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int period_idx;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ /* descriptor may already be gone; then just report the cookie state */
+ vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ /* residue: full periods not yet completed in this cycle */
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return ret;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ *
+ * Reads (and thereby clears) the channel status, then advances the active
+ * descriptor according to its type: interleaved transfers complete frame
+ * by frame (optionally repeating), scatter-gather transfers complete when
+ * all descriptors are done, and plain cyclic transfers just fire the
+ * cyclic callback.  Always returns IRQ_HANDLED.
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+ struct xdma_chan *xchan = dev_id;
+ u32 complete_desc_num = 0;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
+ struct xdma_desc *desc;
+ int ret;
+ u32 st;
+ bool repeat_tx;
+
+ spin_lock(&xchan->vchan.lock);
+
+ /* get submitted request */
+ vd = vchan_next_desc(&xchan->vchan);
+ if (!vd)
+ goto out;
+
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+
+ /* any error bit, or neither completion bit set, aborts the request */
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+ &complete_desc_num);
+ if (ret)
+ goto out;
+
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ /* a full ring completed: restart for the next chunk of the frame */
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
+ goto out;
+ }
+
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
+
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ /* rewind and replay the same descriptor */
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
+
+ /* overrun, or a partial ring: nothing more to kick off here */
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
+
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+ } else {
+ /* plain cyclic: hardware keeps running, just report progress */
+ desc->completed_desc_num = complete_desc_num;
+ vchan_cyclic_callback(vd);
+ }
+
+out:
+ spin_unlock(&xchan->vchan.lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xdma_irq_fini - Uninitialize IRQ
+ * @xdev: DMA device pointer
+ *
+ * Masks all channel interrupts, then releases every H2C and C2H channel
+ * IRQ registered by xdma_irq_init().
+ */
+static void xdma_irq_fini(struct xdma_device *xdev)
+{
+ int i;
+
+ /* disable interrupt */
+ regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
+
+ /* free irq handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ for (i = 0; i < xdev->c2h_chan_num; i++)
+ free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
+}
+
+/**
+ * xdma_set_vector_reg - configure hardware IRQ registers
+ * @xdev: DMA device pointer
+ * @vec_tbl_start: Start of IRQ registers
+ * @irq_start: Start of IRQ
+ * @irq_num: Number of IRQ
+ *
+ * Packs consecutive vector numbers into the vector table, four 8-bit
+ * entries per 32-bit register.  Returns 0 on success or a negative regmap
+ * error code.
+ */
+static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
+ u32 irq_start, u32 irq_num)
+{
+ u32 shift, i, val = 0;
+ int ret;
+
+ /* Each IRQ register is 32 bit and contains 4 IRQs */
+ while (irq_num > 0) {
+ for (i = 0; i < 4; i++) {
+ shift = XDMA_IRQ_VEC_SHIFT * i;
+ val |= irq_start << shift;
+ irq_start++;
+ irq_num--;
+ /* stop mid-register when the last vector has been packed */
+ if (!irq_num)
+ break;
+ }
+
+ /* write IRQ register */
+ ret = regmap_write(xdev->rmap, vec_tbl_start, val);
+ if (ret)
+ return ret;
+ vec_tbl_start += sizeof(u32);
+ val = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * xdma_irq_init - initialize IRQs
+ * @xdev: DMA device pointer
+ *
+ * Requests one IRQ per H2C and C2H channel (in that order, starting at
+ * xdev->irq_start), programs the channel and user vector tables, and
+ * enables channel interrupts.  On failure, already-requested IRQs are
+ * released in reverse order.  Returns 0 on success or a negative errno.
+ */
+static int xdma_irq_init(struct xdma_device *xdev)
+{
+ u32 irq = xdev->irq_start;
+ u32 user_irq_start;
+ int i, j, ret;
+
+ /* return failure if there are not enough IRQs */
+ if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
+ xdma_err(xdev, "not enough irq");
+ return -EINVAL;
+ }
+
+ /* setup H2C interrupt handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-h2c-channel", &xdev->h2c_chans[i]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ i, irq, ret);
+ goto failed_init_h2c;
+ }
+ xdev->h2c_chans[i].irq = irq;
+ irq++;
+ }
+
+ /* setup C2H interrupt handler */
+ for (j = 0; j < xdev->c2h_chan_num; j++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-c2h-channel", &xdev->c2h_chans[j]);
+ if (ret) {
+ xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
+ j, irq, ret);
+ goto failed_init_c2h;
+ }
+ xdev->c2h_chans[j].irq = irq;
+ irq++;
+ }
+
+ /* config hardware IRQ registers */
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
+ XDMA_CHAN_NUM(xdev));
+ if (ret) {
+ xdma_err(xdev, "failed to set channel vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+
+ /* config user IRQ registers if needed */
+ user_irq_start = XDMA_CHAN_NUM(xdev);
+ if (xdev->irq_num > user_irq_start) {
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
+ user_irq_start,
+ xdev->irq_num - user_irq_start);
+ if (ret) {
+ xdma_err(xdev, "failed to set user vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+ }
+
+ /* enable interrupt */
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
+ if (ret)
+ goto failed_init_c2h;
+
+ return 0;
+
+/* unwind: i/j hold the first un-requested index in their loops */
+failed_init_c2h:
+ while (j--)
+ free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
+failed_init_h2c:
+ while (i--)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ return ret;
+}
+
+/* dmaengine filter: match a channel on the requested transfer direction */
+static bool xdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct xdma_chan_info *chan_info = param;
+
+ return to_xdma_chan(chan)->dir == chan_info->dir;
+}
+
+/**
+ * xdma_disable_user_irq - Disable user interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ *
+ * Masks the user-logic interrupt corresponding to @irq_num.  User IRQs
+ * occupy the vector range after the channel IRQs; out-of-range numbers
+ * are rejected with an error message.
+ */
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return;
+ }
+ /* convert to a 0-based user-IRQ bit position */
+ index -= XDMA_CHAN_NUM(xdev);
+
+ regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
+}
+EXPORT_SYMBOL(xdma_disable_user_irq);
+
+/**
+ * xdma_enable_user_irq - Enable user logic interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ *
+ * Unmasks the user-logic interrupt corresponding to @irq_num.  Returns 0
+ * on success, -EINVAL for an out-of-range IRQ number, or a negative
+ * regmap error code.
+ */
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+ int ret;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return -EINVAL;
+ }
+ /* convert to a 0-based user-IRQ bit position */
+ index -= XDMA_CHAN_NUM(xdev);
+
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_enable_user_irq);
+
+/**
+ * xdma_get_user_irq - Get system IRQ number
+ * @pdev: Pointer to the platform_device structure
+ * @user_irq_index: User logic IRQ wire index
+ *
+ * User IRQ vectors follow the channel vectors within the device's IRQ
+ * resource range.
+ *
+ * Return: The system IRQ number allocated for the given wire index, or
+ * -EINVAL if the index lies outside the device's IRQ range.
+ */
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq index");
+ return -EINVAL;
+ }
+
+ return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
+}
+EXPORT_SYMBOL(xdma_get_user_irq);
+
+/**
+ * xdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Tears down only what probe completed, tracked via xdev->status flags;
+ * also called from the probe failure path.  Always returns 0.
+ */
+static int xdma_remove(struct platform_device *pdev)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
+ xdma_irq_fini(xdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
+ dma_async_device_unregister(&xdev->dma_dev);
+
+ return 0;
+}
+
+/**
+ * xdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Maps the register BAR, detects H2C/C2H channels, registers the
+ * dmaengine device and sets up interrupts.  On any failure the partially
+ * initialized state is torn down via xdma_remove().
+ * NOTE(review): pdata is dereferenced without a NULL check — confirm every
+ * registrant of the "xdma" platform device supplies platform data.
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct xdma_device *xdev;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret = -ENODEV;
+
+ if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
+ dev_err(&pdev->dev, "invalid max dma channels %d",
+ pdata->max_dma_channels);
+ return -EINVAL;
+ }
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xdev);
+ xdev->pdev = pdev;
+
+ /* the IRQ resource describes a contiguous vector range */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get irq resource");
+ goto failed;
+ }
+ xdev->irq_start = res->start;
+ xdev->irq_num = resource_size(res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get io resource");
+ goto failed;
+ }
+
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ xdma_err(xdev, "ioremap failed");
+ goto failed;
+ }
+
+ /* debug-only dump of the regmap configuration actually used */
+ dev_dbg(&pdev->dev, " %s - config: %p (%lu bytes), reg_bits:%d, reg_stride:%d, pad_bits:%d, val_bits:%d, &val_bits:%p",
+ __func__,
+ &xdma_regmap_config,
+ sizeof(struct regmap_config),
+ xdma_regmap_config.reg_bits,
+ xdma_regmap_config.reg_stride,
+ xdma_regmap_config.pad_bits,
+ xdma_regmap_config.val_bits,
+ &xdma_regmap_config.val_bits);
+
+ xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+ &xdma_regmap_config);
+ if (IS_ERR(xdev->rmap)) {
+ ret = PTR_ERR(xdev->rmap);
+ xdma_err(xdev, "config regmap failed: %d", ret);
+ goto failed;
+ }
+
+ INIT_LIST_HEAD(&xdev->dma_dev.channels);
+
+ ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ xdma_err(xdev, "config H2C channels failed: %d", ret);
+ goto failed;
+ }
+
+ ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ xdma_err(xdev, "config C2H channels failed: %d", ret);
+ goto failed;
+ }
+
+ dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
+
+ xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
+ xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
+ xdev->dma_dev.device_tx_status = xdma_tx_status;
+ xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
+ xdev->dma_dev.device_config = xdma_device_config;
+ xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
+ xdev->dma_dev.filter.map = pdata->device_map;
+ xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
+ xdev->dma_dev.filter.fn = xdma_filter_fn;
+ xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
+
+ ret = dma_async_device_register(&xdev->dma_dev);
+ if (ret) {
+ xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
+ goto failed;
+ }
+ /* status flags tell xdma_remove() how far init got */
+ xdev->status |= XDMA_DEV_STATUS_REG_DMA;
+
+ ret = xdma_irq_init(xdev);
+ if (ret) {
+ xdma_err(xdev, "failed to init msix: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
+
+ return 0;
+
+failed:
+ xdma_remove(pdev);
+
+ return ret;
+}
+
+/* Bind to platform devices registered under the name "xdma". */
+static const struct platform_device_id xdma_id_table[] = {
+ { "xdma", 0},
+ { },
+};
+
+static struct platform_driver xdma_driver = {
+ .driver = {
+ .name = "xdma",
+ },
+ .id_table = xdma_id_table,
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("AMD XDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/6.3/amd_xdma.h b/snd-alpx/core/generic/6.3/amd_xdma.h
new file mode 100644
index 0000000..b5e23e1
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/amd_xdma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match channel when request dma channel
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir;
+};
+
+/* Wrap a struct xdma_chan_info * for use as a dma_filter_fn parameter. */
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: dmaengine slave map table for channel lookup
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
diff --git a/snd-alpx/core/generic/6.3/dmaengine.h b/snd-alpx/core/generic/6.3/dmaengine.h
new file mode 100644
index 0000000..53f16d3
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/dmaengine.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ /* Counter wrapped: restart above the reserved error/sentinel values. */
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channels completed
+ * cookie marker. Zero the descriptors cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ /* Compiler barrier: report one consistent snapshot of both counters. */
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ state->in_flight_bytes = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+/**
+ * dma_set_residue - record the remaining byte count of a transaction
+ * @state: dma_tx_state to update (may be NULL, in which case nothing is done)
+ * @residue: bytes not yet transferred
+ */
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+/**
+ * dma_set_in_flight_bytes - record bytes currently in flight
+ * @state: dma_tx_state to update (may be NULL, in which case nothing is done)
+ * @in_flight_bytes: bytes handed to hardware but not yet completed
+ */
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
+/*
+ * Snapshot of a descriptor's completion callback, typically taken under
+ * the channel lock so the callback can be invoked after the lock is
+ * dropped (see dmaengine_desc_get_callback/dmaengine_desc_callback_invoke).
+ */
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ /* Substituted when the caller has no result to report. */
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ /* The result-style callback takes precedence over the legacy one. */
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ /* Either callback flavour counts as "valid". */
+ return cb->callback || cb->callback_result;
+}
+
+/* Channel lookup helpers provided by the dmaengine core. */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+/* Return the DMA device's debugfs directory. */
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
+ return dma_dev->dbg_dev_root;
+}
+#else
+struct dentry;
+/* debugfs disabled: stub that always returns NULL. */
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/snd-alpx/core/generic/6.3/virt-dma.c b/snd-alpx/core/generic/6.3/virt-dma.c
new file mode 100644
index 0000000..a6f4265
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/virt-dma.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+/* Map an embedded tx descriptor back to its containing virt_dma_desc. */
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct virt_dma_desc, tx);
+}
+
+/*
+ * tx_submit hook for virtual channels: assign a cookie and move the
+ * descriptor from the allocated list to the submitted list, under vc->lock.
+ */
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
+ vc, vd, cookie);
+
+ return cookie;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
+
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
+ * transfer.
+ *
+ * Returns 0 upon success
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+ vc, vd, vd->tx.cookie);
+ /* Hand the descriptor back to the driver-supplied destructor. */
+ vc->desc_free(vd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
+/*
+ * Look up an issued (not yet completed) descriptor by cookie.
+ * Caller is expected to hold vc->lock; returns NULL if not found.
+ */
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd;
+
+ list_for_each_entry(vd, &vc->desc_issued, node)
+ if (vd->tx.cookie == cookie)
+ return vd;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_find_desc);
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void vchan_complete(struct tasklet_struct *t)
+{
+ struct virt_dma_chan *vc = from_tasklet(vc, t, task);
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ /* Detach completed work and snapshot the cyclic callback under the lock. */
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ /*
+ * If vd is NULL, cb was zeroed above so the invoke is a no-op and
+ * the &vd->tx_result argument is never dereferenced.
+ */
+ dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+
+ /* Callbacks run without the lock; descriptors were already unlinked. */
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+ dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+ vchan_vdesc_fini(vd);
+ }
+}
+
+/*
+ * Free (or recycle, for reusable descriptors) every descriptor on @head.
+ * @head is typically built with vchan_get_all_descriptors().
+ */
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
+{
+ struct virt_dma_desc *vd, *_vd;
+
+ list_for_each_entry_safe(vd, _vd, head, node) {
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
+ }
+}
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
+
+/*
+ * Initialise a virtual channel and register it with @dmadev.
+ * Must be called before the dma_device is registered with the core.
+ */
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
+{
+ dma_cookie_init(&vc->chan);
+
+ spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_allocated);
+ INIT_LIST_HEAD(&vc->desc_submitted);
+ INIT_LIST_HEAD(&vc->desc_issued);
+ INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
+
+ tasklet_setup(&vc->task, vchan_complete);
+
+ vc->chan.device = dmadev;
+ list_add_tail(&vc->chan.device_node, &dmadev->channels);
+}
+EXPORT_SYMBOL_GPL(vchan_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/core/generic/6.3/virt-dma.h b/snd-alpx/core/generic/6.3/virt-dma.h
new file mode 100644
index 0000000..e9f5250
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/virt-dma.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+/*
+ * A descriptor moves through the channel lists in order:
+ * allocated -> submitted -> issued -> completed (or terminated).
+ */
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ struct dmaengine_result tx_result;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ /* driver-supplied destructor for descriptors (see vchan_vdesc_fini) */
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_allocated;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+ struct list_head desc_terminated;
+
+ /* cyclic descriptor with a pending period callback, if any */
+ struct virt_dma_desc *cyclic;
+};
+
+/* Map an embedded dma_chan back to its containing virt_dma_chan. */
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ unsigned long flags;
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ vd->tx_result.result = DMA_TRANS_NOERROR;
+ vd->tx_result.residue = 0;
+
+ /* Park on desc_allocated until the client submits the descriptor. */
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * @vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ *
+ * Returns true if there is at least one issued descriptor to process.
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * @vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ *
+ * Moves the descriptor to the completed list and schedules the tasklet
+ * that will run its callback outside the lock.
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+
+ /* dma_cookie_complete() zeroes vd->tx.cookie, so save it for the log. */
+ cookie = vd->tx.cookie;
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ *
+ * Reusable descriptors go back on the allocated list for resubmission;
+ * everything else is handed to the driver's desc_free destructor.
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
+ vc->desc_free(vd);
+ }
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * @vd: virtual descriptor
+ *
+ * Records the cyclic descriptor and defers its callback to the tasklet.
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ *
+ * The descriptor is parked on desc_terminated; it is actually freed
+ * later by vchan_synchronize().
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ list_add_tail(&vd->node, &vc->desc_terminated);
+
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * @vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ *
+ * Returns NULL when no descriptor has been issued.
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_allocated, head);
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
+}
+
+/*
+ * Drain and free every descriptor on the channel. The reuse flag is
+ * cleared first so vchan_vdesc_fini() really frees instead of recycling.
+ */
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ LIST_HEAD(head);
+ unsigned long flags;
+
+ /* Wait for a running vchan_complete() tasklet to finish. */
+ tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
diff --git a/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h b/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h
new file mode 100644
index 0000000..4ee96de
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DMA_XDMA_REGS_H
+#define __DMA_XDMA_REGS_H
+
+/* The length of register space exposed to host */
+#define XDMA_REG_SPACE_LEN 65536
+
+/*
+ * maximum number of DMA channels for each direction:
+ * Host to Card (H2C) or Card to Host (C2H)
+ */
+#define XDMA_MAX_CHANNELS 4
+
+/*
+ * macros to define the number of descriptor blocks can be used in one
+ * DMA transfer request.
+ * the DMA engine uses a linked list of descriptor blocks that specify the
+ * source, destination, and length of the DMA transfers.
+ */
+#define XDMA_DESC_BLOCK_NUM BIT(7)
+#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1)
+
+/* descriptor definitions */
+#define XDMA_DESC_ADJACENT 32
+#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1)
+#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8)
+#define XDMA_DESC_MAGIC 0xad4bUL
+#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16)
+#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0)
+#define XDMA_DESC_STOPPED BIT(0)
+#define XDMA_DESC_COMPLETED BIT(1)
+#define XDMA_DESC_BLEN_BITS 28
+#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
+
+/* macros to construct the descriptor control word */
+#define XDMA_DESC_CONTROL(adjacent, flag) \
+ (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \
+ FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \
+ FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
+#define XDMA_DESC_CONTROL_LAST \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+#define XDMA_DESC_CONTROL_CYCLIC \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED)
+
+/*
+ * Descriptor for a single contiguous memory block transfer.
+ *
+ * Multiple descriptors are linked by means of the next pointer. An additional
+ * extra adjacent number gives the amount of extra contiguous descriptors.
+ *
+ * The descriptors are in root complex memory, and the bytes in the 32-bit
+ * words must be in little-endian byte ordering.
+ */
+struct xdma_hw_desc {
+ __le32 control;
+ __le32 bytes;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le64 next_desc;
+};
+
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 32
+#define XDMA_DESC_BLOCK_BOUNDARY 4096
+
+/*
+ * Channel registers
+ * (offsets relative to the per-channel base; see XDMA_CHAN_STRIDE below)
+ */
+#define XDMA_CHAN_IDENTIFIER 0x0
+#define XDMA_CHAN_CONTROL 0x4
+#define XDMA_CHAN_CONTROL_W1S 0x8
+#define XDMA_CHAN_CONTROL_W1C 0xc
+#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_STATUS_RC 0x44
+#define XDMA_CHAN_COMPLETED_DESC 0x48
+#define XDMA_CHAN_ALIGNMENTS 0x4c
+#define XDMA_CHAN_INTR_ENABLE 0x90
+#define XDMA_CHAN_INTR_ENABLE_W1S 0x94
+/* NOTE(review): PG195 lists the interrupt-enable W1C at 0x98 — confirm 0x9c. */
+#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c
+
+#define XDMA_CHAN_STRIDE 0x100
+#define XDMA_CHAN_H2C_OFFSET 0x0
+#define XDMA_CHAN_C2H_OFFSET 0x1000
+#define XDMA_CHAN_H2C_TARGET 0x0
+#define XDMA_CHAN_C2H_TARGET 0x1
+
+/* macro to check if channel is available */
+#define XDMA_CHAN_MAGIC 0x1fc0
+#define XDMA_CHAN_CHECK_TARGET(id, target) \
+ (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target))
+
+/* bits of the channel control register */
+#define CHAN_CTRL_RUN_STOP BIT(0)
+#define CHAN_CTRL_IE_DESC_STOPPED BIT(1)
+#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2)
+#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
+#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
+#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
+#define CHAN_CTRL_POLL_MODE_WB BIT(26)
+#define CHAN_CTRL_TRANSFER_INFO_WB BIT(27)
+
+/* run bit plus every interrupt-enable needed for a normal transfer */
+#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \
+ CHAN_CTRL_IE_DESC_STOPPED | \
+ CHAN_CTRL_IE_DESC_COMPLETED | \
+ CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+/* bits of the channel interrupt enable mask */
+#define CHAN_IM_DESC_ERROR BIT(19)
+#define CHAN_IM_READ_ERROR BIT(9)
+#define CHAN_IM_IDLE_STOPPED BIT(6)
+#define CHAN_IM_MAGIC_STOPPED BIT(4)
+#define CHAN_IM_DESC_COMPLETED BIT(2)
+#define CHAN_IM_DESC_STOPPED BIT(1)
+
+#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \
+ CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \
+ CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED)
+
+/*
+ * Channel SGDMA registers
+ */
+#define XDMA_SGDMA_IDENTIFIER 0x4000
+#define XDMA_SGDMA_DESC_LO 0x4080
+#define XDMA_SGDMA_DESC_HI 0x4084
+#define XDMA_SGDMA_DESC_ADJ 0x4088
+#define XDMA_SGDMA_DESC_CREDIT 0x408c
+
+/*
+ * interrupt registers
+ */
+#define XDMA_IRQ_IDENTIFIER 0x2000
+#define XDMA_IRQ_USER_INT_EN 0x2004
+#define XDMA_IRQ_USER_INT_EN_W1S 0x2008
+#define XDMA_IRQ_USER_INT_EN_W1C 0x200c
+#define XDMA_IRQ_CHAN_INT_EN 0x2010
+#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014
+#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018
+#define XDMA_IRQ_USER_INT_REQ 0x2040
+#define XDMA_IRQ_CHAN_INT_REQ 0x2044
+#define XDMA_IRQ_USER_INT_PEND 0x2048
+#define XDMA_IRQ_CHAN_INT_PEND 0x204c
+#define XDMA_IRQ_USER_VEC_NUM 0x2080
+#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0
+
+#define XDMA_IRQ_VEC_SHIFT 8
+
+#endif /* __DMA_XDMA_REGS_H */
diff --git a/snd-alpx/core/generic/6.3/xilinx/xdma.c b/snd-alpx/core/generic/6.3/xilinx/xdma.c
new file mode 100644
index 0000000..ad42f05
--- /dev/null
+++ b/snd-alpx/core/generic/6.3/xilinx/xdma.c
@@ -0,0 +1,1403 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be both in
+ * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+
+#include <linux/regmap.h>
+
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include <linux/dmaengine.h>
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/dmaengine.h"
+#else
+#include "../../../../include/4.16/dmaengine.h"
+#endif
+#include "../../../../include/6.3/amd_xdma.h"
+#include <linux/platform_device.h>
+#include "../amd_xdma.h"
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE
+#include "../virt-dma.h"
+#elif KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.6/virt-dma.h"
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/virt-dma.h"
+#else
+#include "../../../../include/4.16/virt-dma.h"
+#endif
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_c2h_write_back - Write back block , written by the XDMA.
+ * @magic_status_bit: magic (0x52B4) once written
+ * @length: effective transfer length (in bytes)
+ * @padding_1: padding so the block is aligned on 32 bytes
+ * @dma_addr: DMA address associated with this block
+ */
+struct xdma_c2h_write_back {
+ __le32 magic_status_bit;
+ __le32 length;
+ u32 padding_1[6];
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ * @write_back: C2H meta data write back (presumably used only when
+ * @dir is DMA_DEV_TO_MEM — confirm against the C2H setup code)
+ */
+struct xdma_chan {
+ struct virt_dma_chan vchan;
+ void *xdev_hdl;
+ u32 base;
+ struct dma_pool *desc_pool;
+ bool busy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ u32 irq;
+ struct xdma_c2h_write_back* write_back;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
+ */
+struct xdma_desc {
+ struct virt_dma_desc vdesc;
+ struct xdma_chan *chan;
+ enum dma_transfer_direction dir;
+ struct xdma_desc_block *desc_blocks;
+ u32 dblk_num;
+ u32 desc_num;
+ u32 completed_desc_num;
+ bool cyclic;
+ bool interleaved_dma;
+ u32 periods;
+ u32 period_size;
+ u32 frames_left;
+ bool error;
+};
+
+/* Initialization progress flags kept in xdma_device.status (for teardown). */
+#define XDMA_DEV_STATUS_REG_DMA BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *rmap;
+ struct xdma_chan *h2c_chans;
+ struct xdma_chan *c2h_chans;
+ u32 h2c_chan_num;
+ u32 c2h_chan_num;
+ u32 irq_start;
+ u32 irq_num;
+ u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...) \
+ dev_err(&(xdev)->pdev->dev, fmt, ##args)
+/* Total channel count; evaluates _xd once via the local copy. */
+#define XDMA_CHAN_NUM(_xd) ({ \
+ typeof(_xd) (xd) = (_xd); \
+ ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
+
+/* Get the last desc in a desc block */
+static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
+{
+ return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
+}
+
+/**
+ * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ *
+ * Chains the last descriptor of each block to the first descriptor of the
+ * next block. Every XDMA_DESC_BLOCK_NUM blocks the chain is terminated
+ * with a LAST control word (the engine is restarted from the next block
+ * by xdma_xfer_start() based on completed_desc_num).
+ */
+static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ u32 last_blk_desc, desc_control;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
+ for (i = 1; i < sw_desc->dblk_num; i++) {
+ block = &sw_desc->desc_blocks[i - 1];
+ desc = xdma_blk_last_desc(block);
+
+ /* cut the chain at every XDMA_DESC_BLOCK_NUM boundary */
+ if (!(i & XDMA_DESC_BLOCK_MASK)) {
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+ continue;
+ }
+ desc->control = cpu_to_le32(desc_control);
+ desc->next_desc = cpu_to_le64(block[1].dma_addr);
+ }
+
+ /* update the last block */
+ last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
+ /* second-to-last block: adjust its adjacent count to what remains */
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
+ desc = xdma_blk_last_desc(block);
+ desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
+ desc->control = cpu_to_le32(desc_control);
+ }
+
+ /* terminate the final (possibly partial) block */
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
+ desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+}
+
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ *
+ * Links every descriptor of the first block to its successor and closes
+ * the ring by pointing the last descriptor back to the block start.
+ * Note: only desc_blocks[0] is used — cyclic transfers are assumed to fit
+ * in a single block here.
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ block = sw_desc->desc_blocks;
+ for (i = 0; i < sw_desc->desc_num - 1; i++) {
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+ }
+ /* close the ring */
+ desc = block->virt_addr + i * XDMA_DESC_SIZE;
+ desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
+/* Map a generic dma_chan back to the driver's xdma_chan. */
+static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xdma_chan, vchan.chan);
+}
+
+/* Map a virt_dma_desc back to the driver's xdma_desc. */
+static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct xdma_desc, vdesc);
+}
+
+/**
+ * xdma_channel_init - Initialize DMA channel registers
+ * @chan: DMA channel pointer
+ *
+ * Clears the non-incrementing-address and transfer-info write-back control
+ * bits and enables all channel interrupts.
+ *
+ * Return: 0 on success, or the regmap_write() error code.
+ */
+static int xdma_channel_init(struct xdma_chan *chan)
+{
+ struct xdma_device *xdev = chan->xdev_hdl;
+ int ret;
+ unsigned int reg_ctrl = 0;
+
+ regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, &reg_ctrl);
+ dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl);
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_NON_INCR_ADDR | CHAN_CTRL_TRANSFER_INFO_WB);
+ if (ret)
+ return ret;
+
+ regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, &reg_ctrl);
+ dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl);
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
+ CHAN_IM_ALL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_free_desc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ *
+ * Also used for partially-built descriptors from the xdma_alloc_desc()
+ * failure path: blocks are filled in order, so stop at the first NULL.
+ */
+static void xdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct xdma_desc *sw_desc;
+ int i;
+
+ sw_desc = to_xdma_desc(vdesc);
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ if (!sw_desc->desc_blocks[i].virt_addr)
+ break;
+ dma_pool_free(sw_desc->chan->desc_pool,
+ sw_desc->desc_blocks[i].virt_addr,
+ sw_desc->desc_blocks[i].dma_addr);
+ }
+ kfree(sw_desc->desc_blocks);
+ kfree(sw_desc);
+}
+
+/**
+ * xdma_alloc_desc - Allocate descriptor
+ * @chan: DMA channel pointer
+ * @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
+ *
+ * Allocates the software descriptor plus enough hardware descriptor
+ * blocks (XDMA_DESC_ADJACENT descriptors each) from the channel's DMA
+ * pool, pre-fills every control word, then links the blocks.
+ * GFP_NOWAIT is used throughout — presumably because prep callbacks may
+ * run in atomic context (TODO confirm against the callers).
+ *
+ * Return: the new descriptor, or NULL on allocation failure.
+ */
+static struct xdma_desc *
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
+{
+ struct xdma_desc *sw_desc;
+ struct xdma_hw_desc *desc;
+ dma_addr_t dma_addr;
+ u32 dblk_num;
+ u32 control;
+ void *addr;
+ int i, j;
+
+ sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->chan = chan;
+ sw_desc->desc_num = desc_num;
+ sw_desc->cyclic = cyclic;
+ sw_desc->error = false;
+ dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
+ sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
+ GFP_NOWAIT);
+ if (!sw_desc->desc_blocks)
+ goto failed;
+
+ if (cyclic)
+ control = XDMA_DESC_CONTROL_CYCLIC;
+ else
+ control = XDMA_DESC_CONTROL(1, 0);
+
+ sw_desc->dblk_num = dblk_num;
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
+ if (!addr)
+ goto failed;
+
+ sw_desc->desc_blocks[i].virt_addr = addr;
+ sw_desc->desc_blocks[i].dma_addr = dma_addr;
+ for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
+ desc[j].control = cpu_to_le32(control);
+ }
+
+ if (cyclic)
+ xdma_link_cyclic_desc_blocks(sw_desc);
+ else
+ xdma_link_sg_desc_blocks(sw_desc);
+
+ return sw_desc;
+
+failed:
+ /* xdma_free_desc() tolerates a partially-filled block array */
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_xfer_start - Start DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_start(struct xdma_chan *xchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct xdma_desc_block *block;
+ u32 val, completed_blocks;
+ struct xdma_desc *desc;
+ int ret;
+
+ /*
+ * check if there is not any submitted descriptor or channel is busy.
+ * vchan lock should be held where this function is called.
+ */
+ if (!vd || xchan->busy)
+ return -EINVAL;
+
+ /* clear run stop bit to get ready for transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ desc = to_xdma_desc(vd);
+ if (desc->dir != xchan->dir) {
+ xdma_err(xdev, "incorrect request direction");
+ return -EINVAL;
+ }
+
+ /* set DMA engine to the first descriptor block */
+ completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+ block = &desc->desc_blocks[completed_blocks];
+ val = lower_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+ if (ret)
+ return ret;
+
+ val = upper_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+ if (ret)
+ return ret;
+
+ /* Number of extra adjacent descriptors after the first one: the last
+  * block may be partially used, all others are full.
+  */
+ if (completed_blocks + 1 == desc->dblk_num)
+ val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ else
+ val = XDMA_DESC_ADJACENT - 1;
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+ if (ret)
+ return ret;
+
+ /* kick off DMA transfer, force 0=1 transition, USE Bit clear/set registers */
+
+ /* NOTE(review): return value of the W1C write is deliberately(?) not
+  * checked — the pairing W1S write below is the one that matters.
+  */
+ regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_START);
+
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1S,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ xchan->busy = true;
+
+ return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ int ret;
+ u32 val;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* clear run stop bit to prevent any further auto-triggering */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ /* Clear the channel status register (read of the RC register is
+  * clear-on-read; the value itself is discarded).
+  */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+ enum dma_transfer_direction dir)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+ struct xdma_chan **chans, *xchan;
+ u32 base, identifier, target;
+ u32 *chan_num;
+ int i, j, ret;
+
+ /* Select the register window and device fields for this direction. */
+ if (dir == DMA_MEM_TO_DEV) {
+ base = XDMA_CHAN_H2C_OFFSET;
+ target = XDMA_CHAN_H2C_TARGET;
+ chans = &xdev->h2c_chans;
+ chan_num = &xdev->h2c_chan_num;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ base = XDMA_CHAN_C2H_OFFSET;
+ target = XDMA_CHAN_C2H_TARGET;
+ chans = &xdev->c2h_chans;
+ chan_num = &xdev->c2h_chan_num;
+ } else {
+ xdma_err(xdev, "invalid direction specified");
+ return -EINVAL;
+ }
+
+ /* detect number of available DMA channels */
+ for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ /* check if it is available DMA channel */
+ if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+ (*chan_num)++;
+ }
+
+ if (!*chan_num) {
+ xdma_err(xdev, "does not probe any channel");
+ return -EINVAL;
+ }
+
+ *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+ GFP_KERNEL);
+ if (!*chans)
+ return -ENOMEM;
+
+ /* Second pass: i walks hardware slots, j fills the channel array. */
+ for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+ continue;
+
+ /* Defensive: hardware reported more channels than pass one. */
+ if (j == *chan_num) {
+ xdma_err(xdev, "invalid channel number");
+ return -EIO;
+ }
+
+ /* init channel structure and hardware */
+ xchan = &(*chans)[j];
+ xchan->xdev_hdl = xdev;
+ xchan->base = base + i * XDMA_CHAN_STRIDE;
+ xchan->dir = dir;
+
+ ret = xdma_channel_init(xchan);
+ if (ret)
+ return ret;
+ xchan->vchan.desc_free = xdma_free_desc;
+ vchan_init(&xchan->vchan, &xdev->dma_dev);
+
+ j++;
+ }
+
+ dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
+ (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
+
+ return 0;
+}
+
+/**
+ * xdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_issue_pending(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ unsigned long flags;
+
+ /* Move submitted descriptors to the issued list and kick the engine
+  * if it is idle; xdma_xfer_start() requires the vchan lock held.
+  */
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+ if (vchan_issue_pending(&xdma_chan->vchan))
+ xdma_xfer_start(xdma_chan);
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+}
+
+/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ /* Stop the hardware first so no completion IRQ races the teardown. */
+ xdma_xfer_stop(xdma_chan);
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ xdma_chan->busy = false;
+ /* The in-flight descriptor (head of desc_issued) is completed and
+  * handed to vchan_terminate_vdesc() for deferred freeing.
+  */
+ vd = vchan_next_desc(&xdma_chan->vchan);
+ if (vd) {
+ list_del(&vd->node);
+ dma_cookie_complete(&vd->tx);
+ vchan_terminate_vdesc(vd);
+ }
+ vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+ list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ /* Wait for callbacks of terminated descriptors to finish running. */
+ vchan_synchronize(&xdma_chan->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @desc_start_num: Index of the first descriptor to take care of in @sw_desc
+ */
+/*
+ * Fills descriptors starting at index @filled_descs_num and returns the
+ * number of hardware descriptors consumed for this chunk (kernel-doc above
+ * mistakenly refers to the last parameter as @desc_start_num).
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
+ /* Locate the block and the in-block slot of the first descriptor. */
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+
+ /* pr_debug instead of dev_dbg(NULL, ...): passing a NULL device
+  * to dev_dbg dereferences it on dynamic-debug configurations.
+  */
+ pr_debug("desc[%u]:%p {src:0x%llx, dst: 0x%llx, length: %u}",
+ desc_num,
+ desc,
+ src_addr,
+ dst_addr,
+ len);
+
+ /* Crossing a block boundary moves to the next block's first slot. */
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc *sw_desc;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
+ struct scatterlist *sg;
+
+ /* Worst case: each segment may need several descriptors if it is
+  * larger than the per-descriptor limit.
+  */
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ /* Fall back to the channel's C2H write-back block when no
+  * device address was configured.
+  */
+ dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ /* Bug fix: the original printed sg_dma_len(sg) here, but after the
+  * counting loop above 'sg' points past the last list entry (undefined
+  * behavior); print the segment count instead. Also DMA_MEM_TO_DEV is
+  * host-to-card (H2C) — the labels were swapped.
+  */
+ dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, segments: %u}",
+ dir == DMA_MEM_TO_DEV ? "H2C" : "C2H",
+ sw_desc,
+ src,
+ dst,
+ sg_len);
+
+ desc_num = 0;
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+ dev_addr += sg_dma_len(sg);
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+ size_t size, size_t period_size,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ unsigned int periods = size / period_size;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc *sw_desc;
+ u64 addr, dev_addr, *src, *dst;
+ u32 desc_num = 0;
+ unsigned int i;
+
+ /*
+ * Simplify the whole logic by preventing an abnormally high number of
+ * periods and periods size.
+ */
+ if (period_size > XDMA_DESC_BLEN_MAX) {
+ xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+ return NULL;
+ }
+
+ if (periods > XDMA_DESC_ADJACENT) {
+ xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+ return NULL;
+ }
+
+ /* NOTE(review): size is assumed to be a whole multiple of period_size
+  * (any remainder is silently dropped by the integer division above).
+  */
+ sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->periods = periods;
+ sw_desc->period_size = period_size;
+ sw_desc->dir = dir;
+ sw_desc->interleaved_dma = false;
+
+ addr = address;
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ /* Fall back to the channel's C2H write-back block when no
+  * device address was configured.
+  */
+ dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ /* Bug fix: DMA_MEM_TO_DEV is host-to-card (H2C) — labels were swapped. */
+ dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %lu}",
+ dir == DMA_MEM_TO_DEV ? "H2C" : "C2H",
+ sw_desc,
+ src,
+ dst,
+ period_size);
+
+ /* One descriptor per period; only the host address advances. */
+ for (i = 0; i < periods; i++) {
+ xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num++);
+ addr += period_size;
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ int i;
+ u32 desc_num = 0, period_size = 0;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ struct xdma_desc *sw_desc;
+ u64 src_addr, dst_addr;
+
+ /* Worst-case descriptor count over all chunks of one frame. */
+ for (i = 0; i < xt->frame_size; ++i)
+ desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = xt->dir;
+ sw_desc->interleaved_dma = true;
+ /* DMA_PREP_REPEAT makes the transfer repeat until a new one with
+  * DMA_PREP_LOAD_EOT is queued (checked in the ISR).
+  */
+ sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+ sw_desc->frames_left = xt->numf;
+ sw_desc->periods = xt->numf;
+
+ desc_num = 0;
+ src_addr = xt->src_start;
+ dst_addr = xt->dst_start;
+ /* Advance each side by its chunk size plus inter-chunk gap, honoring
+  * the template's src_inc/dst_inc flags.
+  */
+ for (i = 0; i < xt->frame_size; ++i) {
+ desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+ src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+ xt->sgl[i].size : 0);
+ dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+ xt->sgl[i].size : 0);
+ period_size += xt->sgl[i].size;
+ }
+ sw_desc->period_size = period_size;
+
+ tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+ if (tx_desc)
+ return tx_desc;
+
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_device_config - Configure the DMA channel
+ * @chan: DMA channel
+ * @cfg: channel configuration
+ */
+static int xdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ /* Snapshot the slave configuration for later prep calls. */
+ xdma_chan->cfg = *cfg;
+
+ return 0;
+}
+
+/**
+ * xdma_free_chan_resources - Free channel resources
+ * @chan: DMA channel
+ */
+static void xdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ vchan_free_chan_resources(&xdma_chan->vchan);
+ /* Guard against a NULL write_back: xdma_alloc_chan_resources() can
+  * fail after creating the pool but before allocating the block.
+  */
+ if (xdma_chan->write_back)
+ dma_pool_free(xdma_chan->desc_pool,
+ xdma_chan->write_back,
+ xdma_chan->write_back->dma_addr);
+ xdma_chan->write_back = NULL;
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+}
+
+/**
+ * xdma_alloc_chan_resources - Allocate channel resources
+ * @chan: DMA channel
+ */
+static int xdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct device *dev = xdev->dma_dev.dev;
+ dma_addr_t write_back_addr;
+
+ /* The pool must be created on the PCI device so DMA mapping uses the
+  * correct DMA ops/mask; walk up to the owning PCI device.
+  */
+ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+ if (!dev) {
+ xdma_err(xdev, "unable to find pci device");
+ return -EINVAL;
+ }
+
+ //Allocate the pool WITH the H2C write back
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE +
+ sizeof(struct xdma_c2h_write_back),
+ XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
+ if (!xdma_chan->desc_pool) {
+ xdma_err(xdev, "unable to allocate descriptor pool");
+ return -ENOMEM;
+ }
+
+ /* Allocate the C2H write back out of the pool*/
+
+ xdma_chan->write_back = dma_pool_alloc(xdma_chan->desc_pool, GFP_NOWAIT, &write_back_addr);
+
+ /* %pad is the correct format for dma_addr_t (may be 32 or 64 bit). */
+ dev_dbg(dev, "C2H write_back : %p, dma_addr: %pad", xdma_chan->write_back, &write_back_addr);
+
+ if (!xdma_chan->write_back) {
+ xdma_err(xdev, "unable to allocate C2H write back block");
+ /* Bug fix: don't leak the pool on this failure path — the core
+  * will not call free_chan_resources after a failed alloc.
+  */
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+ return -ENOMEM;
+ }
+ xdma_chan->write_back->dma_addr = write_back_addr;
+
+
+ return 0;
+}
+
+/* Report cookie status; for cyclic transfers also compute the residue from
+ * the number of completed periods.
+ */
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_desc *desc = NULL;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int period_idx;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ /* Residue/error info is only available while the descriptor is still
+  * on one of the channel lists.
+  */
+ vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return ret;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+ struct xdma_chan *xchan = dev_id;
+ u32 complete_desc_num = 0;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
+ struct xdma_desc *desc;
+ int ret;
+ u32 st;
+ bool repeat_tx;
+
+ spin_lock(&xchan->vchan.lock);
+
+ /* get submitted request */
+ vd = vchan_next_desc(&xchan->vchan);
+ if (!vd)
+ goto out;
+
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+
+ /* Any error bit, or completion without a completed/stopped bit,
+  * marks the descriptor as failed (reported via xdma_tx_status()).
+  */
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+ &complete_desc_num);
+ if (ret)
+ goto out;
+
+ /* Three completion models: interleaved (frame-based, may repeat),
+  * scatter-gather (one-shot), and cyclic (periodic callback).
+  */
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ /* A full descriptor ring completed: restart for the remainder. */
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
+ goto out;
+ }
+
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
+
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
+
+ /* Anything other than "exactly one full ring completed" here is
+  * unexpected; leave the descriptor queued and bail out.
+  */
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
+
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+ } else {
+ desc->completed_desc_num = complete_desc_num;
+ vchan_cyclic_callback(vd);
+ }
+
+out:
+ spin_unlock(&xchan->vchan.lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xdma_irq_fini - Uninitialize IRQ
+ * @xdev: DMA device pointer
+ */
+static void xdma_irq_fini(struct xdma_device *xdev)
+{
+ int i;
+
+ /* disable interrupt */
+ regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
+
+ /* free irq handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ for (i = 0; i < xdev->c2h_chan_num; i++)
+ free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
+}
+
+/**
+ * xdma_set_vector_reg - configure hardware IRQ registers
+ * @xdev: DMA device pointer
+ * @vec_tbl_start: Start of IRQ registers
+ * @irq_start: Start of IRQ
+ * @irq_num: Number of IRQ
+ */
+static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
+ u32 irq_start, u32 irq_num)
+{
+ u32 shift, i, val = 0;
+ int ret;
+
+ /* Each IRQ register is 32 bit and contains 4 IRQs */
+ while (irq_num > 0) {
+ for (i = 0; i < 4; i++) {
+ /* Pack one 8-bit vector number per byte lane. */
+ shift = XDMA_IRQ_VEC_SHIFT * i;
+ val |= irq_start << shift;
+ irq_start++;
+ irq_num--;
+ if (!irq_num)
+ break;
+ }
+
+ /* write IRQ register */
+ ret = regmap_write(xdev->rmap, vec_tbl_start, val);
+ if (ret)
+ return ret;
+ vec_tbl_start += sizeof(u32);
+ val = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * xdma_irq_init - initialize IRQs
+ * @xdev: DMA device pointer
+ */
+static int xdma_irq_init(struct xdma_device *xdev)
+{
+ u32 irq = xdev->irq_start;
+ u32 user_irq_start;
+ int i, j, ret;
+
+ /* return failure if there are not enough IRQs */
+ if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
+ xdma_err(xdev, "not enough irq");
+ return -EINVAL;
+ }
+
+ /* setup H2C interrupt handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-h2c-channel", &xdev->h2c_chans[i]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ i, irq, ret);
+ goto failed_init_h2c;
+ }
+ xdev->h2c_chans[i].irq = irq;
+ irq++;
+ }
+
+ /* setup C2H interrupt handler */
+ for (j = 0; j < xdev->c2h_chan_num; j++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-c2h-channel", &xdev->c2h_chans[j]);
+ if (ret) {
+ xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
+ j, irq, ret);
+ goto failed_init_c2h;
+ }
+ xdev->c2h_chans[j].irq = irq;
+ irq++;
+ }
+
+ /* config hardware IRQ registers */
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
+ XDMA_CHAN_NUM(xdev));
+ if (ret) {
+ xdma_err(xdev, "failed to set channel vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+
+ /* config user IRQ registers if needed */
+ user_irq_start = XDMA_CHAN_NUM(xdev);
+ if (xdev->irq_num > user_irq_start) {
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
+ user_irq_start,
+ xdev->irq_num - user_irq_start);
+ if (ret) {
+ xdma_err(xdev, "failed to set user vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+ }
+
+ /* enable interrupt */
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
+ if (ret)
+ goto failed_init_c2h;
+
+ return 0;
+
+/* Cleanup falls through: the C2H label unwinds 0..j-1, then the H2C
+ * label unwinds 0..i-1. Jumps after both loops arrive with i/j equal
+ * to the channel counts, so all registered handlers are freed.
+ */
+failed_init_c2h:
+ while (j--)
+ free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
+failed_init_h2c:
+ while (i--)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ return ret;
+}
+
+/* dmaengine filter: match a channel only if it runs in the requested
+ * transfer direction.
+ */
+static bool xdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct xdma_chan_info *info = param;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+
+ return xchan->dir == info->dir;
+}
+
+/**
+ * xdma_disable_user_irq - Disable user interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+
+ /* User IRQs occupy the range after the channel IRQs. */
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
+}
+EXPORT_SYMBOL(xdma_disable_user_irq);
+
+/**
+ * xdma_enable_user_irq - Enable user logic interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+
+ /* User IRQs occupy the range after the channel IRQs. */
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return -EINVAL;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ /* Propagate the regmap status directly (0 on success). */
+ return regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
+}
+EXPORT_SYMBOL(xdma_enable_user_irq);
+
+/**
+ * xdma_get_user_irq - Get system IRQ number
+ * @pdev: Pointer to the platform_device structure
+ * @user_irq_index: User logic IRQ wire index
+ *
+ * Return: The system IRQ number allocated for the given wire index.
+ */
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ /* User IRQ wires map to the system IRQs after the channel IRQs. */
+ if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq index");
+ return -EINVAL;
+ }
+
+ return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
+}
+EXPORT_SYMBOL(xdma_get_user_irq);
+
+/**
+ * xdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+/* Also used as the unwind path of xdma_probe(): each status flag gates the
+ * matching teardown step, so partial initialization is handled correctly.
+ */
+static void xdma_remove(struct platform_device *pdev)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
+ xdma_irq_fini(xdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
+ dma_async_device_unregister(&xdev->dma_dev);
+}
+
+/**
+ * xdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ /* NOTE(review): pdata is dereferenced without a NULL check — this
+  * driver is instantiated with platform data by its parent (confirm).
+  */
+ struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct xdma_device *xdev;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret = -ENODEV;
+
+ if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
+ dev_err(&pdev->dev, "invalid max dma channels %d",
+ pdata->max_dma_channels);
+ return -EINVAL;
+ }
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xdev);
+ xdev->pdev = pdev;
+
+ /* The IRQ resource describes a contiguous range of system IRQs. */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get irq resource");
+ goto failed;
+ }
+ xdev->irq_start = res->start;
+ xdev->irq_num = resource_size(res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get io resource");
+ goto failed;
+ }
+
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ xdma_err(xdev, "ioremap failed");
+ goto failed;
+ }
+
+ xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+ &xdma_regmap_config);
+ if (IS_ERR(xdev->rmap)) {
+ ret = PTR_ERR(xdev->rmap);
+ xdma_err(xdev, "config regmap failed: %d", ret);
+ goto failed;
+ }
+
+ INIT_LIST_HEAD(&xdev->dma_dev.channels);
+
+ ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ xdma_err(xdev, "config H2C channels failed: %d", ret);
+ goto failed;
+ }
+
+ ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ xdma_err(xdev, "config C2H channels failed: %d", ret);
+ goto failed;
+ }
+
+ dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
+
+ xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
+ xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
+ xdev->dma_dev.device_tx_status = xdma_tx_status;
+ xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
+ xdev->dma_dev.device_config = xdma_device_config;
+ xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
+ xdev->dma_dev.filter.map = pdata->device_map;
+ xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
+ xdev->dma_dev.filter.fn = xdma_filter_fn;
+ xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
+
+ ret = dma_async_device_register(&xdev->dma_dev);
+ if (ret) {
+ xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_REG_DMA;
+
+ ret = xdma_irq_init(xdev);
+ if (ret) {
+ xdma_err(xdev, "failed to init msix: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
+
+ return 0;
+
+failed:
+ /* xdma_remove() unwinds based on xdev->status flags. */
+ xdma_remove(pdev);
+
+ return ret;
+}
+
+/* Matched by name against the "xdma" platform device created by the parent
+ * PCI driver; no device-tree/OF matching.
+ */
+static const struct platform_device_id xdma_id_table[] = {
+ { "xdma", 0},
+ { },
+};
+
+static struct platform_driver xdma_driver = {
+ .driver = {
+ .name = "xdma",
+ },
+ .id_table = xdma_id_table,
+ .probe = xdma_probe,
+ .remove_new = xdma_remove,
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("AMD XDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/snd-alpx/dkms.conf b/snd-alpx/dkms.conf
new file mode 100644
index 0000000..4af6f73
--- /dev/null
+++ b/snd-alpx/dkms.conf
@@ -0,0 +1,10 @@
+PACKAGE_VERSION="3.4.3"
+PACKAGE_NAME="snd-alpx"
+CLEAN="make clean"
+MAKE[0]="make -Bj KERNEL_VERSION=${kernelver} all"
+BUILT_MODULE_NAME[0]="snd-alpx"
+BUILT_MODULE_NAME[1]="snd-alpx-xdma"
+DEST_MODULE_LOCATION[0]="/extra/digigram"
+DEST_MODULE_LOCATION[1]="/extra/digigram"
+AUTOINSTALL="yes"
+BUILD_EXCLUSIVE_ARCH="x86_64"
diff --git a/snd-alpx/include/4.16/dmaengine.h b/snd-alpx/include/4.16/dmaengine.h
new file mode 100644
index 0000000..501c0b0
--- /dev/null
+++ b/snd-alpx/include/4.16/dmaengine.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ /* Both counters start equal: no transaction issued, none completed. */
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ /* Skip reserved values (<= 0) on signed wrap-around. */
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channels completed
+ * cookie marker. Zero the descriptors cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ /* Completing a descriptor that never got a valid cookie is a driver bug. */
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ /* Read 'used' before 'complete' settles; the barrier keeps the
+  * compiler from reordering the two loads.
+  */
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+/* Store the residue in @state, tolerating a NULL state pointer. */
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+/* Snapshot of a descriptor's completion callback, taken under the channel
+ * lock so the callback can later be invoked without holding it.
+ */
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ /* Plain copies; the caller is responsible for any locking. */
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ /* Default "no error, no residue" result for callers passing NULL. */
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ /* The result-style callback takes precedence over the legacy one. */
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ /* Convenience wrapper: snapshot then invoke in one step. */
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+/* A callback snapshot is considered valid when the legacy callback pointer
+ * is set (the result-style callback is not consulted here).
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return cb->callback != NULL;
+}
+
+#endif
diff --git a/snd-alpx/include/4.16/virt-dma.h b/snd-alpx/include/4.16/virt-dma.h
new file mode 100644
index 0000000..b09b75a
--- /dev/null
+++ b/snd-alpx/include/4.16/virt-dma.h
@@ -0,0 +1,221 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_allocated;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+
+ struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ unsigned long flags;
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * @vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * @vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+
+ cookie = vd->tx.cookie;
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * @vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * @vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_allocated, head);
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ unsigned long flags;
+
+ tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+#endif
diff --git a/snd-alpx/include/5.10/regmap.h b/snd-alpx/include/5.10/regmap.h
new file mode 100644
index 0000000..e7834d9
--- /dev/null
+++ b/snd-alpx/include/5.10/regmap.h
@@ -0,0 +1,1765 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_REGMAP_H
+#define __LINUX_REGMAP_H
+
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/iopoll.h>
+#include <linux/fwnode.h>
+
+struct module;
+struct clk;
+struct device;
+struct device_node;
+struct i2c_client;
+struct i3c_device;
+struct irq_domain;
+struct slim_device;
+struct spi_device;
+struct spmi_device;
+struct regmap;
+struct regmap_range_cfg;
+struct regmap_field;
+struct snd_ac97;
+struct sdw_slave;
+
+/* An enum of all the supported cache types */
+enum regcache_type {
+ REGCACHE_NONE,
+ REGCACHE_RBTREE,
+ REGCACHE_COMPRESSED,
+ REGCACHE_FLAT,
+};
+
+/**
+ * struct reg_default - Default value for a register.
+ *
+ * @reg: Register address.
+ * @def: Register default value.
+ *
+ * We use an array of structs rather than a simple array as many modern devices
+ * have very sparse register maps.
+ */
+struct reg_default {
+ unsigned int reg;
+ unsigned int def;
+};
+
+/**
+ * struct reg_sequence - An individual write from a sequence of writes.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ *
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ */
+struct reg_sequence {
+ unsigned int reg;
+ unsigned int def;
+ unsigned int delay_us;
+};
+
+#define REG_SEQ(_reg, _def, _delay_us) { \
+ .reg = _reg, \
+ .def = _def, \
+ .delay_us = _delay_us, \
+ }
+#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0)
+
+/**
+ * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a error read. In the two former cases,
+ * the last read value at @addr is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (map), (addr), &(val)); \
+ __ret ?: __tmp; \
+})
+
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ * Should be less than ~10us since udelay is used
+ * (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first setup your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
+ * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
+ *
+ * @field: Regmap field to read from
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value of @field is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (field), &(val)); \
+ __ret ?: __tmp; \
+})
+
+#ifdef CONFIG_REGMAP
+
+enum regmap_endian {
+ /* Unspecified -> 0 -> Backwards compatible default */
+ REGMAP_ENDIAN_DEFAULT = 0,
+ REGMAP_ENDIAN_BIG,
+ REGMAP_ENDIAN_LITTLE,
+ REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * struct regmap_range - A register range, used for access related checks
+ * (readable/writeable/volatile/precious checks)
+ *
+ * @range_min: address of first register
+ * @range_max: address of last register
+ */
+struct regmap_range {
+ unsigned int range_min;
+ unsigned int range_max;
+};
+
+#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
+
+/**
+ * struct regmap_access_table - A table of register ranges for access checks
+ *
+ * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
+ * @n_yes_ranges: size of the above array
+ * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
+ * @n_no_ranges: size of the above array
+ *
+ * A table of ranges including some yes ranges and some no ranges.
+ * If a register belongs to a no_range, the corresponding check function
+ * will return false. If a register belongs to a yes range, the corresponding
+ * check function will return true. "no_ranges" are searched first.
+ */
+struct regmap_access_table {
+ const struct regmap_range *yes_ranges;
+ unsigned int n_yes_ranges;
+ const struct regmap_range *no_ranges;
+ unsigned int n_no_ranges;
+};
+
+typedef void (*regmap_lock)(void *);
+typedef void (*regmap_unlock)(void *);
+
+/**
+ * struct regmap_config - Configuration for the register map of a device.
+ *
+ * @name: Optional name of the regmap. Useful when a device has multiple
+ * register regions.
+ *
+ * @reg_bits: Number of bits in a register address, mandatory.
+ * @reg_stride: The register address stride. Valid register addresses are a
+ * multiple of this value. If set to 0, a value of 1 will be
+ * used.
+ * @pad_bits: Number of bits of padding between register and value.
+ * @val_bits: Number of bits in a register value, mandatory.
+ *
+ * @writeable_reg: Optional callback returning true if the register
+ * can be written to. If this field is NULL but wr_table
+ * (see below) is not, the check is performed on such table
+ * (a register is writeable if it belongs to one of the ranges
+ * specified by wr_table).
+ * @readable_reg: Optional callback returning true if the register
+ * can be read from. If this field is NULL but rd_table
+ * (see below) is not, the check is performed on such table
+ * (a register is readable if it belongs to one of the ranges
+ * specified by rd_table).
+ * @volatile_reg: Optional callback returning true if the register
+ * value can't be cached. If this field is NULL but
+ * volatile_table (see below) is not, the check is performed on
+ * such table (a register is volatile if it belongs to one of
+ * the ranges specified by volatile_table).
+ * @precious_reg: Optional callback returning true if the register
+ * should not be read outside of a call from the driver
+ * (e.g., a clear on read interrupt status register). If this
+ * field is NULL but precious_table (see below) is not, the
+ * check is performed on such table (a register is precious if
+ * it belongs to one of the ranges specified by precious_table).
+ * @writeable_noinc_reg: Optional callback returning true if the register
+ * supports multiple write operations without incrementing
+ * the register number. If this field is NULL but
+ * wr_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * writeable if it belongs to one of the ranges specified
+ * by wr_noinc_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * readable if it belongs to one of the ranges specified
+ * by rd_noinc_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
+ * @lock: Optional lock callback (overrides regmap's default lock
+ * function, based on spinlock or mutex).
+ * @unlock: As above for unlocking.
+ * @lock_arg: this field is passed as the only argument of lock/unlock
+ * functions (ignored in case regular lock/unlock functions
+ * are not overridden).
+ * @reg_read: Optional callback that if filled will be used to perform
+ * all the reads from the registers. Should only be provided for
+ * devices whose read operation cannot be represented as a simple
+ * read operation on a bus such as SPI, I2C, etc. Most of the
+ * devices do not need this.
+ * @reg_write: Same as above for writing.
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of struct regmap_config).
+ * This field is a duplicate of a similar field in
+ * 'struct regmap_bus' and serves exact same purpose.
+ * Use it only for "no-bus" cases.
+ * @max_register: Optional, specifies the maximum valid register address.
+ * @wr_table: Optional, points to a struct regmap_access_table specifying
+ * valid ranges for write access.
+ * @rd_table: As above, for read access.
+ * @volatile_table: As above, for volatile registers.
+ * @precious_table: As above, for precious registers.
+ * @wr_noinc_table: As above, for no increment writeable registers.
+ * @rd_noinc_table: As above, for no increment readable registers.
+ * @reg_defaults: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults: Number of elements in reg_defaults.
+ *
+ * @read_flag_mask: Mask to be set in the top bytes of the register when doing
+ * a read.
+ * @write_flag_mask: Mask to be set in the top bytes of the register when doing
+ * a write. If both read_flag_mask and write_flag_mask are
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
+ * @use_single_read: If set, converts the bulk read operation into a series of
+ * single read operations. This is useful for a device that
+ * does not support bulk read.
+ * @use_single_write: If set, converts the bulk write operation into a series of
+ * single write operations. This is useful for a device that
+ * does not support bulk write.
+ * @can_multi_write: If set, the device supports the multi write mode of bulk
+ * write operations, if clear multi write requests will be
+ * split into individual write operations
+ *
+ * @cache_type: The actual cache type.
+ * @reg_defaults_raw: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ * @reg_format_endian: Endianness for formatted register addresses. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ * @val_format_endian: Endianness for formatted register values. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ *
+ * @ranges: Array of configuration entries for virtual address ranges.
+ * @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
+ */
+struct regmap_config {
+ const char *name;
+
+ int reg_bits;
+ int reg_stride;
+ int pad_bits;
+ int val_bits;
+
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+ bool fast_io;
+
+ unsigned int max_register;
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
+ const struct regmap_access_table *rd_noinc_table;
+ const struct reg_default *reg_defaults;
+ unsigned int num_reg_defaults;
+ enum regcache_type cache_type;
+ const void *reg_defaults_raw;
+ unsigned int num_reg_defaults_raw;
+
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
+ bool zero_flag_mask;
+
+ bool use_single_read;
+ bool use_single_write;
+ bool can_multi_write;
+
+ enum regmap_endian reg_format_endian;
+ enum regmap_endian val_format_endian;
+
+ const struct regmap_range_cfg *ranges;
+ unsigned int num_ranges;
+
+ bool use_hwlock;
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
+
+ bool can_sleep;
+};
+
+/**
+ * struct regmap_range_cfg - Configuration for indirectly accessed or paged
+ * registers.
+ *
+ * @name: Descriptive name for diagnostics
+ *
+ * @range_min: Address of the lowest register address in virtual range.
+ * @range_max: Address of the highest register in virtual range.
+ *
+ * @selector_reg: Register with selector field.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
+ *
+ * @window_start: Address of first (lowest) register in data window.
+ * @window_len: Number of registers in data window.
+ *
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ * 1. page selector register update;
+ * 2. access through data window registers.
+ */
+struct regmap_range_cfg {
+ const char *name;
+
+ /* Registers of virtual address range */
+ unsigned int range_min;
+ unsigned int range_max;
+
+ /* Page selector for indirect addressing */
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ /* Data window (per each page) */
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
+struct regmap_async;
+
+typedef int (*regmap_hw_write)(void *context, const void *data,
+ size_t count);
+typedef int (*regmap_hw_gather_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len);
+typedef int (*regmap_hw_async_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *async);
+typedef int (*regmap_hw_read)(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
+ unsigned int *val);
+typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
+ unsigned int val);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
+typedef void (*regmap_hw_free_context)(void *context);
+
+/**
+ * struct regmap_bus - Description of a hardware bus for the register map
+ * infrastructure.
+ *
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of
+ * struct regmap_config).
+ * @write: Write operation.
+ * @gather_write: Write operation with split register/value, return -ENOTSUPP
+ * if not implemented on a given device.
+ * @async_write: Write operation which completes asynchronously, optional and
+ * must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. This
+ * write operation has to complete when returning from the function.
+ * @reg_update_bits: Update bits operation to be used against volatile
+ * registers, intended for devices supporting some mechanism
+ * for setting/clearing bits without having to
+ * read/modify/write.
+ * @read: Read operation. Data is returned in the buffer used to transmit
+ * data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
+ * @async_alloc: Allocate a regmap_async() structure.
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @reg_format_endian_default: Default endianness for formatted register
+ * addresses. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @val_format_endian_default: Default endianness for formatted register
+ * values. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
+ */
+struct regmap_bus {
+ bool fast_io;
+ regmap_hw_write write;
+ regmap_hw_gather_write gather_write;
+ regmap_hw_async_write async_write;
+ regmap_hw_reg_write reg_write;
+ regmap_hw_reg_update_bits reg_update_bits;
+ regmap_hw_read read;
+ regmap_hw_reg_read reg_read;
+ regmap_hw_free_context free_context;
+ regmap_hw_async_alloc async_alloc;
+ u8 read_flag_mask;
+ enum regmap_endian reg_format_endian_default;
+ enum regmap_endian val_format_endian_default;
+ size_t max_raw_read;
+ size_t max_raw_write;
+};
+
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ **/
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ fn(__VA_ARGS__, &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" name ")->lock"); \
+ }) \
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
+/**
+ * regmap_init() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__regmap_init, #config, \
+ dev, bus, bus_context, config)
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config);
+
+/**
+ * regmap_init_i2c() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_slimbus() - Initialise register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
+ * regmap_init_spi() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_base() - Create regmap for the Base register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_ext() - Create regmap for Ext register space
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * regmap_init_w1() - Initialise register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_w1(w1_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_w1, #config, \
+ w1_dev, config)
+
+/**
+ * regmap_init_mmio_clk() - Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
+
+/**
+ * regmap_init_mmio() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio(dev, regs, config) \
+ regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97() - Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
+ ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * devm_regmap_init() - Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
+ dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_spi() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_w1() - Initialise managed register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_w1(w1_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_w1, #config, \
+ w1_dev, config)
+/**
+ * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
+
+/**
+ * devm_regmap_init_mmio() - Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio(dev, regs, config) \
+ devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97() - Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
+ ac97, config)
+
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * devm_regmap_init_slimbus() - Initialise managed register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
+ * devm_regmap_init_i3c() - Initialise managed register map
+ *
+ * @i3c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i3c(i3c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \
+ i3c, config)
+
+/**
+ * devm_regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
+ spi, config)
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
+void regmap_mmio_detach_clk(struct regmap *map);
+void regmap_exit(struct regmap *map);
+int regmap_reinit_cache(struct regmap *map,
+ const struct regmap_config *config);
+struct regmap *dev_get_regmap(struct device *dev, const char *name);
+struct device *regmap_get_device(struct regmap *map);
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ size_t val_count);
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs);
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_sequence *regs,
+ int num_regs);
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count);
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, false, false);
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, true, false);
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
+int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
+int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
+
+int regcache_sync(struct regmap *map);
+int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+void regcache_cache_only(struct regmap *map, bool enable);
+void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table);
+
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val);
+
+static inline bool regmap_reg_in_range(unsigned int reg,
+ const struct regmap_range *range)
+{
+ return reg >= range->range_min && reg <= range->range_max;
+}
+
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges);
+
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, bits,
+ NULL, false, false);
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
+}
+
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
+
+/**
+ * struct reg_field - Description of a register field
+ *
+ * @reg: Offset of the register within the regmap bank
+ * @lsb: lsb of the register field.
+ * @msb: msb of the register field.
+ * @id_size: port size if it has some ports
+ * @id_offset: address offset for each port
+ */
+struct reg_field {
+ unsigned int reg;
+ unsigned int lsb;
+ unsigned int msb;
+ unsigned int id_size;
+ unsigned int id_offset;
+};
+
+#define REG_FIELD(_reg, _lsb, _msb) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ }
+
+#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .id_size = _size, \
+ .id_offset = _offset, \
+ }
+
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field);
+void regmap_field_free(struct regmap_field *field);
+
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field);
+void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ struct reg_field *reg_field,
+ int num_fields);
+void regmap_field_bulk_free(struct regmap_field *field);
+int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
+ struct regmap_field **field,
+ struct reg_field *reg_field, int num_fields);
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field);
+
+int regmap_field_read(struct regmap_field *field, unsigned int *val);
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val);
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val, NULL, false, true);
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, true);
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, true);
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, true);
+}
+
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
+
+/**
+ * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask: Mask used to flag/control the register.
+ * @type: IRQ trigger type setting details if supported.
+ */
+struct regmap_irq {
+ unsigned int reg_offset;
+ unsigned int mask;
+ struct regmap_irq_type type;
+};
+
+#define REGMAP_IRQ_REG(_irq, _off, _mask) \
+ [_irq] = { .reg_offset = (_off), .mask = (_mask) }
+
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
+#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \
+ { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] }
+
+struct regmap_irq_sub_irq_map {
+ unsigned int num_regs;
+ unsigned int *offset;
+};
+
+/**
+ * struct regmap_irq_chip - Description of a generic regmap irq_chip.
+ *
+ * @name: Descriptive name for IRQ controller.
+ *
+ * @main_status: Base main status register address. For chips which have
+ * interrupts arranged in separate sub-irq blocks with own IRQ
+ * registers and which have a main IRQ registers indicating
+ * sub-irq blocks with unhandled interrupts. For such chips fill
+ * sub-irq register information in status_base, mask_base and
+ * ack_base.
+ * @num_main_status_bits: Should be given to chips where number of meaningful
+ * main status bits differs from num_regs.
+ * @sub_reg_offsets: arrays of mappings from main register bits to sub irq
+ * registers. First item in array describes the registers
+ * for first main status bit. Second array for second bit etc.
+ * Offset is given as sub register status offset to
+ * status_base. Should contain num_regs arrays.
+ * Can be provided for chips with more complex mapping than
+ * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ...
+ * @num_main_regs: Number of 'main status' irq registers for chips which have
+ * main_status set.
+ *
+ * @status_base: Base status register address.
+ * @mask_base: Base mask register address.
+ * @mask_writeonly: Base mask register is write only.
+ * @unmask_base: Base unmask register address, for chips which have
+ * separate mask and unmask registers
+ * @ack_base: Base ack address. If zero then the chip is clear on read.
+ * Using zero value is possible with @use_ack bit.
+ * @wake_base: Base address for wake enables. If zero unsupported.
+ * @type_base: Base address for irq type. If zero unsupported.
+ * @irq_reg_stride: Stride to use for chips where registers are not contiguous.
+ * @init_ack_masked: Ack all masked interrupts once during initialization.
+ * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @use_ack: Use @ack register even if it is zero.
+ * @ack_invert: Inverted ack register: cleared bits for ack.
+ * @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
+ * @wake_invert: Inverted wake register: cleared bits are wake enabled.
+ * @type_invert: Invert the type flags.
+ * @type_in_mask: Use the mask registers for controlling irq type. For
+ * interrupts defining type_rising/falling_mask use mask_base
+ * for edge configuration and never update bits in type_base.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
+ * @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ *
+ * @num_regs: Number of registers in each control bank.
+ * @irqs: Descriptors for individual IRQs. Interrupt numbers are
+ * assigned based on the index in the array of the interrupt.
+ * @num_irqs: Number of descriptors.
+ * @num_type_reg: Number of type registers.
+ * @type_reg_stride: Stride to use for chips where type registers are not
+ * contiguous.
+ * @handle_pre_irq: Driver specific callback to handle interrupt from device
+ * before regmap_irq_handler process the interrupts.
+ * @handle_post_irq: Driver specific callback to handle interrupt from device
+ * after handling the interrupts in regmap_irq_handler().
+ * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
+ * driver specific pre/post interrupt handler is called.
+ *
+ * This is not intended to handle every possible interrupt controller, but
+ * it should handle a substantial proportion of those that are found in the
+ * wild.
+ */
+struct regmap_irq_chip {
+ const char *name;
+
+ unsigned int main_status;
+ unsigned int num_main_status_bits;
+ struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ int num_main_regs;
+
+ unsigned int status_base;
+ unsigned int mask_base;
+ unsigned int unmask_base;
+ unsigned int ack_base;
+ unsigned int wake_base;
+ unsigned int type_base;
+ unsigned int irq_reg_stride;
+ bool mask_writeonly:1;
+ bool init_ack_masked:1;
+ bool mask_invert:1;
+ bool use_ack:1;
+ bool ack_invert:1;
+ bool clear_ack:1;
+ bool wake_invert:1;
+ bool runtime_pm:1;
+ bool type_invert:1;
+ bool type_in_mask:1;
+ bool clear_on_unmask:1;
+
+ int num_regs;
+
+ const struct regmap_irq *irqs;
+ int num_irqs;
+
+ int num_type_reg;
+ unsigned int type_reg_stride;
+
+ int (*handle_pre_irq)(void *irq_drv_data);
+ int (*handle_post_irq)(void *irq_drv_data);
+ void *irq_drv_data;
+};
+
+struct regmap_irq_chip_data;
+
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data);
+
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
+
+#else
+
+/*
+ * These stubs should only ever be called by generic code which has
+ * regmap based facilities, if they ever get called at runtime
+ * something is going wrong and something probably needs to select
+ * REGMAP.
+ */
+
+static inline int regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_test_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_update_bits_base(struct regmap_field *field,
+ unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_val_bytes(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_sync(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline void regcache_cache_only(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_mark_dirty(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regmap_async_complete(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline int regmap_register_patch(struct regmap *map,
+ const struct reg_sequence *regs,
+ int num_regs)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline struct regmap *dev_get_regmap(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct device *regmap_get_device(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return NULL;
+}
+
+#endif
+
+#endif
diff --git a/snd-alpx/include/5.14/regmap.h b/snd-alpx/include/5.14/regmap.h
new file mode 100644
index 0000000..8eb7922
--- /dev/null
+++ b/snd-alpx/include/5.14/regmap.h
@@ -0,0 +1,2041 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_REGMAP_H
+#define __LINUX_REGMAP_H
+
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/iopoll.h>
+#include <linux/fwnode.h>
+
+struct module;
+struct clk;
+struct device;
+struct device_node;
+struct fsi_device;
+struct i2c_client;
+struct i3c_device;
+struct irq_domain;
+struct mdio_device;
+struct slim_device;
+struct spi_device;
+struct spmi_device;
+struct regmap;
+struct regmap_range_cfg;
+struct regmap_field;
+struct snd_ac97;
+struct sdw_slave;
+
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
+/*
+ * regmap.reg_shift indicates by how much we must shift registers prior to
+ * performing any operation. It's a signed value, positive numbers means
+ * downshifting the register's address, while negative numbers means upshifting.
+ */
+#define REGMAP_UPSHIFT(s) (-(s))
+#define REGMAP_DOWNSHIFT(s) (s)
+
+/* An enum of all the supported cache types */
+enum regcache_type {
+ REGCACHE_NONE,
+ REGCACHE_RBTREE,
+ REGCACHE_FLAT,
+ REGCACHE_MAPLE,
+};
+
+/**
+ * struct reg_default - Default value for a register.
+ *
+ * @reg: Register address.
+ * @def: Register default value.
+ *
+ * We use an array of structs rather than a simple array as many modern devices
+ * have very sparse register maps.
+ */
+struct reg_default {
+ unsigned int reg;
+ unsigned int def;
+};
+
+/**
+ * struct reg_sequence - An individual write from a sequence of writes.
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ * @delay_us: Delay to be applied after the register write in microseconds
+ *
+ * Register/value pairs for sequences of writes with an optional delay in
+ * microseconds to be applied after each write.
+ */
+struct reg_sequence {
+ unsigned int reg;
+ unsigned int def;
+ unsigned int delay_us;
+};
+
+#define REG_SEQ(_reg, _def, _delay_us) { \
+ .reg = _reg, \
+ .def = _def, \
+ .delay_us = _delay_us, \
+ }
+#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0)
+
+/**
+ * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value at @addr is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (map), (addr), &(val)); \
+ __ret ?: __tmp; \
+})
+
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ * Should be less than ~10us since udelay is used
+ * (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first setup your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
+ * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
+ *
+ * @field: Regmap field to read from
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value of @field is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (field), &(val)); \
+ __ret ?: __tmp; \
+})
+
+#ifdef CONFIG_REGMAP
+
+enum regmap_endian {
+ /* Unspecified -> 0 -> Backwards compatible default */
+ REGMAP_ENDIAN_DEFAULT = 0,
+ REGMAP_ENDIAN_BIG,
+ REGMAP_ENDIAN_LITTLE,
+ REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * struct regmap_range - A register range, used for access related checks
+ * (readable/writeable/volatile/precious checks)
+ *
+ * @range_min: address of first register
+ * @range_max: address of last register
+ */
+struct regmap_range {
+ unsigned int range_min;
+ unsigned int range_max;
+};
+
+#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
+
+/**
+ * struct regmap_access_table - A table of register ranges for access checks
+ *
+ * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
+ * @n_yes_ranges: size of the above array
+ * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
+ * @n_no_ranges: size of the above array
+ *
+ * A table of ranges including some yes ranges and some no ranges.
+ * If a register belongs to a no_range, the corresponding check function
+ * will return false. If a register belongs to a yes range, the corresponding
+ * check function will return true. "no_ranges" are searched first.
+ */
+struct regmap_access_table {
+ const struct regmap_range *yes_ranges;
+ unsigned int n_yes_ranges;
+ const struct regmap_range *no_ranges;
+ unsigned int n_no_ranges;
+};
+
+typedef void (*regmap_lock)(void *);
+typedef void (*regmap_unlock)(void *);
+
+/**
+ * struct regmap_config - Configuration for the register map of a device.
+ *
+ * @name: Optional name of the regmap. Useful when a device has multiple
+ * register regions.
+ *
+ * @reg_bits: Number of bits in a register address, mandatory.
+ * @reg_stride: The register address stride. Valid register addresses are a
+ * multiple of this value. If set to 0, a value of 1 will be
+ * used.
+ * @reg_shift: The number of bits to shift the register before performing any
+ * operations. Any positive number will be downshifted, and negative
+ * values will be upshifted
+ * @reg_base: Value to be added to every register address before performing any
+ * operation.
+ * @pad_bits: Number of bits of padding between register and value.
+ * @val_bits: Number of bits in a register value, mandatory.
+ *
+ * @writeable_reg: Optional callback returning true if the register
+ * can be written to. If this field is NULL but wr_table
+ * (see below) is not, the check is performed on such table
+ * (a register is writeable if it belongs to one of the ranges
+ * specified by wr_table).
+ * @readable_reg: Optional callback returning true if the register
+ * can be read from. If this field is NULL but rd_table
+ * (see below) is not, the check is performed on such table
+ * (a register is readable if it belongs to one of the ranges
+ * specified by rd_table).
+ * @volatile_reg: Optional callback returning true if the register
+ * value can't be cached. If this field is NULL but
+ * volatile_table (see below) is not, the check is performed on
+ * such table (a register is volatile if it belongs to one of
+ * the ranges specified by volatile_table).
+ * @precious_reg: Optional callback returning true if the register
+ * should not be read outside of a call from the driver
+ * (e.g., a clear on read interrupt status register). If this
+ * field is NULL but precious_table (see below) is not, the
+ * check is performed on such table (a register is precious if
+ * it belongs to one of the ranges specified by precious_table).
+ * @writeable_noinc_reg: Optional callback returning true if the register
+ * supports multiple write operations without incrementing
+ * the register number. If this field is NULL but
+ * wr_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * writeable if it belongs to one of the ranges specified
+ * by wr_noinc_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * readable if it belongs to one of the ranges specified
+ * by rd_noinc_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
+ * @lock: Optional lock callback (overrides regmap's default lock
+ * function, based on spinlock or mutex).
+ * @unlock: As above for unlocking.
+ * @lock_arg: this field is passed as the only argument of lock/unlock
+ * functions (ignored in case regular lock/unlock functions
+ * are not overridden).
+ * @reg_read: Optional callback that if filled will be used to perform
+ * all the reads from the registers. Should only be provided for
+ * devices whose read operation cannot be represented as a simple
+ * read operation on a bus such as SPI, I2C, etc. Most of the
+ * devices do not need this.
+ * @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ * all the update_bits(rmw) operation. Should only be provided
+ * if the function require special handling with lock and reg
+ * handling and the operation cannot be represented as a simple
+ * update_bits operation on a bus such as SPI, I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of struct regmap_config).
+ * This field is a duplicate of a similar field in
+ * 'struct regmap_bus' and serves exact same purpose.
+ * Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
+ * @max_register: Optional, specifies the maximum valid register address.
+ * @wr_table: Optional, points to a struct regmap_access_table specifying
+ * valid ranges for write access.
+ * @rd_table: As above, for read access.
+ * @volatile_table: As above, for volatile registers.
+ * @precious_table: As above, for precious registers.
+ * @wr_noinc_table: As above, for no increment writeable registers.
+ * @rd_noinc_table: As above, for no increment readable registers.
+ * @reg_defaults: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults: Number of elements in reg_defaults.
+ *
+ * @read_flag_mask: Mask to be set in the top bytes of the register when doing
+ * a read.
+ * @write_flag_mask: Mask to be set in the top bytes of the register when doing
+ * a write. If both read_flag_mask and write_flag_mask are
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
+ * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers.
+ * This can avoid load on devices which don't require strict
+ * orderings, but drivers should carefully add any explicit
+ * memory barriers when they may require them.
+ * @use_single_read: If set, converts the bulk read operation into a series of
+ * single read operations. This is useful for a device that
+ * does not support bulk read.
+ * @use_single_write: If set, converts the bulk write operation into a series of
+ * single write operations. This is useful for a device that
+ * does not support bulk write.
+ * @can_multi_write: If set, the device supports the multi write mode of bulk
+ * write operations, if clear multi write requests will be
+ * split into individual write operations
+ *
+ * @cache_type: The actual cache type.
+ * @reg_defaults_raw: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ * @reg_format_endian: Endianness for formatted register addresses. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ * @val_format_endian: Endianness for formatted register values. If this is
+ * DEFAULT, the @val_format_endian_default value from the
+ * regmap bus is used.
+ *
+ * @ranges: Array of configuration entries for virtual address ranges.
+ * @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
+ */
+struct regmap_config {
+ const char *name;
+
+ int reg_bits;
+ int reg_stride;
+ int reg_shift;
+ unsigned int reg_base;
+ int pad_bits;
+ int val_bits;
+
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
+
+ bool fast_io;
+ bool io_port;
+
+ unsigned int max_register;
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
+ const struct regmap_access_table *rd_noinc_table;
+ const struct reg_default *reg_defaults;
+ unsigned int num_reg_defaults;
+ enum regcache_type cache_type;
+ const void *reg_defaults_raw;
+ unsigned int num_reg_defaults_raw;
+
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
+ bool zero_flag_mask;
+
+ bool use_single_read;
+ bool use_single_write;
+ bool use_relaxed_mmio;
+ bool can_multi_write;
+
+ enum regmap_endian reg_format_endian;
+ enum regmap_endian val_format_endian;
+
+ const struct regmap_range_cfg *ranges;
+ unsigned int num_ranges;
+
+ bool use_hwlock;
+ bool use_raw_spinlock;
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
+
+ bool can_sleep;
+};
+
+/**
+ * struct regmap_range_cfg - Configuration for indirectly accessed or paged
+ * registers.
+ *
+ * @name: Descriptive name for diagnostics
+ *
+ * @range_min: Address of the lowest register address in virtual range.
+ * @range_max: Address of the highest register in virtual range.
+ *
+ * @selector_reg: Register with selector field.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
+ *
+ * @window_start: Address of first (lowest) register in data window.
+ * @window_len: Number of registers in data window.
+ *
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ * 1. page selector register update;
+ * 2. access through data window registers.
+ */
+struct regmap_range_cfg {
+ const char *name;
+
+ /* Registers of virtual address range */
+ unsigned int range_min;
+ unsigned int range_max;
+
+ /* Page selector for indirect addressing */
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ /* Data window (per each page) */
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
+struct regmap_async;
+
+typedef int (*regmap_hw_write)(void *context, const void *data,
+ size_t count);
+typedef int (*regmap_hw_gather_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len);
+typedef int (*regmap_hw_async_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *async);
+typedef int (*regmap_hw_read)(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
+ unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
+typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
+ unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
+typedef void (*regmap_hw_free_context)(void *context);
+
+/**
+ * struct regmap_bus - Description of a hardware bus for the register map
+ * infrastructure.
+ *
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of
+ * struct regmap_config).
+ * @free_on_exit: kfree this on exit of regmap
+ * @write: Write operation.
+ * @gather_write: Write operation with split register/value, return -ENOTSUPP
+ * if not implemented on a given device.
+ * @async_write: Write operation which completes asynchronously, optional and
+ * must serialise with respect to non-async I/O.
+ * @reg_write: Write a single register value to the given register address. This
+ * write operation has to complete when returning from the function.
+ * @reg_write_noinc: Write multiple register value to the same register. This
+ * write operation has to complete when returning from the function.
+ * @reg_update_bits: Update bits operation to be used against volatile
+ * registers, intended for devices supporting some mechanism
+ * for setting/clearing bits without having to
+ * read/modify/write.
+ * @read: Read operation. Data is returned in the buffer used to transmit
+ * data.
+ * @reg_read: Read a single register value from a given register address.
+ * @free_context: Free context.
+ * @async_alloc: Allocate a regmap_async() structure.
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @reg_format_endian_default: Default endianness for formatted register
+ * addresses. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @val_format_endian_default: Default endianness for formatted register
+ * values. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @max_raw_read: Max raw read size that can be used on the bus.
+ * @max_raw_write: Max raw write size that can be used on the bus.
+ */
+struct regmap_bus {
+ bool fast_io;
+ bool free_on_exit;
+ regmap_hw_write write;
+ regmap_hw_gather_write gather_write;
+ regmap_hw_async_write async_write;
+ regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
+ regmap_hw_reg_update_bits reg_update_bits;
+ regmap_hw_read read;
+ regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
+ regmap_hw_free_context free_context;
+ regmap_hw_async_alloc async_alloc;
+ u8 read_flag_mask;
+ enum regmap_endian reg_format_endian_default;
+ enum regmap_endian val_format_endian_default;
+ size_t max_raw_read;
+ size_t max_raw_write;
+};
+
+/*
+ * __regmap_init functions.
+ *
+ * These functions take a lock key and name parameter, and should not be called
+ * directly. Instead, use the regmap_init macros that generate a key and name
+ * for each call.
+ */
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
+/*
+ * Wrapper for regmap_init macros to include a unique lockdep key and name
+ * for each call. No-op if CONFIG_LOCKDEP is not set.
+ *
+ * @fn: Real function to call (in the form __[*_]regmap_init[_*])
+ * @name: Config variable name (#config in the calling macro)
+ **/
+#ifdef CONFIG_LOCKDEP
+#define __regmap_lockdep_wrapper(fn, name, ...) \
+( \
+ ({ \
+ static struct lock_class_key _key; \
+ fn(__VA_ARGS__, &_key, \
+ KBUILD_BASENAME ":" \
+ __stringify(__LINE__) ":" \
+ "(" name ")->lock"); \
+ }) \
+)
+#else
+#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
+#endif
+
+/**
+ * regmap_init() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+#define regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__regmap_init, #config, \
+ dev, bus, bus_context, config)
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config);
+
+/**
+ * regmap_init_i2c() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+/*
+ * NOTE(review): each wrapper below stringifies its @config argument
+ * (#config) and passes it to __regmap_lockdep_wrapper() together with
+ * the matching __regmap_init_*() function — presumably the string is
+ * used as a lockdep lock-class name; confirm against the wrapper's
+ * definition (not in this file).
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_slimbus() - Initialise register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
+ * regmap_init_spi() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_base() - Create regmap for the Base register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * regmap_init_spmi_ext() - Create regmap for Ext register space
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * regmap_init_w1() - Initialise register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_w1(w1_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_w1, #config, \
+ w1_dev, config)
+
+/**
+ * regmap_init_mmio_clk() - Initialise register map with register clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
+
+/**
+ * regmap_init_mmio() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mmio(dev, regs, config) \
+ regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * regmap_init_ac97() - Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
+ ac97, config)
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_sdw_mbq() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * regmap_init_fsi() - Initialise register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_fsi, #config, fsi_dev, \
+ config)
+
+/*
+ * Device-managed (devm_) counterparts of the regmap_init_*() wrappers
+ * above: per their kerneldoc, the returned regmap is freed automatically
+ * by the device management code, so callers need no explicit
+ * regmap_exit().
+ */
+/**
+ * devm_regmap_init() - Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+#define devm_regmap_init(dev, bus, bus_context, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
+ dev, bus, bus_context, config)
+
+/**
+ * devm_regmap_init_i2c() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i2c(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
+ * devm_regmap_init_spi() - Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_base(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
+ *
+ * @dev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spmi_ext(dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
+ dev, config)
+
+/**
+ * devm_regmap_init_w1() - Initialise managed register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_w1(w1_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_w1, #config, \
+ w1_dev, config)
+/**
+ * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
+ *
+ * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
+ dev, clk_id, regs, config)
+
+/**
+ * devm_regmap_init_mmio() - Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mmio(dev, regs, config) \
+ devm_regmap_init_mmio_clk(dev, NULL, regs, config)
+
+/**
+ * devm_regmap_init_ac97() - Initialise AC'97 register map
+ *
+ * @ac97: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_ac97(ac97, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
+ ac97, config)
+
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * devm_regmap_init_sdw_mbq() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * devm_regmap_init_slimbus() - Initialise managed register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
+ * devm_regmap_init_i3c() - Initialise managed register map
+ *
+ * @i3c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i3c(i3c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \
+ i3c, config)
+
+/**
+ * devm_regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * devm_regmap_init_fsi() - Initialise managed register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_fsi, #config, \
+ fsi_dev, config)
+
+/* MMIO regmap clock attach/detach, regmap lifetime and lookup API. */
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
+void regmap_mmio_detach_clk(struct regmap *map);
+void regmap_exit(struct regmap *map);
+int regmap_reinit_cache(struct regmap *map,
+ const struct regmap_config *config);
+struct regmap *dev_get_regmap(struct device *dev, const char *name);
+struct device *regmap_get_device(struct regmap *map);
+/*
+ * Register accessors: single-value, raw-buffer, non-incrementing
+ * (noinc), bulk and multi-register variants, plus *_async forms.
+ */
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ size_t val_count);
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs);
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_sequence *regs,
+ int num_regs);
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count);
+/* Workhorse behind all regmap_update_bits*() / regmap_write_bits() inlines. */
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+
+/* Read-modify-write of @mask bits: no change reporting, sync, not forced. */
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
+}
+
+/* As regmap_update_bits() but issued asynchronously (async = true). */
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
+}
+
+/* As regmap_update_bits() with change reporting through *@change. */
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, false, false);
+}
+
+/* Change-reporting variant with asynchronous I/O (async = true). */
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, true, false);
+}
+
+/* Like regmap_update_bits() but with force = true. */
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
+/* Queries about a regmap's data format, limits and capabilities. */
+int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
+bool regmap_might_sleep(struct regmap *map);
+int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
+size_t regmap_get_raw_read_max(struct regmap *map);
+size_t regmap_get_raw_write_max(struct regmap *map);
+
+/* Register-cache maintenance (sync, drop, bypass, dirty tracking). */
+int regcache_sync(struct regmap *map);
+int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+void regcache_cache_only(struct regmap *map, bool enable);
+void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+bool regcache_reg_cached(struct regmap *map, unsigned int reg);
+
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table);
+
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val);
+
+/* True if @reg lies within [range->range_min, range->range_max] (inclusive). */
+static inline bool regmap_reg_in_range(unsigned int reg,
+ const struct regmap_range *range)
+{
+ return reg >= range->range_min && reg <= range->range_max;
+}
+
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges);
+
+/* Set @bits in @reg (mask == val == bits). */
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, bits,
+ NULL, false, false);
+}
+
+/* Clear @bits in @reg (mask = bits, val = 0). */
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
+}
+
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
+
+/**
+ * struct reg_field - Description of an register field
+ *
+ * @reg: Offset of the register within the regmap bank
+ * @lsb: lsb of the register field.
+ * @msb: msb of the register field.
+ * @id_size: port size if it has some ports
+ * @id_offset: address offset for each ports
+ */
+struct reg_field {
+ unsigned int reg;
+ unsigned int lsb;
+ unsigned int msb;
+ unsigned int id_size;
+ unsigned int id_offset;
+};
+
+/* Initialise a struct reg_field without port indexing (id_size/id_offset 0). */
+#define REG_FIELD(_reg, _lsb, _msb) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ }
+
+/* Initialise a multi-port struct reg_field: @_size ports, @_offset apart. */
+#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .id_size = _size, \
+ .id_offset = _offset, \
+ }
+
+/*
+ * Allocation and release of regmap_field handles: plain, device-managed
+ * (devm_) and bulk variants, plus the field accessor entry points.
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field);
+void regmap_field_free(struct regmap_field *field);
+
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field);
+void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void regmap_field_bulk_free(struct regmap_field *field);
+int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
+ struct regmap_field **field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field);
+
+int regmap_field_read(struct regmap_field *field, unsigned int *val);
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val);
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force);
+
+/* Write @val to the whole field (mask ~0; sync, not forced). */
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val,
+ NULL, false, false);
+}
+
+/* Write @val to the whole field with force = true. */
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val, NULL, false, true);
+}
+
+/* Update @mask bits of the field (no change reporting, sync, not forced). */
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, false);
+}
+
+/* Set @bits in the field (mask == val == bits). */
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, bits, NULL, false,
+ false);
+}
+
+/* Clear @bits in the field (mask = bits, val = 0). */
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, 0, NULL, false,
+ false);
+}
+
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits);
+
+/* Masked field update with force = true. */
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, true);
+}
+
+/* Port-indexed (@id) variant of regmap_field_write(). */
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, false);
+}
+
+/* Port-indexed (@id) whole-field write with force = true. */
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, true);
+}
+
+/* Port-indexed (@id) masked update (sync, not forced). */
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, false);
+}
+
+/* Port-indexed (@id) masked update with force = true. */
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, true);
+}
+
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_reg_mask: Mask to type register.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
+
+/**
+ * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask: Mask used to flag/control the register.
+ * @type: IRQ trigger type setting details if supported.
+ */
+struct regmap_irq {
+ unsigned int reg_offset;
+ unsigned int mask;
+ struct regmap_irq_type type;
+};
+
+/* Designated initialiser for irqs[_irq] with explicit offset and mask. */
+#define REGMAP_IRQ_REG(_irq, _off, _mask) \
+ [_irq] = { .reg_offset = (_off), .mask = (_mask) }
+
+/* Derive reg_offset and mask from a linear irq id and register width in bits. */
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
+/* Build a regmap_irq_sub_irq_map initialiser from an array of offsets. */
+#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \
+ { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] }
+
+/* Offsets of the sub-irq registers for one main-status bit (see
+ * @sub_reg_offsets in struct regmap_irq_chip). */
+struct regmap_irq_sub_irq_map {
+ unsigned int num_regs;
+ unsigned int *offset;
+};
+
+struct regmap_irq_chip_data;
+
+/**
+ * struct regmap_irq_chip - Description of a generic regmap irq_chip.
+ *
+ * @name: Descriptive name for IRQ controller.
+ *
+ * @main_status: Base main status register address. For chips which have
+ * interrupts arranged in separate sub-irq blocks with own IRQ
+ * registers and which have a main IRQ registers indicating
+ * sub-irq blocks with unhandled interrupts. For such chips fill
+ * sub-irq register information in status_base, mask_base and
+ * ack_base.
+ * @num_main_status_bits: Should be given to chips where number of meaningful
+ * main status bits differs from num_regs.
+ * @sub_reg_offsets: arrays of mappings from main register bits to sub irq
+ * registers. First item in array describes the registers
+ * for first main status bit. Second array for second bit etc.
+ * Offset is given as sub register status offset to
+ * status_base. Should contain num_regs arrays.
+ * Can be provided for chips with more complex mapping than
+ * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ...
+ * When used with not_fixed_stride, each one-element array
+ * member contains offset calculated as address from each
+ * peripheral to first peripheral.
+ * @num_main_regs: Number of 'main status' irq registers for chips which have
+ * main_status set.
+ *
+ * @status_base: Base status register address.
+ * @mask_base: Base mask register address. Mask bits are set to 1 when an
+ * interrupt is masked, 0 when unmasked.
+ * @unmask_base: Base unmask register address. Unmask bits are set to 1 when
+ * an interrupt is unmasked and 0 when masked.
+ * @ack_base: Base ack address. If zero then the chip is clear on read.
+ * Using zero value is possible with @use_ack bit.
+ * @wake_base: Base address for wake enables. If zero unsupported.
+ * @type_base: Base address for irq type. If zero unsupported. Deprecated,
+ * use @config_base instead.
+ * @virt_reg_base: Base addresses for extra config regs. Deprecated, use
+ * @config_base instead.
+ * @config_base: Base address for IRQ type config regs. If null unsupported.
+ * @irq_reg_stride: Stride to use for chips where registers are not contiguous.
+ * @init_ack_masked: Ack all masked interrupts once during initialization.
+ * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
+ * both @mask_base and @unmask_base. If false, mask and unmask bits are
+ * inverted (which is deprecated behavior); if true, bits will not be
+ * inverted and the registers keep their normal behavior. Note that if
+ * you use only one of @mask_base or @unmask_base, this flag has no
+ * effect and is unnecessary. Any new drivers that set both @mask_base
+ * and @unmask_base should set this to true to avoid relying on the
+ * deprecated behavior.
+ * @use_ack: Use @ack register even if it is zero.
+ * @ack_invert: Inverted ack register: cleared bits for ack.
+ * @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
+ * @wake_invert: Inverted wake register: cleared bits are wake enabled.
+ * @type_in_mask: Use the mask registers for controlling irq type. Use this if
+ * the hardware provides separate bits for rising/falling edge
+ * or low/high level interrupts and they should be combined into
+ * a single logical interrupt. Use &struct regmap_irq_type data
+ * to define the mask bit for each irq type.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
+ * @not_fixed_stride: Used when chip peripherals are not laid out with fixed
+ * stride. Must be used with sub_reg_offsets containing the
+ * offsets to each peripheral. Deprecated; the same thing
+ * can be accomplished with a @get_irq_reg callback, without
+ * the need for a @sub_reg_offsets table.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
+ * @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ * @no_status: No status register: all interrupts assumed generated by device.
+ *
+ * @num_regs: Number of registers in each control bank.
+ * @irqs: Descriptors for individual IRQs. Interrupt numbers are
+ * assigned based on the index in the array of the interrupt.
+ * @num_irqs: Number of descriptors.
+ * @num_type_reg: Number of type registers. Deprecated, use config registers
+ * instead.
+ * @num_virt_regs: Number of non-standard irq configuration registers.
+ * If zero unsupported. Deprecated, use config registers
+ * instead.
+ * @num_config_bases: Number of config base registers.
+ * @num_config_regs: Number of config registers for each config base register.
+ * @handle_pre_irq: Driver specific callback to handle interrupt from device
+ * before regmap_irq_handler process the interrupts.
+ * @handle_post_irq: Driver specific callback to handle interrupt from device
+ * after handling the interrupts in regmap_irq_handler().
+ * @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
+ * in the range [0, num_regs)
+ * @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
+ * and configure virt regs. Deprecated, use @set_type_config
+ * callback and config registers instead.
+ * @set_type_config: Callback used for configuring irq types.
+ * @get_irq_reg: Callback for mapping (base register, index) pairs to register
+ * addresses. The base register will be one of @status_base,
+ * @mask_base, etc., @main_status, or any of @config_base.
+ * The index will be in the range [0, num_main_regs[ for the
+ * main status base, [0, num_type_settings[ for any config
+ * register base, and [0, num_regs[ for any other base.
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used.
+ * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
+ * driver specific pre/post interrupt handler is called.
+ *
+ * This is not intended to handle every possible interrupt controller, but
+ * it should handle a substantial proportion of those that are found in the
+ * wild.
+ */
+struct regmap_irq_chip {
+ const char *name;
+
+ unsigned int main_status;
+ unsigned int num_main_status_bits;
+ struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ int num_main_regs;
+
+ unsigned int status_base;
+ unsigned int mask_base;
+ unsigned int unmask_base;
+ unsigned int ack_base;
+ unsigned int wake_base;
+ unsigned int type_base;
+ unsigned int *virt_reg_base;
+ const unsigned int *config_base;
+ unsigned int irq_reg_stride;
+ unsigned int init_ack_masked:1;
+ unsigned int mask_unmask_non_inverted:1;
+ unsigned int use_ack:1;
+ unsigned int ack_invert:1;
+ unsigned int clear_ack:1;
+ unsigned int wake_invert:1;
+ unsigned int runtime_pm:1;
+ unsigned int type_in_mask:1;
+ unsigned int clear_on_unmask:1;
+ unsigned int not_fixed_stride:1;
+ unsigned int status_invert:1;
+ unsigned int no_status:1;
+
+ int num_regs;
+
+ const struct regmap_irq *irqs;
+ int num_irqs;
+
+ int num_type_reg;
+ int num_virt_regs;
+ int num_config_bases;
+ int num_config_regs;
+
+ int (*handle_pre_irq)(void *irq_drv_data);
+ int (*handle_post_irq)(void *irq_drv_data);
+ int (*handle_mask_sync)(struct regmap *map, int index,
+ unsigned int mask_buf_def,
+ unsigned int mask_buf, void *irq_drv_data);
+ int (*set_type_virt)(unsigned int **buf, unsigned int type,
+ unsigned long hwirq, int reg);
+ int (*set_type_config)(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx,
+ void *irq_drv_data);
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+ void *irq_drv_data;
+};
+
+/* Default @get_irq_reg implementation and a simple @set_type_config helper. */
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data,
+ int idx, void *irq_drv_data);
+
+/* Registration/teardown of generic regmap IRQ chips (plain and devm_). */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data);
+
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
+
+#else
+
+/*
+ * These stubs should only ever be called by generic code which has
+ * regmap based facilities, if they ever get called at runtime
+ * something is going wrong and something probably needs to select
+ * REGMAP.
+ */
+
+/* Every stub below warns once ("regmap API is disabled") and fails -EINVAL. */
+static inline int regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_test_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_update_bits_base(struct regmap_field *field,
+ unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_test_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_val_bytes(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline bool regmap_might_sleep(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return true;
+}
+
+static inline int regcache_sync(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline void regcache_cache_only(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_mark_dirty(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regmap_async_complete(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline int regmap_register_patch(struct regmap *map,
+ const struct reg_sequence *regs,
+ int num_regs)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline struct regmap *dev_get_regmap(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct device *regmap_get_device(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return NULL;
+}
+
+#endif
+
+#endif
diff --git a/snd-alpx/include/5.3/dmaengine.h b/snd-alpx/include/5.3/dmaengine.h
new file mode 100644
index 0000000..501c0b0
--- /dev/null
+++ b/snd-alpx/include/5.3/dmaengine.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channels completed
+ * cookie marker. Zero the descriptors cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
+#endif
diff --git a/snd-alpx/include/5.3/virt-dma.h b/snd-alpx/include/5.3/virt-dma.h
new file mode 100644
index 0000000..ab158ba
--- /dev/null
+++ b/snd-alpx/include/5.3/virt-dma.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ struct dmaengine_result tx_result;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_allocated;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+
+ struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ unsigned long flags;
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ vd->tx_result.result = DMA_TRANS_NOERROR;
+ vd->tx_result.residue = 0;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * @vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * @vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+
+ cookie = vd->tx.cookie;
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * @vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * @vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_allocated, head);
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ unsigned long flags;
+
+ tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+#endif
diff --git a/snd-alpx/include/5.6/dmaengine.h b/snd-alpx/include/5.6/dmaengine.h
new file mode 100644
index 0000000..e8a320c
--- /dev/null
+++ b/snd-alpx/include/5.6/dmaengine.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+ chan->cookie = DMA_MIN_COOKIE;
+ chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *chan = tx->chan;
+ dma_cookie_t cookie;
+
+ cookie = chan->cookie + 1;
+ if (cookie < DMA_MIN_COOKIE)
+ cookie = DMA_MIN_COOKIE;
+ tx->cookie = chan->cookie = cookie;
+
+ return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channels completed
+ * cookie marker. Zero the descriptors cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+ BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+ tx->chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL. No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ dma_cookie_t used, complete;
+
+ used = chan->cookie;
+ complete = chan->completed_cookie;
+ barrier();
+ if (state) {
+ state->last = complete;
+ state->used = used;
+ state->residue = 0;
+ state->in_flight_bytes = 0;
+ }
+ return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+ if (state)
+ state->residue = residue;
+}
+
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
+#endif
diff --git a/snd-alpx/include/5.6/virt-dma.h b/snd-alpx/include/5.6/virt-dma.h
new file mode 100644
index 0000000..e9f5250
--- /dev/null
+++ b/snd-alpx/include/5.6/virt-dma.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ struct dmaengine_result tx_result;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_allocated;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+ struct list_head desc_terminated;
+
+ struct virt_dma_desc *cyclic;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ unsigned long flags;
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ vd->tx_result.result = DMA_TRANS_NOERROR;
+ vd->tx_result.residue = 0;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * @vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * @vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ dma_cookie_t cookie;
+
+ cookie = vd->tx.cookie;
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
+ vc->desc_free(vd);
+ }
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * @vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ list_add_tail(&vd->node, &vc->desc_terminated);
+
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * @vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_allocated, head);
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ LIST_HEAD(head);
+ unsigned long flags;
+
+ tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
diff --git a/snd-alpx/include/6.2/amd_xdma.h b/snd-alpx/include/6.2/amd_xdma.h
new file mode 100644
index 0000000..ceba69e
--- /dev/null
+++ b/snd-alpx/include/6.2/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
diff --git a/snd-alpx/include/6.2/dmaengine.h b/snd-alpx/include/6.2/dmaengine.h
new file mode 100644
index 0000000..c3656e5
--- /dev/null
+++ b/snd-alpx/include/6.2/dmaengine.h
@@ -0,0 +1,1637 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ */
+#ifndef LINUX_DMAENGINE_H
+#define LINUX_DMAENGINE_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/uio.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <asm/page.h>
+
+/**
+ * typedef dma_cookie_t - an opaque DMA cookie
+ *
+ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
+ */
+typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE 1
+
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+ return cookie < 0 ? cookie : 0;
+}
+
+/**
+ * enum dma_status - DMA transaction status
+ * @DMA_COMPLETE: transaction completed
+ * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
+ * @DMA_ERROR: transaction failed
+ */
+enum dma_status {
+ DMA_COMPLETE,
+ DMA_IN_PROGRESS,
+ DMA_PAUSED,
+ DMA_ERROR,
+ DMA_OUT_OF_ORDER,
+};
+
+/**
+ * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
+ * automatically set as dma devices are registered.
+ */
+enum dma_transaction_type {
+ DMA_MEMCPY,
+ DMA_XOR,
+ DMA_PQ,
+ DMA_XOR_VAL,
+ DMA_PQ_VAL,
+ DMA_MEMSET,
+ DMA_MEMSET_SG,
+ DMA_INTERRUPT,
+ DMA_PRIVATE,
+ DMA_ASYNC_TX,
+ DMA_SLAVE,
+ DMA_CYCLIC,
+ DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
+/* last transaction type for creation of the capabilities mask */
+ DMA_TX_TYPE_END,
+};
+
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+ DMA_MEM_TO_MEM,
+ DMA_MEM_TO_DEV,
+ DMA_DEV_TO_MEM,
+ DMA_DEV_TO_DEV,
+ DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is collection of contiguous bytes to be transferred.
+ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ * that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ * type of transfer it is going to need during its lifetime and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * == Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ * size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ * chunk and before first src/dst address for next chunk.
+ * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+ size_t size;
+ size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ * and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ * Otherwise, source is read contiguously (icg ignored).
+ * Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ * Otherwise, destination is filled contiguously (icg ignored).
+ * Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[];
+};
+
+/**
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ * control completion, and communicate status.
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ * this transaction
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
+ * acknowledges receipt, i.e. has a chance to establish any dependency
+ * chains
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ * sources that were the result of a previous operation, in the case of a PQ
+ * operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ * on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ * cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ * data and the descriptor should be in different format from normal
+ * data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
+ */
+enum dma_ctrl_flags {
+ DMA_PREP_INTERRUPT = (1 << 0),
+ DMA_CTRL_ACK = (1 << 1),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
+ DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
+};
+
+/**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+ SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+ SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
+ * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
+ * See linux/cpumask.h
+ */
+typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two mode is not compatible and clients must use one mode for a
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
+
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
+struct dma_chan_percpu {
+ /* stats */
+ unsigned long memcpy_count;
+ unsigned long bytes_transferred;
+};
+
+/**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *dev, void *route_data);
+};
+
+/**
+ * struct dma_chan - devices supply DMA channels, clients use them
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
+ * @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
+ * @chan_id: channel ID for sysfs
+ * @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
+ * @device_node: used to add this to the device chan list
+ * @local: per-cpu pointer to a struct dma_chan_percpu
+ * @client_count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
+ * @private: private data for certain client-channel associations
+ */
+struct dma_chan {
+ struct dma_device *device;
+ struct device *slave;
+ dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
+
+ /* sysfs */
+ int chan_id;
+ struct dma_chan_dev *dev;
+ const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
+
+ struct list_head device_node;
+ struct dma_chan_percpu __percpu *local;
+ int client_count;
+ int table_count;
+
+ /* DMA router */
+ struct dma_router *router;
+ void *route_data;
+
+ void *private;
+};
+
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @chan_dma_dev: The channel is using custom/different dma-mapping
+ * compared to the parent dma_device
+ */
+struct dma_chan_dev {
+ struct dma_chan *chan;
+ struct device device;
+ int dev_id;
+ bool chan_dma_dev;
+};
+
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values. DEPRECATED, drivers should use the direction argument
+ * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
+ * the dir field in the dma_interleaved_template structure.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX), if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX), if the destination is memory this argument
+ * is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ * @src_port_window_size: The length of the register area in words the data need
+ * to be accessed on the device side. It is only used for devices which is using
+ * an area instead of a single register to receive the data. Typically the DMA
+ * loops in this area in order to transfer the data.
+ * @dst_port_window_size: same as src_port_window_size but for the destination
+ * port.
+ * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
+ * with 'true' if peripheral should be flow controller. Direction will be
+ * selected at Runtime.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
+ *
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controllers in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it be sent in static from the platform
+ * data, then prefer to do that.
+ */
+struct dma_slave_config {
+ enum dma_transfer_direction direction;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+ u32 src_port_window_size;
+ u32 dst_port_window_size;
+ bool device_fc;
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not support. The
+ * DMA channel is only able to tell whether a descriptor has been completed or
+ * not, which means residue reporting is not supported by this channel. The
+ * residue field of the dma_tx_state field will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ * completed segment of the transfer (For cyclic transfers this is after each
+ * period). This is typically implemented by having the hardware generate an
+ * interrupt after each transferred segment and then the drivers updates the
+ * outstanding residue by the size of the segment. Another possibility is if
+ * the hardware supports scatter-gather and the segment descriptor has a field
+ * which gets set after the segment has been completed. The driver then counts
+ * the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ * burst. This is typically only supported if the hardware has a progress
+ * register of some sort (E.g. a register with the current read/write address
+ * or a register with the amount of bursts/beats/bytes that have been
+ * transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ * @src_addr_widths: bit mask of src addr widths the channel supports.
+ * Width is specified in bytes, e.g. for a channel supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
+ * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA tansaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ * for resume later)
+ * @cmd_resume: true, if resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ u32 min_burst;
+ u32 max_burst;
+ u32 max_sg_burst;
+ bool cmd_pause;
+ bool cmd_resume;
+ bool cmd_terminate;
+ enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
+};
+
+static inline const char *dma_chan_name(struct dma_chan *chan)
+{
+ return dev_name(&chan->dev->device);
+}
+
+void dma_chan_cleanup(struct kref *kref);
+
+/**
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned. Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask. It returns 'true' to indicate that the
+ * channel is suitable.
+ */
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0, /* SUCCESS */
+ DMA_TRANS_READ_FAILED, /* Source DMA read failed */
+ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
+ DMA_TRANS_ABORTED, /* Op never submitted / aborted */
+};
+
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+ const struct dmaengine_result *result);
+
+struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ u16 map_cnt;
+#else
+ u8 map_cnt;
+#endif
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+};
+
+/**
+ * struct dma_async_tx_descriptor - async transaction descriptor
+ * ---dma generic offload fields---
+ * @cookie: tracking cookie for this transaction, set to -EBUSY if
+ * this tx is sitting on a dependency list
+ * @flags: flags to augment operation preparation, control completion, and
+ * communicate status
+ * @phys: physical address of the descriptor
+ * @chan: target channel for this operation
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @callback: routine to call after this operation is complete
+ * @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
+ * ---async_tx api specific fields---
+ * @next: at completion submit this descriptor
+ * @parent: pointer to the next level up in the dependency chain
+ * @lock: protect the parent and next pointers
+ */
+struct dma_async_tx_descriptor {
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
+ dma_addr_t phys;
+ struct dma_chan *chan;
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+ struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ struct dma_async_tx_descriptor *next;
+ struct dma_async_tx_descriptor *parent;
+ spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+}
+
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+ spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+ spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ txd->next = next;
+ next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+ txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+ txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
+ */
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
+ u32 in_flight_bytes;
+};
+
+/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ * parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+ const char *devname;
+ const char *slave;
+ void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ * mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+ dma_filter_fn fn;
+ int mapcnt;
+ const struct dma_slave_map *map;
+};
+
+/**
+ * struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
+ * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
+ * @channels: the list of struct dma_chan
+ * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
+ * @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
+ * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
+ * @dev_id: unique device ID
+ * @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * Width is specified in bytes, e.g. for a device supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave directions the device supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
+ * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA tansaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @residue_granularity: granularity of the transfer residue reported
+ * by tx_status
+ * @device_alloc_chan_resources: allocate resources and return the
+ * number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_prep_dma_memcpy: prepares a memcpy operation
+ * @device_prep_dma_xor: prepares a xor operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
+ * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ * The function takes a buffer of size buf_len. The callback function will
+ * be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
+ * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ * code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ * 0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ * paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ * or an error code
+ * @device_synchronize: Synchronizes the termination of a transfers to the
+ * current context.
+ * @device_tx_status: poll for transaction completion, the optional
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
+ * @device_issue_pending: push pending transactions to hardware
+ * @device_release: called sometime atfer dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources however many existing drivers
+ * do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
+ */
+struct dma_device {
+ struct kref ref;
+ unsigned int chancnt;
+ unsigned int privatecnt;
+ struct list_head channels;
+ struct list_head global_node;
+ struct dma_filter filter;
+ dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
+ unsigned short max_xor;
+ unsigned short max_pq;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
+ /* flag ORed into max_pq when the engine natively supports P+Q continuation */
+ #define DMA_HAS_PQ_CONTINUE (1 << 15)
+
+ int dev_id;
+ struct device *dev;
+ struct module *owner;
+ struct ida chan_ida;
+
+ /* per-device slave transfer capability limits */
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ u32 min_burst;
+ u32 max_burst;
+ u32 max_sg_burst;
+ bool descriptor_reuse;
+ enum dma_residue_granularity residue_granularity;
+
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+ struct dma_chan *chan, unsigned long flags);
+
+ struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+ struct dma_chan *chan, dma_addr_t dst, u64 data,
+ unsigned long flags);
+
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
+ int (*device_pause)(struct dma_chan *chan);
+ int (*device_resume)(struct dma_chan *chan);
+ int (*device_terminate_all)(struct dma_chan *chan);
+ void (*device_synchronize)(struct dma_chan *chan);
+
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
+};
+
+/* Push a new runtime configuration to @chan; -ENOSYS if unsupported. */
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ if (chan->device->device_config)
+ return chan->device->device_config(chan, config);
+
+ return -ENOSYS;
+}
+
+/* True for the two slave (device) transfer directions, mem<->dev. */
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
+/*
+ * Prepare a slave transfer for a single contiguous DMA-mapped buffer by
+ * wrapping it in a one-entry scatterlist.  The scatterlist lives on the
+ * stack, so the driver's prep callback must not retain a pointer to it
+ * after returning.  Returns NULL if the channel cannot prepare slave
+ * transfers.
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ dir, flags, NULL);
+}
+
+/* Prepare a slave transfer over scatterlist @sgl; NULL if unsupported. */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, NULL);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ struct rio_dma_ext *rio_ext)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, rio_ext);
+}
+#endif
+
+/*
+ * Prepare a cyclic (e.g. audio) transfer over @buf_len bytes, with the
+ * completion callback invoked after every @period_len bytes.  Returns
+ * NULL if the channel has no cyclic support.
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+ return NULL;
+
+ return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+ period_len, dir, flags);
+}
+
+/*
+ * Prepare an interleaved transfer described by template @xt.  Requests
+ * carrying DMA_PREP_REPEAT are refused (NULL) unless the channel's
+ * device advertises the DMA_REPEAT capability.
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+ return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
+
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
+ return NULL;
+
+ return chan->device->device_prep_dma_memset(chan, dest, value,
+ len, flags);
+}
+
+/* Prepare a @len byte memory-to-memory copy; NULL if unsupported. */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
+ return NULL;
+
+ return chan->device->device_prep_dma_memcpy(chan, dest, src,
+ len, flags);
+}
+
+/* True when @chan supports descriptor metadata mode @mode; false for a NULL channel. */
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
+/**
+ * dmaengine_terminate_all() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * This function is DEPRECATED use either dmaengine_terminate_sync() or
+ * dmaengine_terminate_async() instead.
+ */
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -ENOSYS;
+}
+
+/**
+ * dmaengine_terminate_async() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending descriptors
+ * that have previously been submitted to the channel. It is not guaranteed
+ * though that the transfer for the active descriptor has stopped when the
+ * function returns. Furthermore it is possible the complete callback of a
+ * submitted transfer is still running when this function returns.
+ *
+ * dmaengine_synchronize() needs to be called before it is safe to free
+ * any memory that is accessed by previously submitted descriptors or before
+ * freeing any resources accessed from within the completion callback of any
+ * previously submitted descriptors.
+ *
+ * This function can be called from atomic context as well as from within a
+ * complete callback of a descriptor submitted on the same channel.
+ *
+ * If none of the two conditions above apply consider using
+ * dmaengine_terminate_sync() instead.
+ */
+static inline int dmaengine_terminate_async(struct dma_chan *chan)
+{
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -EINVAL;
+}
+
+/**
+ * dmaengine_synchronize() - Synchronize DMA channel termination
+ * @chan: The channel to synchronize
+ *
+ * Synchronizes to the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ *
+ * The behavior of this function is undefined if dma_async_issue_pending() has
+ * been called between dmaengine_terminate_async() and this function.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline void dmaengine_synchronize(struct dma_chan *chan)
+{
+ might_sleep();
+
+ if (chan->device->device_synchronize)
+ chan->device->device_synchronize(chan);
+}
+
+/**
+ * dmaengine_terminate_sync() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending transfers
+ * that have previously been submitted to the channel. It is similar to
+ * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
+ * stopped and that all complete callbacks have finished running when the
+ * function returns.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline int dmaengine_terminate_sync(struct dma_chan *chan)
+{
+ int ret;
+
+ /* request termination first ... */
+ ret = dmaengine_terminate_async(chan);
+ if (ret)
+ return ret;
+
+ /* ... then block until transfers and completion callbacks are done */
+ dmaengine_synchronize(chan);
+
+ return 0;
+}
+
+/* Pause any in-flight transfer on @chan; -ENOSYS if not implemented. */
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+ if (chan->device->device_pause)
+ return chan->device->device_pause(chan);
+
+ return -ENOSYS;
+}
+
+/* Resume a previously paused channel; -ENOSYS if not implemented. */
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+ if (chan->device->device_resume)
+ return chan->device->device_resume(chan);
+
+ return -ENOSYS;
+}
+
+/* Poll @chan for the status of @cookie; optional @state receives details. */
+static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ return chan->device->device_tx_status(chan, cookie, state);
+}
+
+/* Submit a prepared descriptor via its tx_submit() hook; returns a cookie. */
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+ return desc->tx_submit(desc);
+}
+
+/* True when @off1, @off2 and @len are all multiples of 1 << @align. */
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
+{
+ return !(((1 << align) - 1) & (off1 | off2 | len));
+}
+
+/* Alignment check against the device's memcpy constraint. */
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+/* Alignment check against the device's xor constraint. */
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+/* Alignment check against the device's pq constraint. */
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+/* Alignment check against the device's memset (fill) constraint. */
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+ dma->max_pq = maxpq;
+ if (has_pq_continue)
+ dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+ return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+ enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+ return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+ return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+ return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+ if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma);
+ if (dmaf_p_disabled_continue(flags))
+ return dma_dev_to_maxpq(dma) - 1;
+ if (dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma) - 3;
+ /* unreachable: the three tests above cover every flag combination */
+ BUG();
+}
+
+/*
+ * Select the inter-chunk gap (ICG) to apply: the direction-specific
+ * @dir_icg wins over the generic @icg, and both only apply when the
+ * address is incrementing (@inc); @sgl additionally gates the generic
+ * @icg.  Returns 0 when no gap applies.
+ */
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+ size_t dir_icg)
+{
+ if (inc) {
+ if (dir_icg)
+ return dir_icg;
+ if (sgl)
+ return icg;
+ }
+
+ return 0;
+}
+
+/* ICG to apply after each chunk on the destination side of @xt. */
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+ chunk->icg, chunk->dst_icg);
+}
+
+/* ICG to apply after each chunk on the source side of @xt. */
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+ chunk->icg, chunk->src_icg);
+}
+
+/* --- public DMA engine API --- */
+
+#ifdef CONFIG_DMA_ENGINE
+void dmaengine_get(void);
+void dmaengine_put(void);
+#else
+static inline void dmaengine_get(void)
+{
+}
+static inline void dmaengine_put(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
+#define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+ return NULL;
+}
+#endif /* CONFIG_ASYNC_TX_DMA */
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+ struct dma_chan *chan);
+
+static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_ACK;
+}
+
+static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
+}
+
+#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+static inline void
+__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ set_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ clear_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+ bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
+#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+static inline int
+__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+{
+ return test_bit(tx_type, srcp->bits);
+}
+
+#define for_each_dma_cap_mask(cap, mask) \
+ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
+
+/**
+ * dma_async_issue_pending - flush pending transactions to HW
+ * @chan: target DMA channel
+ *
+ * This allows drivers to push copies to HW in batches,
+ * reducing MMIO writes where possible.
+ */
+static inline void dma_async_issue_pending(struct dma_chan *chan)
+{
+ chan->device->device_issue_pending(chan);
+}
+
+/**
+ * dma_async_is_tx_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
+}
+
+/**
+ * dma_async_is_complete - test a cookie against chan state
+ * @cookie: transaction identifier to test status of
+ * @last_complete: last known completed transaction
+ * @last_used: last cookie value handed out
+ *
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
+ * the test logic is separated for lightweight testing of multiple cookies
+ */
+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+ dma_cookie_t last_complete, dma_cookie_t last_used)
+{
+ /* normal case: the cookie counter has not wrapped between the marks */
+ if (last_complete <= last_used) {
+ if ((cookie <= last_complete) || (cookie > last_used))
+ return DMA_COMPLETE;
+ } else {
+ /* else branch handles cookie-counter wrap-around */
+ if ((cookie <= last_complete) && (cookie > last_used))
+ return DMA_COMPLETE;
+ }
+ return DMA_IN_PROGRESS;
+}
+
+/* Fill @st with the given cookies/residue; no-op when @st is NULL. */
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+}
+
+#ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
+void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
+#else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+ return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ return DMA_COMPLETE;
+}
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+ return DMA_COMPLETE;
+}
+static inline void dma_issue_pending_all(void)
+{
+}
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
+{
+ return NULL;
+}
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+ const dma_cap_mask_t *mask)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
+}
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+ struct dma_slave_caps *caps)
+{
+ return -ENXIO;
+}
+#endif
+
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+ int ret;
+
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
+
+ if (!caps.descriptor_reuse)
+ return -EPERM;
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (!dmaengine_desc_test_reuse(desc))
+ return -EPERM;
+
+ return desc->desc_free(desc);
+}
+
+/* --- DMA device --- */
+
+int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
+void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ /* legacy contract: NULL on failure instead of an ERR_PTR */
+ return IS_ERR(ch) ? NULL : ch;
+}
+
+/*
+ * Compatibility helper: try the named slave channel first, then fall
+ * back to a filtered request using @mask/@fn/@fn_param.  Returns NULL
+ * when neither path yields a channel.
+ */
+static inline struct dma_chan
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, const char *name)
+{
+ struct dma_chan *chan;
+
+ chan = dma_request_slave_channel(dev, name);
+ if (chan)
+ return chan;
+
+ if (!fn || !fn_param)
+ return NULL;
+
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+/* Human-readable name for a transfer direction; "invalid" for unknown values. */
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ return "invalid";
+ }
+}
+
+/*
+ * Device to use for DMA API operations on @chan: the channel's own
+ * device when chan_dma_dev is set, otherwise the controller's device.
+ */
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+ if (chan->dev->chan_dma_dev)
+ return &chan->dev->device;
+
+ return chan->device->dev;
+}
+
+#endif /* DMAENGINE_H */
diff --git a/snd-alpx/include/6.3/amd_xdma.h b/snd-alpx/include/6.3/amd_xdma.h
new file mode 100644
index 0000000..ceba69e
--- /dev/null
+++ b/snd-alpx/include/6.3/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+/* Enable/disable delivery of the given XDMA user interrupt line. */
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+/* Look up the IRQ for @user_irq_index.
+ * NOTE(review): return convention (IRQ number vs negative errno) inferred
+ * from kernel style -- confirm against the xdma driver implementation. */
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
diff --git a/snd-alpx/include/6.3/dmaengine.h b/snd-alpx/include/6.3/dmaengine.h
new file mode 100644
index 0000000..c3656e5
--- /dev/null
+++ b/snd-alpx/include/6.3/dmaengine.h
@@ -0,0 +1,1637 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ */
+#ifndef LINUX_DMAENGINE_H
+#define LINUX_DMAENGINE_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/uio.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <asm/page.h>
+
+/**
+ * typedef dma_cookie_t - an opaque DMA cookie
+ *
+ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
+ */
+typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE 1
+
+/* Return the negative error code carried by @cookie, or 0 when the
+ * cookie denotes a successfully submitted transaction. */
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+ return cookie < 0 ? cookie : 0;
+}
+
+/**
+ * enum dma_status - DMA transaction status
+ * @DMA_COMPLETE: transaction completed
+ * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
+ * @DMA_ERROR: transaction failed
+ * @DMA_OUT_OF_ORDER: transaction completed, but its completion ordering
+ * is not guaranteed (cf. the DMA_COMPLETION_NO_ORDER capability)
+ */
+enum dma_status {
+ DMA_COMPLETE,
+ DMA_IN_PROGRESS,
+ DMA_PAUSED,
+ DMA_ERROR,
+ DMA_OUT_OF_ORDER,
+};
+
+/**
+ * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
+ * automatically set as dma devices are registered.
+ *
+ * Note: the enumerators double as bit positions inside dma_cap_mask_t,
+ * which is why DMA_TX_TYPE_END (the bitmap size) must stay last.
+ */
+enum dma_transaction_type {
+ DMA_MEMCPY,
+ DMA_XOR,
+ DMA_PQ,
+ DMA_XOR_VAL,
+ DMA_PQ_VAL,
+ DMA_MEMSET,
+ DMA_MEMSET_SG,
+ DMA_INTERRUPT,
+ DMA_PRIVATE,
+ DMA_ASYNC_TX,
+ DMA_SLAVE,
+ DMA_CYCLIC,
+ DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
+/* last transaction type for creation of the capabilities mask */
+ DMA_TX_TYPE_END,
+};
+
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ * @DMA_TRANS_NONE: direction not set
+ */
+enum dma_transfer_direction {
+ DMA_MEM_TO_MEM,
+ DMA_MEM_TO_DEV,
+ DMA_DEV_TO_MEM,
+ DMA_DEV_TO_DEV,
+ DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is collection of contiguous bytes to be transferred.
+ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ * that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ * type of transfer it is going to need during its lifetime and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * == Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ * size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ * chunk and before first src/dst address for next chunk.
+ * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+ size_t size;
+ size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ * and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ * Otherwise, source is read contiguously (icg ignored).
+ * Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ * Otherwise, destination is filled contiguously (icg ignored).
+ * Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ * Flexible array member; allocate with room for @frame_size entries.
+ */
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[];
+};
+
+/**
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ * control completion, and communicate status.
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ * this transaction
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
+ * acknowledges receipt, i.e. has a chance to establish any dependency
+ * chains
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ * sources that were the result of a previous operation, in the case of a PQ
+ * operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ * on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ * cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ * data and the descriptor should be in different format from normal
+ * data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
+ */
+enum dma_ctrl_flags {
+ DMA_PREP_INTERRUPT = (1 << 0),
+ DMA_CTRL_ACK = (1 << 1),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
+ DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
+};
+
+/**
+ * enum sum_check_bits - bit position of pq_check_flags
+ * @SUM_CHECK_P: bit position reporting a P (xor) zero-sum error
+ * @SUM_CHECK_Q: bit position reporting a Q (reed-solomon) zero-sum error
+ */
+enum sum_check_bits {
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+ SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+ SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
+ * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
+ * See linux/cpumask.h
+ */
+typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_NONE - no metadata mode is in use
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() is
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested to use this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use one mode for a
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
+
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
+struct dma_chan_percpu {
+ /* stats */
+ unsigned long memcpy_count;
+ unsigned long bytes_transferred;
+};
+
+/**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *dev, void *route_data);
+};
+
+/**
+ * struct dma_chan - devices supply DMA channels, clients use them
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
+ * @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
+ * @chan_id: channel ID for sysfs
+ * @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx".
+ * Only present when CONFIG_DEBUG_FS is enabled.
+ * @device_node: used to add this to the device chan list
+ * @local: per-cpu pointer to a struct dma_chan_percpu
+ * @client_count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
+ * @private: private data for certain client-channel associations
+ */
+struct dma_chan {
+ struct dma_device *device;
+ struct device *slave;
+ dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
+
+ /* sysfs */
+ int chan_id;
+ struct dma_chan_dev *dev;
+ const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
+
+ struct list_head device_node;
+ struct dma_chan_percpu __percpu *local;
+ int client_count;
+ int table_count;
+
+ /* DMA router */
+ struct dma_router *router;
+ void *route_data;
+
+ void *private;
+};
+
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @chan_dma_dev: The channel is using custom/different dma-mapping
+ * compared to the parent dma_device
+ */
+struct dma_chan_dev {
+ struct dma_chan *chan;
+ struct device device;
+ int dev_id;
+ bool chan_dma_dev;
+};
+
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses.
+ * The numeric value of each enumerator equals the width in bytes.
+ */
+enum dma_slave_buswidth {
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values. DEPRECATED, drivers should use the direction argument
+ * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
+ * the dir field in the dma_interleaved_template structure.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX), if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX), if the destination is memory this argument
+ * is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ * @src_port_window_size: The length of the register area in words the data need
+ * to be accessed on the device side. It is only used for devices which is using
+ * an area instead of a single register to receive the data. Typically the DMA
+ * loops in this area in order to transfer the data.
+ * @dst_port_window_size: same as src_port_window_size but for the destination
+ * port.
+ * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
+ * with 'true' if peripheral should be flow controller. Direction will be
+ * selected at Runtime.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
+ *
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controllers in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it be sent in static from the platform
+ * data, then prefer to do that.
+ */
+struct dma_slave_config {
+ enum dma_transfer_direction direction;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+ u32 src_port_window_size;
+ u32 dst_port_window_size;
+ bool device_fc;
+ void *peripheral_config;
+ size_t peripheral_size;
+};
+
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ * DMA channel is only able to tell whether a descriptor has been completed or
+ * not, which means residue reporting is not supported by this channel. The
+ * residue field of the dma_tx_state field will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ * completed segment of the transfer (For cyclic transfers this is after each
+ * period). This is typically implemented by having the hardware generate an
+ * interrupt after each transferred segment and then the drivers updates the
+ * outstanding residue by the size of the segment. Another possibility is if
+ * the hardware supports scatter-gather and the segment descriptor has a field
+ * which gets set after the segment has been completed. The driver then counts
+ * the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ * burst. This is typically only supported if the hardware has a progress
+ * register of some sort (E.g. a register with the current read/write address
+ * or a register with the amount of bursts/beats/bytes that have been
+ * transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ * @src_addr_widths: bit mask of src addr widths the channel supports.
+ * Width is specified in bytes, e.g. for a channel supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
+ * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ * for resume later)
+ * @cmd_resume: true, if resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ u32 min_burst;
+ u32 max_burst;
+ u32 max_sg_burst;
+ bool cmd_pause;
+ bool cmd_resume;
+ bool cmd_terminate;
+ enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
+};
+
+/* Name of the channel's sysfs class device. */
+static inline const char *dma_chan_name(struct dma_chan *chan)
+{
+ return dev_name(&chan->dev->device);
+}
+
+void dma_chan_cleanup(struct kref *kref);
+
+/**
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned. Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask. It returns 'true' to indicate that the
+ * channel is suitable.
+ */
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+/* Completion callback invoked when a transaction finishes. */
+typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0, /* SUCCESS */
+ DMA_TRANS_READ_FAILED, /* Source DMA read failed */
+ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
+ DMA_TRANS_ABORTED, /* Op never submitted / aborted */
+};
+
+/* Completion status handed to a dma_async_tx_callback_result. */
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+/* Completion callback variant that also receives the transfer result. */
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+ const struct dmaengine_result *result);
+
+/*
+ * Book-keeping for DMA-API unmapping of a transaction's buffers.
+ * @addr is a flexible array; map_cnt is widened to u16 when
+ * CONFIG_DMA_ENGINE_RAID is enabled.
+ */
+struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ u16 map_cnt;
+#else
+ u8 map_cnt;
+#endif
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+/* Driver-provided ops implementing the DESC_METADATA_* modes. */
+struct dma_descriptor_metadata_ops {
+ /* DESC_METADATA_CLIENT: attach a client buffer to the descriptor */
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ /* DESC_METADATA_ENGINE: expose / size the engine's metadata area */
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+};
+
+/**
+ * struct dma_async_tx_descriptor - async transaction descriptor
+ * ---dma generic offload fields---
+ * @cookie: tracking cookie for this transaction, set to -EBUSY if
+ * this tx is sitting on a dependency list
+ * @flags: flags to augment operation preparation, control completion, and
+ * communicate status
+ * @phys: physical address of the descriptor
+ * @chan: target channel for this operation
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @desc_free: optional driver routine used to free this descriptor
+ * (NOTE(review): exact semantics are provider-specific — confirm)
+ * @callback: routine to call after this operation is complete
+ * @callback_result: like @callback, but also receives a
+ * struct dmaengine_result carrying status and residue
+ * @callback_param: general parameter to pass to the callback routine
+ * @unmap: DMA-API unmap book-keeping attached via dma_set_unmap() and
+ * released through dma_descriptor_unmap()
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
+ * ---async_tx api specific fields---
+ * @next: at completion submit this descriptor
+ * @parent: pointer to the next level up in the dependency chain
+ * @lock: protect the parent and next pointers
+ */
+struct dma_async_tx_descriptor {
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
+ dma_addr_t phys;
+ struct dma_chan *chan;
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+ struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ struct dma_async_tx_descriptor *next;
+ struct dma_async_tx_descriptor *parent;
+ spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_DMA_ENGINE
+/* Attach @unmap data to @tx, taking a reference on it. */
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+/* No-op stubs when the DMA engine core is not built in. */
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+/* Drop the descriptor's unmap-data reference, if any, and clear it. */
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+}
+
+/*
+ * txd_*() helpers manage the async_tx dependency chain (next/parent).
+ * Without CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH the chain fields do not
+ * exist in the descriptor, so these collapse to no-ops; txd_chain() must
+ * never be reached in that configuration and therefore BUG()s.
+ */
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+
+#else
+/* Chain fields exist; accesses are guarded by the descriptor's BH lock. */
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+ spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+ spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ txd->next = next;
+ next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+ txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+ txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
+ */
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
+ u32 in_flight_bytes;
+};
+
+/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers.
+ * Each value is the log2 of the required alignment in bytes.
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ * parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+ const char *devname;
+ const char *slave;
+ void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ * mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+ dma_filter_fn fn;
+ int mapcnt;
+ const struct dma_slave_map *map;
+};
+
+/**
+ * struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
+ * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
+ * @channels: the list of struct dma_chan
+ * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
+ * @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
+ * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
+ * @dev_id: unique device ID
+ * @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * Width is specified in bytes, e.g. for a device supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave directions the device supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
+ * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @residue_granularity: granularity of the transfer residue reported
+ * by tx_status
+ * @device_alloc_chan_resources: allocate resources and return the
+ * number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_prep_dma_memcpy: prepares a memcpy operation
+ * @device_prep_dma_xor: prepares a xor operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
+ * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ * The function takes a buffer of size buf_len. The callback function will
+ * be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
+ * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ * code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ * 0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ * paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ * or an error code
+ * @device_synchronize: Synchronizes the termination of a transfers to the
+ * current context.
+ * @device_tx_status: poll for transaction completion, the optional
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
+ * @device_issue_pending: push pending transactions to hardware
+ * @device_release: called sometime after dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources however many existing drivers
+ * do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
+ */
+struct dma_device {
+ struct kref ref;
+ unsigned int chancnt;
+ unsigned int privatecnt;
+ struct list_head channels;
+ struct list_head global_node;
+ struct dma_filter filter;
+ dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
+ unsigned short max_xor;
+ unsigned short max_pq;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
+ #define DMA_HAS_PQ_CONTINUE (1 << 15)
+
+ int dev_id;
+ struct device *dev;
+ struct module *owner;
+ struct ida chan_ida;
+
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ u32 min_burst;
+ u32 max_burst;
+ u32 max_sg_burst;
+ bool descriptor_reuse;
+ enum dma_residue_granularity residue_granularity;
+
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+ struct dma_chan *chan, unsigned long flags);
+
+ struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+ struct dma_chan *chan, dma_addr_t dst, u64 data,
+ unsigned long flags);
+
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
+ int (*device_pause)(struct dma_chan *chan);
+ int (*device_resume)(struct dma_chan *chan);
+ int (*device_terminate_all)(struct dma_chan *chan);
+ void (*device_synchronize)(struct dma_chan *chan);
+
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
+};
+
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ if (chan->device->device_config)
+ return chan->device->device_config(chan, config);
+
+ return -ENOSYS;
+}
+
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, NULL);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ struct rio_dma_ext *rio_ext)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, rio_ext);
+}
+#endif
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+ return NULL;
+
+ return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+ period_len, dir, flags);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+ return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
+
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
+ return NULL;
+
+ return chan->device->device_prep_dma_memset(chan, dest, value,
+ len, flags);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
+ return NULL;
+
+ return chan->device->device_prep_dma_memcpy(chan, dest, src,
+ len, flags);
+}
+
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
+/**
+ * dmaengine_terminate_all() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * This function is DEPRECATED use either dmaengine_terminate_sync() or
+ * dmaengine_terminate_async() instead.
+ */
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -ENOSYS;
+}
+
+/**
+ * dmaengine_terminate_async() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending descriptors
+ * that have previously been submitted to the channel. It is not guaranteed
+ * though that the transfer for the active descriptor has stopped when the
+ * function returns. Furthermore it is possible the complete callback of a
+ * submitted transfer is still running when this function returns.
+ *
+ * dmaengine_synchronize() needs to be called before it is safe to free
+ * any memory that is accessed by previously submitted descriptors or before
+ * freeing any resources accessed from within the completion callback of any
+ * previously submitted descriptors.
+ *
+ * This function can be called from atomic context as well as from within a
+ * complete callback of a descriptor submitted on the same channel.
+ *
+ * If none of the two conditions above apply consider using
+ * dmaengine_terminate_sync() instead.
+ */
+static inline int dmaengine_terminate_async(struct dma_chan *chan)
+{
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -EINVAL;
+}
+
+/**
+ * dmaengine_synchronize() - Synchronize DMA channel termination
+ * @chan: The channel to synchronize
+ *
+ * Synchronizes to the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ *
+ * The behavior of this function is undefined if dma_async_issue_pending() has
+ * been called between dmaengine_terminate_async() and this function.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline void dmaengine_synchronize(struct dma_chan *chan)
+{
+ might_sleep();
+
+ if (chan->device->device_synchronize)
+ chan->device->device_synchronize(chan);
+}
+
+/**
+ * dmaengine_terminate_sync() - Terminate all active DMA transfers
+ * @chan: The channel for which to terminate the transfers
+ *
+ * Calling this function will terminate all active and pending transfers
+ * that have previously been submitted to the channel. It is similar to
+ * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
+ * stopped and that all complete callbacks have finished running when the
+ * function returns.
+ *
+ * This function must only be called from non-atomic context and must not be
+ * called from within a complete callback of a descriptor submitted on the same
+ * channel.
+ */
+static inline int dmaengine_terminate_sync(struct dma_chan *chan)
+{
+ int ret;
+
+ ret = dmaengine_terminate_async(chan);
+ if (ret)
+ return ret;
+
+ dmaengine_synchronize(chan);
+
+ return 0;
+}
+
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+ if (chan->device->device_pause)
+ return chan->device->device_pause(chan);
+
+ return -ENOSYS;
+}
+
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+ if (chan->device->device_resume)
+ return chan->device->device_resume(chan);
+
+ return -ENOSYS;
+}
+
+static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ return chan->device->device_tx_status(chan, cookie, state);
+}
+
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+ return desc->tx_submit(desc);
+}
+
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
+{
+ return !(((1 << align) - 1) & (off1 | off2 | len));
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+ dma->max_pq = maxpq;
+ if (has_pq_continue)
+ dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+ return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+ enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+ return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+ return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+ return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+ if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma);
+ if (dmaf_p_disabled_continue(flags))
+ return dma_dev_to_maxpq(dma) - 1;
+ if (dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma) - 3;
+ BUG();
+}
+
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+ size_t dir_icg)
+{
+ if (inc) {
+ if (dir_icg)
+ return dir_icg;
+ if (sgl)
+ return icg;
+ }
+
+ return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+ chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+ chunk->icg, chunk->src_icg);
+}
+
+/* --- public DMA engine API --- */
+
+#ifdef CONFIG_DMA_ENGINE
+void dmaengine_get(void);
+void dmaengine_put(void);
+#else
+static inline void dmaengine_get(void)
+{
+}
+static inline void dmaengine_put(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
+#define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+ return NULL;
+}
+#endif /* CONFIG_ASYNC_TX_DMA */
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+ struct dma_chan *chan);
+
+static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_ACK;
+}
+
+static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
+}
+
+#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+static inline void
+__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ set_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ clear_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+ bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
+#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+static inline int
+__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+{
+ return test_bit(tx_type, srcp->bits);
+}
+
+#define for_each_dma_cap_mask(cap, mask) \
+ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
+
+/**
+ * dma_async_issue_pending - flush pending transactions to HW
+ * @chan: target DMA channel
+ *
+ * This allows drivers to push copies to HW in batches,
+ * reducing MMIO writes where possible.
+ */
+static inline void dma_async_issue_pending(struct dma_chan *chan)
+{
+ chan->device->device_issue_pending(chan);
+}
+
+/**
+ * dma_async_is_tx_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
+}
+
+/**
+ * dma_async_is_complete - test a cookie against chan state
+ * @cookie: transaction identifier to test status of
+ * @last_complete: last know completed transaction
+ * @last_used: last cookie value handed out
+ *
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
+ * the test logic is separated for lightweight testing of multiple cookies
+ */
+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+ dma_cookie_t last_complete, dma_cookie_t last_used)
+{
+ if (last_complete <= last_used) {
+ if ((cookie <= last_complete) || (cookie > last_used))
+ return DMA_COMPLETE;
+ } else {
+ if ((cookie <= last_complete) && (cookie > last_used))
+ return DMA_COMPLETE;
+ }
+ return DMA_IN_PROGRESS;
+}
+
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+}
+
+#ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
+void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
+#else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+ return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ return DMA_COMPLETE;
+}
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+ return DMA_COMPLETE;
+}
+static inline void dma_issue_pending_all(void)
+{
+}
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
+{
+ return NULL;
+}
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+ const dma_cap_mask_t *mask)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
+}
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+ struct dma_slave_caps *caps)
+{
+ return -ENXIO;
+}
+#endif
+
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+ int ret;
+
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
+
+ if (!caps.descriptor_reuse)
+ return -EPERM;
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (!dmaengine_desc_test_reuse(desc))
+ return -EPERM;
+
+ return desc->desc_free(desc);
+}
+
+/* --- DMA device --- */
+
+int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
+void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
+
+static inline struct dma_chan
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, const char *name)
+{
+ struct dma_chan *chan;
+
+ chan = dma_request_slave_channel(dev, name);
+ if (chan)
+ return chan;
+
+ if (!fn || !fn_param)
+ return NULL;
+
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ return "invalid";
+ }
+}
+
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+ if (chan->dev->chan_dma_dev)
+ return &chan->dev->device;
+
+ return chan->device->dev;
+}
+
+#endif /* DMAENGINE_H */
diff --git a/snd-alpx/regmap-mmio.c b/snd-alpx/regmap-mmio.c
new file mode 100644
index 0000000..59c2007
--- /dev/null
+++ b/snd-alpx/regmap-mmio.c
@@ -0,0 +1,9 @@
+#include <linux/version.h>
+
+#if KERNEL_VERSION(5, 14, 0) == LINUX_VERSION_CODE
+#include "core/generic/5.14/regmap-mmio.c"
+#elif KERNEL_VERSION(4, 19, 0) == LINUX_VERSION_CODE
+#include "core/generic/4.19/regmap-mmio.c"
+#else
+#error "Unsupported kernel version, backporting another version of regmap-mmio is required"
+#endif
diff --git a/snd-alpx/snd_alpx_xdma.c b/snd-alpx/snd_alpx_xdma.c
new file mode 100644
index 0000000..2941793
--- /dev/null
+++ b/snd-alpx/snd_alpx_xdma.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+
+/* Defined an exported symbol to allow dependency use */
+const char* SND_ALPX_XDMA_DEP(void)
+{
+ return "snd_alpx_xdma support";
+}
+EXPORT_SYMBOL(SND_ALPX_XDMA_DEP);
+
+
+/*
+ * Use the in-kernel driver if enabled and recent enough. Otherwise, use the
+ * mainline kernel driver backported locally and improved with cyclic transfers
+ * support (6.7 code)
+ */
+#if IS_ENABLED(CONFIG_XILINX_XDMA) && (KERNEL_VERSION(6, 7, 0) <= LINUX_VERSION_CODE)
+ /* EMPTY MODULE !!! */
+ #include "alpx_version.h"
+ #warning "Use Kernel's XDMA"
+
+ /* MODULE meta required for build only */
+ MODULE_DESCRIPTION("AlpX XDMA support module placeholder");
+ MODULE_AUTHOR("Digigram Digital");
+ MODULE_VERSION("ALPX_MODULE_VERSION");
+ MODULE_LICENSE("GPL");
+
+#else
+/* Need our module BUT with the right version !! */
+ #if KERNEL_VERSION(6, 3, 0) > LINUX_VERSION_CODE
+
+ #warning 6.2
+ #include "core/generic/6.2/xilinx/xdma.c"
+
+ #elif KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE
+
+ #warning 6.3
+ #include "core/generic/6.3/xilinx/xdma.c"
+
+ #endif
+#endif /* IS_ENABLED(CONFIG_XILINX_XDMA) && (KERNEL_VERSION(6, 7, 0) >= LINUX_VERSION_CODE) */
diff --git a/snd-alpx/snd_alpx_xdma.h b/snd-alpx/snd_alpx_xdma.h
new file mode 100644
index 0000000..5acba24
--- /dev/null
+++ b/snd-alpx/snd_alpx_xdma.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+* Support for Digigram AlpX PCI-e boards
+*
+* Copyright (c) 2024 Digigram Digital (info@digigram.com)
+*/
+#ifndef _SND_ALPX_XDMA_H_
+#define _SND_ALPX_XDMA_H_
+
+/* dependency check support */
+#if (!IS_ENABLED(CONFIG_XILINX_XDMA)) || \
+ (KERNEL_VERSION(6, 7, 0) > LINUX_VERSION_CODE)
+#warning "USE Package's XDMA module"
+ /* snd-alpx-xdma dependency */
+ const char* SND_ALPX_XDMA_DEP(void);
+#else
+/* NO dependency SO required local define*/
+static const char* SND_ALPX_XDMA_DEP(void)
+{
+ return "Internal XDMA support";
+}
+#endif
+
+#endif /*_SND_ALPX_XDMA_H_*/
diff --git a/snd-alpx/tools/audio_card_update_firmware.sh b/snd-alpx/tools/audio_card_update_firmware.sh
new file mode 100755
index 0000000..9dfa0c7
--- /dev/null
+++ b/snd-alpx/tools/audio_card_update_firmware.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+
+# This script updates the firmware of Audio cards only (AlpXXY, where X and Y are numbers).
+# Param 1 : path to the mtd partition device of the "fw-user-updatable" area; see the
+# /proc/mtd file and select the one of the card to be updated.
+# Param 2 : complete path to the firmware file
+
+#NOTE : use BASH read, SH read : follow https://stackoverflow.com/questions/226703/how-do-i-prompt-for-yes-no-cancel-input-in-a-linux-shell-script
+
+
# Print the expected parameters and preconditions for this script.
user_manual(){
    printf '%s\n' \
        'Param 1 : mtd partition number of the "fw-user-updatable", see /proc/mtd file and select' \
        'Param 2 : firmware complete filepath' \
        "Check user can write to the mtd partition's file"
    return 0
}
+
+
## MAIN ##
# Warn about the supported card models: Alp AUDIO cards only.
read -r -n 1 -p "WARNING: This is only for AUDIO based Alp cards (ie Alp222, Alp882 and Alp442) update ? [y/N]:" user_resp

echo # (optional) move to a new line
if [[ ! "$user_resp" =~ ^[Yy]$ ]]
then
    # handle exits from shell or function but don't exit interactive shell
    [[ "$0" = "$BASH_SOURCE" ]] && echo "CANCELED by User" && exit 1 || return 1
fi


# Check parameters
[ "$#" -ne "2" ] && user_manual && exit 1

fw_partition_path="$1"
source_fw_file="$2"


# /dev/mtdX is a character device
[ ! -c "$fw_partition_path" ] && echo "$fw_partition_path Not a C Device" && exit 2
# Write access ?
[ ! -w "$fw_partition_path" ] && echo "Not writeable" && exit 2

# Firmware file conditions: exists, regular file, non-empty, readable.
[ ! -e "$source_fw_file" ] && user_manual && exit 2
[ ! -f "$source_fw_file" ] && user_manual && exit 2
[ ! -s "$source_fw_file" ] && user_manual && exit 2
[ ! -r "$source_fw_file" ] && user_manual && exit 2

# Check available space !! TODO
# It requires firmware size + 4kB for the extracted HEADER.

# BUGFIX: the temp files were created in the current directory and leaked on
# several failure paths. Use a private mktemp dir cleaned up on ANY exit.
work_dir=$(mktemp -d) || exit 4
trap 'rm -rf -- "$work_dir"' EXIT
fw_header="$work_dir/fw_header.bin"
fw_body="$work_dir/fw_body.bin"

# read: BASH extensions used!
read -r -n 1 -p "Writing $source_fw_file to $fw_partition_path ? [y/N]: " user_resp

echo # (optional) move to a new line
if [[ ! "$user_resp" =~ ^[Yy]$ ]]
then
    # handle exits from shell or function but don't exit interactive shell
    [[ "$0" = "$BASH_SOURCE" ]] && echo "CANCELED by User" && exit 1 || return 1
fi

# Split the firmware: the first 4kB header goes to one file ...
if ! dd if="$source_fw_file" of="$fw_header" bs=4096 count=1 status=none ; then
    echo "Error when preparing the firmware, check disk size please."
    exit 4
fi

# ... and the body to another. seek=1 keeps the body at its original offset,
# presumably so offsets written to the partition match the full image —
# TODO confirm against the card's flash layout.
if ! dd if="$source_fw_file" of="$fw_body" bs=4096 skip=1 seek=1 status=none ; then
    echo "Error when preparing the firmware, check disk size please."
    exit 4
fi

# Write the body first, the header last — NOTE(review): likely so that an
# interrupted update leaves the old header valid; confirm with firmware docs.
echo "Updating first phase ..."
if ! cat "$fw_body" > "$fw_partition_path" ; then
    echo "!! Update failed DON'T poweroff, correct to retry."
    exit 3
fi

echo "Updating second phase ..."
if ! cat "$fw_header" > "$fw_partition_path" ; then
    echo "!! Update failed DON'T poweroff, correct to retry."
    exit 3
fi

echo "Update SUCCEEDED"

exit 0
diff --git a/snd-alpx/tools/build-load-script.sh b/snd-alpx/tools/build-load-script.sh
new file mode 100755
index 0000000..76e0174
--- /dev/null
+++ b/snd-alpx/tools/build-load-script.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+#This script will build the manual launch script for the Alpx cards' driver.
+
# ANSI attributes for operator-visible status messages (used with echo -e).
TEXT_BOLD="\e[1m"
TEXT_BLUE="\e[34m"
TEXT_RED="\e[31m"
TEXT_RESET="\e[0m"

# Output path of the generated manual load script (first CLI argument).
manual_load_script_name="$1"
# Module file names of the package's own drivers, loaded with insmod.
ALPX_SOUND_DRIVER_NAME="snd-alpx.ko"
ALPX_DMA_DRIVER_NAME="snd-alpx-xdma.ko"

# Kernel config options the driver depends on: REQUIRED must be =y or =m;
# OPTIONAL only produce a modprobe entry when built as a module (=m).
OPTIONS_REQUIRED=("CONFIG_MTD" "CONFIG_DMA_VIRTUAL_CHANNELS" "CONFIG_SND_PCM")
OPTIONS_OPTIONAL=("CONFIG_SND_DMAENGINE_PCM")
+
# Map a kernel config option to the name of the module that provides it.
# Prints nothing for unknown options.
module_for_option()
{
    case "$1" in
        "CONFIG_MTD")                  echo "mtd" ;;
        "CONFIG_DMA_VIRTUAL_CHANNELS") echo "virt-dma" ;;
        "CONFIG_SND_DMAENGINE_PCM")    echo "snd-pcm-dmaengine" ;;
        "CONFIG_SND_PCM")              echo "snd-pcm" ;;
    esac
}
+
# Append a "modprobe <module>" entry for a CONFIG_* option built as a module.
# Globals read: manual_load_script_name (append target), TEXT_* attributes.
required_module_entry()
{
    local option=$1
    local module_name
    module_name=$( module_for_option "$option" )

    # BUGFIX: the original never emitted TEXT_RESET here, so the bold/blue
    # attributes bled into all subsequent terminal output.
    echo -e "${TEXT_BOLD}${TEXT_BLUE}Option $option was built as a module and will be loaded${TEXT_RESET}"
    echo "echo 'loading required module : $module_name'" >> "$manual_load_script_name"
    echo "modprobe $module_name" >> "$manual_load_script_name"
}
+
# Read a kernel config listing on stdin and generate the manual load script:
# one modprobe entry per dependency built as a module, then the insmod entries
# for the package's own drivers (DMA first — order kept from the original).
# Returns 1 when a required option is absent from the config.
build_launch_script()
{
    local list option
    list=$( cat - )

    # Start from a fresh script.
    # BUGFIX: the original wrote the "#!/bin/bash" line only when the output
    # file already existed (after rm), so a first run produced a script with
    # no shebang at all.
    printf '#!/bin/bash\n' > "$manual_load_script_name"

    for option in "${OPTIONS_REQUIRED[@]}"
    do
        if ! printf '%s\n' "$list" | grep -q -E "^$option=(y|m)"
        then
            echo -e "${TEXT_BOLD}${TEXT_RED}Missing required option $option!"$TEXT_RESET
            return 1
        fi

        if printf '%s\n' "$list" | grep -q "^$option=m"
        then
            required_module_entry "$option"
        fi
    done

    for option in "${OPTIONS_OPTIONAL[@]}"
    do
        if printf '%s\n' "$list" | grep -q "^$option=m"
        then
            required_module_entry "$option"
        fi
    done

    # Add the drivers' entries themselves.
    {
        echo "echo 'loading the dma driver : $ALPX_DMA_DRIVER_NAME '"
        echo "insmod $ALPX_DMA_DRIVER_NAME"
        echo "echo 'loading the sound driver : $ALPX_SOUND_DRIVER_NAME '"
        echo "insmod $ALPX_SOUND_DRIVER_NAME"
    } >> "$manual_load_script_name"

    # BUGFIX: chmod previously ran before the driver entries were appended and
    # only when the file pre-existed; make it executable once it is complete.
    chmod u+x "$manual_load_script_name"

    echo -e "$TEXT_RESET"
}
+
# Locate the running kernel's configuration and feed it to the generator:
# prefer the live /proc/config.gz, fall back to /boot/config-$(uname -r).
config_path="/boot/config-$( uname -r )"

if [ -f "/proc/config.gz" ]
then
    # gunzip -c instead of the original cat | gunzip pipeline
    gunzip -c /proc/config.gz | build_launch_script
elif [ -f "$config_path" ]
then
    build_launch_script < "$config_path"
else
    echo "No config path found, unable to check!" >&2
    exit 1
fi
diff --git a/snd-alpx/tools/build_driver_pkg.sh b/snd-alpx/tools/build_driver_pkg.sh
new file mode 100755
index 0000000..ef568bd
--- /dev/null
+++ b/snd-alpx/tools/build_driver_pkg.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#This script will create the driver source archive with the given tag, its name compliant with dkms,
+# and put it in the destination dir
+
# Print the expected command-line parameters on stdout.
print_help() {
	printf 'This script requires 2 parameters : \n\t 1: version tag \n\t 2: destination directory\n'
}
+
## MAIN ##
# Check parameters: exactly a version tag and a destination directory.
echo "starting $0 with $#"

if [ "$#" -ne 2 ]
then
	print_help
	exit 1
fi

version_tag=$1
dest_dir=$2

## Handle HEAD as a special value: use it verbatim; otherwise strip the
## first two characters of the tag to obtain the bare version number.
## NOTE(review): the :2 offset assumes every tag carries a fixed 2-char
## prefix — confirm against the repository's tagging scheme.
if [ "$version_tag" == "HEAD" ]
then
	version=$version_tag
else
	version=${version_tag:2}
fi

# Fixed: the message used to print the raw tag twice instead of the
# derived version that actually names the archive.
echo "get ready for the archive V: $version, from tag: $version_tag to $dest_dir/snd-alpx-$version.zip ..."
git archive --format=zip -9 --prefix="snd-alpx-$version/" "$version_tag" > "$dest_dir/snd-alpx-$version.zip"
diff --git a/snd-alpx/tools/build_virtual_board_alsa_conf.sh b/snd-alpx/tools/build_virtual_board_alsa_conf.sh
new file mode 100755
index 0000000..3e8a605
--- /dev/null
+++ b/snd-alpx/tools/build_virtual_board_alsa_conf.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+## This script will print an asoundrc stream which add virtual boards from one physical board.
+## Output must be directed into a file to be stored
+
+### FUNCTIONS ###
# Print the usage text (the script's 4 required parameters) on stderr.
# Fixed: the quotes around the hw:CARD=... example were unescaped, so they
# toggled shell quoting and were silently dropped from the output instead of
# being printed (item 3 already escaped its quotes correctly).
user_manual() {
	echo "Required Parameters
 1. Phy board id like \"hw:CARD=Alp882e,DEV=0\"
 2. Phy board channels quantity
 3. Virtual boards base name like : \"Emulated analog Alp222\"
 4. Virtual boards channels quantities (2: stéréo, ...)" >&2
}
+
+
# Emit an ALSA pcm_slave definition for the physical board.
#  $1 slave node name, $2 ALSA card id, $3 total channel count
build_phy_node() {
	local slave_name=$1
	local alsa_card=$2
	local nb_channels=$3
	echo -e "pcm_slave.$slave_name { \n pcm \"$alsa_card\"\n channels $nb_channels\n }\n"
}
+
# Emit one virtual PCM definition (dsnoop capture / dshare playback) bound
# onto a contiguous slice of the physical slave's channels.
#  $1 alias prefix      $2 plugin type (dsnoop|dshare)  $3 instance index
#  $4 shm ipc key       $5 slave node name              $6 first physical channel
#  $7 channel count     $8 description text             $9 direction label
build_virtual_node() {
	local alias_prefix=$1
	local plugin_type=$2
	local instance_id=$3
	local shm_key=$4
	local slave_node=$5
	local first_chan=$6
	local chan_qty=$7
	local desc_text=$8
	local dir_label=$9
	local chan

	echo -e "pcm.$alias_prefix$instance_id {
 type $plugin_type
 ipc_key $shm_key
 ipc_key_add_uid true
 slave $slave_node"

	# One binding per virtual channel, offset into the physical channel range.
	for (( chan = 0; chan < chan_qty; chan++ ))
	do
		echo -e "\tbindings.$chan $(( chan + first_chan ))"
	done

	echo -e "\thint.description" \"$desc_text \#$instance_id $dir_label\"
	echo -e "}\n"
}
+
### MAIN ###

# Bail out with usage on stderr unless all 4 arguments are present.
# Fixed: the guard was "[ ... ] && user_manual && exit 1", which only exited
# when user_manual happened to succeed.
if [ "$#" -ne "4" ]
then
	user_manual
	exit 1
fi

ipc_key=$(date +%N)
card_id=$1
channels_qty=$2
virtual_basename=$3
stream_width=$4
# How many virtual boards fit in the physical channel count.
virt_qty=$(( channels_qty / stream_width ))

# NOTE(review): $virtual_basename ($3) is accepted but never used below —
# the descriptions hardcode "virtual Alp-$stream_width". Confirm intent.
echo "#Building Alsa sound RC file for $virt_qty virtual boards of $stream_width channels from physical board $card_id with $channels_qty."

## Build the capture entries first
build_phy_node "ins" "$card_id" "$channels_qty"

for entry_idx in $(seq 0 $(($virt_qty-1)))
do
	build_virtual_node "mic" "dsnoop" "$entry_idx" "$ipc_key" "ins" "$(($stream_width * $entry_idx))" "$stream_width" "virtual Alp-$stream_width" "in"
done

## Now the outputs
build_phy_node "outs" "$card_id" "$channels_qty"

# Fresh key so the playback shm segment cannot collide with the capture one.
ipc_key=$(date +%N)

for entry_idx in $(seq 0 $(($virt_qty-1)))
do
	build_virtual_node "out" "dshare" "$entry_idx" "$ipc_key" "outs" "$(($stream_width * $entry_idx))" "$stream_width" "virtual Alp-$stream_width" "out"
done
diff --git a/snd-alpx/udev/88-pulseaudio-no-alp.rules b/snd-alpx/udev/88-pulseaudio-no-alp.rules
new file mode 100644
index 0000000..89e01c5
--- /dev/null
+++ b/snd-alpx/udev/88-pulseaudio-no-alp.rules
@@ -0,0 +1 @@
+ATTRS{vendor}=="0x1369", ATTRS{device}=="0x0002", ATTRS{subsystem_vendor}=="0x1369", ENV{PULSE_IGNORE}="1"