author    | Christian Pointner <equinox@helsinki.at> | 2024-05-10 18:56:00 (GMT)
committer | Christian Pointner <equinox@helsinki.at> | 2024-05-10 18:56:00 (GMT)
commit    | af6bfbbd2496b1f5aa02b94caff4e6f988fa32c9 (patch)
tree      | 45bebb68a0bf3bfc2fff1646f6fa0f4b976fba57 /snd-alpx-dkms/snd-alpx/core
parent    | 7035064ca063fdf15669ceb790ecef1e5ed054b0 (diff)
rename package to snd-alpx-dkms
Diffstat (limited to 'snd-alpx-dkms/snd-alpx/core')
33 files changed, 12616 insertions, 0 deletions
diff --git a/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h
new file mode 100755
index 0000000..2df54cf
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/dmaengine_pcm.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ *
+ * Return: DMA transfer direction
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+                return DMA_MEM_TO_DEV;
+        else
+                return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+        const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+        struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+        dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+        void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and sets the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and for catching corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data {
+        dma_addr_t addr;
+        enum dma_slave_buswidth addr_width;
+        u32 maxburst;
+        void *filter_data;
+        const char *chan_name;
+        unsigned int fifo_size;
+        unsigned int flags;
+        void *peripheral_config;
+        size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+        const struct snd_pcm_substream *substream,
+        const struct snd_dmaengine_dai_dma_data *dma_data,
+        struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+        struct snd_pcm_substream *substream,
+        struct snd_dmaengine_dai_dma_data *dma_data,
+        struct snd_pcm_hardware *hw,
+        struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM driver's hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set,
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
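+ *
+ * A minimal registration sketch (hypothetical driver code; the foo_ names
+ * and buffer size are assumptions, not part of this file):
+ *
+ *   static const struct snd_dmaengine_pcm_config foo_pcm_config = {
+ *           .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+ *           .prealloc_buffer_size = 64 * 1024,
+ *   };
+ *
+ *   ret = devm_snd_dmaengine_pcm_register(&pdev->dev, &foo_pcm_config, 0);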
+ */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +struct dmaengine_pcm { + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; + const struct snd_dmaengine_pcm_config *config; + struct snd_soc_component component; + unsigned int flags; +}; + +static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) +{ + return container_of(p, struct dmaengine_pcm, component); +} +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c new file mode 100755 index 0000000..494ec0c --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/RedHat/4.18/pcm_dmaengine.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. + * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. 
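+ *
+ * A typical hwparams callback might combine it with the helpers below
+ * (a sketch under assumed names; dma_data comes from the DAI driver):
+ *
+ *   struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+ *   struct dma_slave_config config;
+ *   int ret;
+ *
+ *   ret = snd_hwparams_to_dma_slave_config(substream, params, &config);
+ *   if (ret)
+ *           return ret;
+ *   snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data, &config);
+ *   return dmaengine_slave_config(chan, &config);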
+ * + * Return: zero if successful, or a negative error code + */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width + * fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
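+ *
+ * The DAI side typically fills the DMA data once at probe time, e.g.
+ * (a sketch; the register offset and burst size are assumptions):
+ *
+ *   dai_dma_data.addr = res->start + FOO_TX_FIFO_OFFSET;
+ *   dai_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ *   dai_dma_data.maxburst = 8;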
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->peripheral_config = dma_data->peripheral_config; + slave_config->peripheral_size = dma_data->peripheral_size; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + unsigned int new_pos; + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream); + if (new_pos >= snd_pcm_lib_buffer_bytes(substream)) + new_pos = 0; + prtd->pos = new_pos; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
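+ *
+ * Drivers that need no extra trigger handling can plug it in directly
+ * (a sketch; foo_pcm_ops is a hypothetical name):
+ *
+ *   static const struct snd_pcm_ops foo_pcm_ops = {
+ *           .trigger = snd_dmaengine_pcm_trigger,
+ *           .pointer = snd_dmaengine_pcm_pointer,
+ *   };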
+ * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + int ret; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + ret = dmaengine_pcm_prepare_and_submit(substream); + if (ret) + return ret; + dma_async_issue_pending(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dmaengine_resume(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (runtime->info & SNDRV_PCM_INFO_PAUSE) + dmaengine_pause(prtd->dma_chan); + else + dmaengine_terminate_async(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dmaengine_pause(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_STOP: + dmaengine_terminate_async(prtd->dma_chan); + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger); + +/** + * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function is deprecated and should not be used by new drivers, as its + * results may be unreliable. + * + * Return: PCM position in frames + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + return bytes_to_frames(substream->runtime, prtd->pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue); + +/** + * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function can be used as the PCM pointer callback for dmaengine based PCM + * driver implementations. + * + * Return: PCM position in frames + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct dma_tx_state state; + enum dma_status status; + unsigned int buf_size; + unsigned int pos = 0; + + status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state); + if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) { + buf_size = snd_pcm_lib_buffer_bytes(substream); + if (state.residue > 0 && state.residue <= buf_size) + pos = buf_size - state.residue; + + runtime->delay = bytes_to_frames(runtime, + state.in_flight_bytes); + } + + return bytes_to_frames(runtime, pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer); + +/** + * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * This function request a DMA channel for usage with dmaengine PCM. + * + * Return: NULL or the requested DMA channel + */ +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + return dma_request_channel(mask, filter_fn, filter_data); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel); + +/** + * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream + * @substream: PCM substream + * @chan: DMA channel to use for data transfers + * + * The function should usually be called from the pcm open callback. 
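+ * For instance (a sketch; foo_get_dma_chan() is a hypothetical lookup in
+ * the calling driver):
+ *
+ *   static int foo_pcm_open(struct snd_pcm_substream *substream)
+ *   {
+ *           return snd_dmaengine_pcm_open(substream,
+ *                                         foo_get_dma_chan(substream));
+ *   }
+ *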
Note that + * this function will use private_data field of the substream's runtime. So it + * is not available to your pcm driver implementation. + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan) +{ + struct dmaengine_pcm_runtime_data *prtd; + int ret; + + if (!chan) + return -ENXIO; + + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + + prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); + if (!prtd) + return -ENOMEM; + + prtd->dma_chan = chan; + + substream->runtime->private_data = prtd; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); + +/** + * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel + * @substream: PCM substream + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * This function will request a DMA channel using the passed filter function and + * data. The function should usually be called from the pcm open callback. Note + * that this function will use private_data field of the substream's runtime. So + * it is not available to your pcm driver implementation. + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data) +{ + return snd_dmaengine_pcm_open(substream, + snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan); + +/** + * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream + * @substream: PCM substream + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); + +/** + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM + * substream and release channel + * @substream: PCM substream + * + * Releases the DMA channel associated with the PCM substream. + * + * Return: zero if successful, or a negative error code + */ +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + dma_release_channel(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); + +/** + * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params + * @substream: PCM substream + * @dma_data: DAI DMA data + * @hw: PCM hw params + * @chan: DMA channel to use for data transfers + * + * This function will query DMA capability, then refine the pcm hardware + * parameters. 
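+ *
+ * A sketch of typical use from an open callback (hw and dma_data are
+ * assumed to be set up by the calling driver):
+ *
+ *   ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, dma_data,
+ *                                                   &hw, chan);
+ *   if (ret)
+ *           return ret;
+ *   return snd_soc_set_runtime_hwparams(substream, &hw);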
+ * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space can not use the + * format which produces corrupted audio. + * In case the dma driver does not implement the slave_caps the + * default assumption is that it supports 1, 2 and 4 bytes + * widths. + */ + pcm_for_each_format(i) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h new file mode 100755 index 0000000..2df54cf --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/dmaengine_pcm.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc. 
+ * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef __SOUND_DMAENGINE_PCM_H__ +#define __SOUND_DMAENGINE_PCM_H__ + +#include <sound/pcm.h> +#include <sound/soc.h> +#include <linux/dmaengine.h> + +/** + * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM + * substream + * @substream: PCM substream + * + * Return: DMA transfer direction + */ +static inline enum dma_transfer_direction +snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return DMA_MEM_TO_DEV; + else + return DMA_DEV_TO_MEM; +} + +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config); +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan); +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data); +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream); + +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data); +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); + +/* + * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word. + * If this flag is set the dmaengine driver won't put any restriction on + * the supported sample formats and set the DMA transfer size to undefined. + * The DAI driver is responsible to disable any unsupported formats in it's + * configuration and catch corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + +/** + * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data + * @addr: Address of the DAI data source or destination register. + * @addr_width: Width of the DAI data source or destination register. + * @maxburst: Maximum number of words(note: words, as in units of the + * src_addr_width member, not bytes) that can be send to or received from the + * DAI in one burst. + * @filter_data: Custom DMA channel filter data, this will usually be used when + * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. 
+ * @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + * @peripheral_config: peripheral configuration for programming peripheral + * for dmaengine transfer + * @peripheral_size: peripheral configuration buffer size + */ +struct snd_dmaengine_dai_dma_data { + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + void *filter_data; + const char *chan_name; + unsigned int fifo_size; + unsigned int flags; + void *peripheral_config; + size_t peripheral_size; +}; + +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *config); + +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan); + +/* + * Try to request the DMA channel using compat_request_channel or + * compat_filter_fn if it couldn't be requested through devicetree. + */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well. + */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM drivers hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function. 
+ */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +struct dmaengine_pcm { + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; + const struct snd_dmaengine_pcm_config *config; + struct snd_soc_component component; + unsigned int flags; +}; + +static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) +{ + return container_of(p, struct dmaengine_pcm, component); +} +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c new file mode 100755 index 0000000..494ec0c --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/RedHat/5.14/pcm_dmaengine.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. + * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. 
+ * + * Return: zero if successful, or a negative error code + */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width + * fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->peripheral_config = dma_data->peripheral_config; + slave_config->peripheral_size = dma_data->peripheral_size; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + unsigned int new_pos; + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream); + if (new_pos >= snd_pcm_lib_buffer_bytes(substream)) + new_pos = 0; + prtd->pos = new_pos; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
+ * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + int ret; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + ret = dmaengine_pcm_prepare_and_submit(substream); + if (ret) + return ret; + dma_async_issue_pending(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dmaengine_resume(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (runtime->info & SNDRV_PCM_INFO_PAUSE) + dmaengine_pause(prtd->dma_chan); + else + dmaengine_terminate_async(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dmaengine_pause(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_STOP: + dmaengine_terminate_async(prtd->dma_chan); + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger); + +/** + * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function is deprecated and should not be used by new drivers, as its + * results may be unreliable. + * + * Return: PCM position in frames + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + return bytes_to_frames(substream->runtime, prtd->pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue); + +/** + * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function can be used as the PCM pointer callback for dmaengine based PCM + * driver implementations. + * + * Return: PCM position in frames + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct dma_tx_state state; + enum dma_status status; + unsigned int buf_size; + unsigned int pos = 0; + + status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state); + if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) { + buf_size = snd_pcm_lib_buffer_bytes(substream); + if (state.residue > 0 && state.residue <= buf_size) + pos = buf_size - state.residue; + + runtime->delay = bytes_to_frames(runtime, + state.in_flight_bytes); + } + + return bytes_to_frames(runtime, pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer); + +/** + * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * This function request a DMA channel for usage with dmaengine PCM. + * + * Return: NULL or the requested DMA channel + */ +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + return dma_request_channel(mask, filter_fn, filter_data); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel); + +/** + * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream + * @substream: PCM substream + * @chan: DMA channel to use for data transfers + * + * The function should usually be called from the pcm open callback. 
Note that + * this function will use private_data field of the substream's runtime. So it + * is not available to your pcm driver implementation. + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan) +{ + struct dmaengine_pcm_runtime_data *prtd; + int ret; + + if (!chan) + return -ENXIO; + + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + + prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); + if (!prtd) + return -ENOMEM; + + prtd->dma_chan = chan; + + substream->runtime->private_data = prtd; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); + +/** + * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel + * @substream: PCM substream + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * This function will request a DMA channel using the passed filter function and + * data. The function should usually be called from the pcm open callback. Note + * that this function will use private_data field of the substream's runtime. So + * it is not available to your pcm driver implementation. + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data) +{ + return snd_dmaengine_pcm_open(substream, + snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan); + +/** + * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream + * @substream: PCM substream + * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); + +/** + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM + * substream and release channel + * @substream: PCM substream + * + * Releases the DMA channel associated with the PCM substream. + * + * Return: zero if successful, or a negative error code + */ +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + dma_release_channel(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); + +/** + * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params + * @substream: PCM substream + * @dma_data: DAI DMA data + * @hw: PCM hw params + * @chan: DMA channel to use for data transfers + * + * This function will query DMA capability, then refine the pcm hardware + * parameters. 
+ * + * Return: 0 on success, a negative error code otherwise + */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space can not use the + * format which produces corrupted audio. + * In case the dma driver does not implement the slave_caps the + * default assumption is that it supports 1, 2 and 4 bytes + * widths. + */ + pcm_for_each_format(i) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/4.19/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/4.19/dmaengine_pcm.h new file mode 100755 index 0000000..2c4cfaa --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/4.19/dmaengine_pcm.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc. 
+ * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef __SOUND_DMAENGINE_PCM_H__ +#define __SOUND_DMAENGINE_PCM_H__ + +#include <sound/pcm.h> +#include <sound/soc.h> +#include <linux/dmaengine.h> + +/** + * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM + * substream + * @substream: PCM substream + */ +static inline enum dma_transfer_direction +snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return DMA_MEM_TO_DEV; + else + return DMA_DEV_TO_MEM; +} + +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config); +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan); +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data); +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream); + +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data); +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); + +/* + * The DAI supports packed transfers, eg 2 16-bit samples in a 32-bit word. + * If this flag is set the dmaengine driver won't put any restriction on + * the supported sample formats and set the DMA transfer size to undefined. + * The DAI driver is responsible to disable any unsupported formats in it's + * configuration and catch corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + +/** + * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data + * @addr: Address of the DAI data source or destination register. + * @addr_width: Width of the DAI data source or destination register. + * @maxburst: Maximum number of words(note: words, as in units of the + * src_addr_width member, not bytes) that can be send to or received from the + * DAI in one burst. + * @slave_id: Slave requester id for the DMA channel. + * @filter_data: Custom DMA channel filter data, this will usually be used when + * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. + * @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + */ +struct snd_dmaengine_dai_dma_data { + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + unsigned int slave_id; + void *filter_data; + const char *chan_name; + unsigned int fifo_size; + unsigned int flags; +}; + +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *config); + + +/* + * Try to request the DMA channel using compat_request_channel or + * compat_filter_fn if it couldn't be requested through devicetree. + */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well. 
+ */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) +/* + * The PCM streams have custom channel names specified. + */ +#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM drivers hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function. + */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/4.19/internal.h b/snd-alpx-dkms/snd-alpx/core/generic/4.19/internal.h new file mode 100644 index 0000000..a6bf34d --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/4.19/internal.h @@ -0,0 +1,297 @@ +/* + * Register map access API internal header + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _REGMAP_INTERNAL_H +#define _REGMAP_INTERNAL_H + +#include <linux/device.h> +#include <linux/regmap.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/wait.h> + +struct regmap; +struct regcache_ops; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + void (*format_write)(struct regmap *map, + unsigned int reg, unsigned int val); + void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); + void (*format_val)(void *buf, unsigned int val, unsigned int shift); + unsigned int (*parse_val)(const void *buf); + void (*parse_inplace)(void *buf); +}; + +struct regmap_async { + struct list_head list; + struct regmap *map; + void *work_buf; +}; + +struct regmap { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + unsigned long spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; /* This is passed to lock/unlock functions */ + gfp_t alloc_flags; + + struct device *dev; /* Device we do I/O on */ + void *work_buf; /* Scratch buffer used to format I/O */ + struct regmap_format format; /* Buffer format */ + const struct regmap_bus *bus; + void *bus_context; + const char *name; + + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + +#ifdef CONFIG_DEBUG_FS + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + + struct list_head debugfs_off_cache; + struct mutex cache_lock; +#endif + + unsigned int max_register; + bool (*writeable_reg)(struct device *dev, unsigned int reg); + bool (*readable_reg)(struct device *dev, unsigned int reg); + bool (*volatile_reg)(struct device *dev, unsigned int reg); + bool (*precious_reg)(struct device *dev, unsigned int reg); + bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *rd_noinc_table; + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); + + bool defer_caching; + + unsigned long read_flag_mask; + unsigned long write_flag_mask; + + /* number of bits to (left) shift the reg value when formatting*/ + int reg_shift; + int reg_stride; + int reg_stride_order; + + /* regcache specific members */ + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + + /* number of bytes in reg_defaults_raw */ + unsigned int cache_size_raw; + /* number of bytes per word in reg_defaults_raw */ + unsigned int cache_word_size; + /* number of entries in reg_defaults */ + unsigned int num_reg_defaults; + /* number of entries in reg_defaults_raw */ + unsigned int num_reg_defaults_raw; + + /* if set, only the cache is modified not the HW */ + bool cache_only; + /* if set, only the HW is modified not the cache */ + bool cache_bypass; + /* if set, remember to free reg_defaults_raw */ + bool cache_free; + + struct reg_default *reg_defaults; + const 
void *reg_defaults_raw; + void *cache; + /* if set, the cache contains newer data than the HW */ + bool cache_dirty; + /* if set, the HW registers are known to match map->reg_defaults */ + bool no_sync_defaults; + + struct reg_sequence *patch; + int patch_regs; + + /* if set, converts bulk read to single read */ + bool use_single_read; + /* if set, converts bulk read to single read */ + bool use_single_write; + /* if set, the device supports multi write mode */ + bool can_multi_write; + + /* if set, raw reads/writes are limited to this size */ + size_t max_raw_read; + size_t max_raw_write; + + struct rb_root range_tree; + void *selector_work_buf; /* Scratch buffer used for selector */ + + struct hwspinlock *hwlock; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap *map); + int (*exit)(struct regmap *map); +#ifdef CONFIG_DEBUG_FS + void (*debugfs_init)(struct regmap *map); +#endif + int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); + int (*write)(struct regmap *map, unsigned int reg, unsigned int value); + int (*sync)(struct regmap *map, unsigned int min, unsigned int max); + int (*drop)(struct regmap *map, unsigned int min, unsigned int max); +}; + +bool regmap_cached(struct regmap *map, unsigned int reg); +bool regmap_writeable(struct regmap *map, unsigned int reg); +bool regmap_readable(struct regmap *map, unsigned int reg); +bool regmap_volatile(struct regmap *map, unsigned int reg); +bool regmap_precious(struct regmap *map, unsigned int reg); +bool regmap_readable_noinc(struct regmap *map, unsigned int reg); + +int _regmap_write(struct regmap *map, unsigned int reg, + unsigned int val); + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap *map; + + unsigned int range_min; + unsigned int range_max; + + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + /* lsb */ + unsigned int shift; + unsigned int reg; + + unsigned int id_size; + unsigned int id_offset; +}; + +#ifdef CONFIG_DEBUG_FS +extern void regmap_debugfs_initcall(void); +extern void regmap_debugfs_init(struct regmap *map, const char *name); +extern void regmap_debugfs_exit(struct regmap *map); + +static inline void regmap_debugfs_disable(struct regmap *map) +{ + map->debugfs_disable = true; +} + +#else +static inline void regmap_debugfs_initcall(void) { } +static inline void regmap_debugfs_init(struct regmap *map, const char *name) { } +static inline void regmap_debugfs_exit(struct regmap *map) { } +static inline void regmap_debugfs_disable(struct regmap *map) { } +#endif + +/* regcache core declarations */ +int regcache_init(struct regmap *map, const struct regmap_config *config); +void regcache_exit(struct regmap *map); +int regcache_read(struct regmap *map, + unsigned int reg, unsigned int *value); +int regcache_write(struct regmap *map, + unsigned int reg, unsigned int value); +int regcache_sync(struct regmap *map); +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end); + +static inline const void *regcache_get_val_addr(struct regmap *map, + const void *base, + unsigned int idx) +{ + return base + (map->cache_word_size * idx); +} + +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx); +bool regcache_set_val(struct 
regmap *map, void *base, unsigned int idx, + unsigned int val); +int regcache_lookup_reg(struct regmap *map, unsigned int reg); + +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); + +void regmap_async_complete_cb(struct regmap_async *async, int ret); + +enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); + +extern struct regcache_ops regcache_rbtree_ops; +extern struct regcache_ops regcache_lzo_ops; +extern struct regcache_ops regcache_flat_ops; + +static inline const char *regmap_name(const struct regmap *map) +{ + if (map->dev) + return dev_name(map->dev); + + return map->name; +} + +static inline unsigned int regmap_get_offset(const struct regmap *map, + unsigned int index) +{ + if (map->reg_stride_order >= 0) + return index << map->reg_stride_order; + else + return index * map->reg_stride; +} + +static inline unsigned int regcache_get_index_by_order(const struct regmap *map, + unsigned int reg) +{ + return reg >> map->reg_stride_order; +} + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/4.19/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/4.19/pcm_dmaengine.c new file mode 100755 index 0000000..6f6da11 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/4.19/pcm_dmaengine.c @@ -0,0 +1,383 @@ +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. + * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. 
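+ *
+ * Example (editor's sketch, not part of the original source): a driver's
+ * hw_params callback would typically do
+ *
+ *	struct dma_slave_config cfg = { 0 };
+ *
+ *	snd_hwparams_to_dma_slave_config(substream, params, &cfg);
+ *	cfg.dst_addr = fifo_phys_addr;	/* hypothetical device FIFO address */
+ *	dmaengine_slave_config(snd_dmaengine_pcm_get_chan(substream), &cfg);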
+ */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and + * slave_id fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->slave_id = dma_data->slave_id; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + unsigned int new_pos; + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream); + if (new_pos >= snd_pcm_lib_buffer_bytes(substream)) + new_pos = 0; + prtd->pos = new_pos; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		ret = dmaengine_pcm_prepare_and_submit(substream);
+		if (ret)
+			return ret;
+		dma_async_issue_pending(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		dmaengine_resume(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		dmaengine_pause(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned int buf_size;
+	unsigned int pos = 0;
+
+	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+		buf_size = snd_pcm_lib_buffer_bytes(substream);
+		if (state.residue > 0 && state.residue <= buf_size)
+			pos = buf_size - state.residue;
+	}
+
+	return bytes_to_frames(substream->runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for use with a dmaengine based PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+
+	return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use the private_data field of the substream's runtime, so
+ * it is not available to your pcm driver implementation.
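+ *
+ * Example (editor's sketch; the foo_* names are hypothetical): a minimal
+ * open callback for a driver that already owns a DMA channel:
+ *
+ *	static int foo_pcm_open(struct snd_pcm_substream *substream)
+ *	{
+ *		struct foo_priv *priv = snd_pcm_substream_chip(substream);
+ *
+ *		snd_soc_set_runtime_hwparams(substream, &foo_pcm_hardware);
+ *		return snd_dmaengine_pcm_open(substream, priv->dma_chan);
+ *	}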
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+	struct dma_chan *chan)
+{
+	struct dmaengine_pcm_runtime_data *prtd;
+	int ret;
+
+	if (!chan)
+		return -ENXIO;
+
+	ret = snd_pcm_hw_constraint_integer(substream->runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		return ret;
+
+	prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+	if (!prtd)
+		return -ENOMEM;
+
+	prtd->dma_chan = chan;
+
+	substream->runtime->private_data = prtd;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use the private_data field of the substream's runtime,
+ * so it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+	dma_filter_fn filter_fn, void *filter_data)
+{
+	return snd_dmaengine_pcm_open(substream,
+		snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	dma_release_channel(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+MODULE_LICENSE("GPL");
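The regmap MMIO backend that follows is bundled so the DKMS package can build against older kernels; the #undef CONFIG_MODULES block at the top of the file turns the EXPORT_SYMBOL() macros into no-ops so the embedded copy does not re-export core symbols. For orientation, a consumer typically creates an MMIO regmap as in this editor's sketch (the foo_* names and register geometry are illustrative, not taken from this driver):

	#include <linux/err.h>
	#include <linux/regmap.h>

	static const struct regmap_config foo_regmap_cfg = {
		.reg_bits = 32,		/* 32-bit register addresses */
		.val_bits = 32,		/* 32-bit register values */
		.reg_stride = 4,	/* registers on 4-byte boundaries */
	};

	/* probe path: wrap an already ioremap()ed region in a regmap */
	static int foo_init_regmap(struct device *dev, void __iomem *base,
				   struct regmap **map)
	{
		*map = devm_regmap_init_mmio(dev, base, &foo_regmap_cfg);
		return PTR_ERR_OR_ZERO(*map);
	}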
diff --git a/snd-alpx-dkms/snd-alpx/core/generic/4.19/regmap-mmio.c b/snd-alpx-dkms/snd-alpx/core/generic/4.19/regmap-mmio.c
new file mode 100644
index 0000000..d524815
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/4.19/regmap-mmio.c
@@ -0,0 +1,399 @@
+/*
+ * Register map access API - MMIO support
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Simple patch to embed this file in its user module: */
+/* DISABLE the EXPORT_SYMBOL() macros */
+#undef CONFIG_MODULES
+#include <linux/export.h>
+/* Then re-enable module support to include module.h */
+#define CONFIG_MODULES
+
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_mmio_context {
+	void __iomem *regs;
+	unsigned val_bytes;
+
+	bool attached_clk;
+	struct clk *clk;
+
+	void (*reg_write)(struct regmap_mmio_context *ctx,
+			  unsigned int reg, unsigned int val);
+	unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+				 unsigned int reg);
+};
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+	switch (reg_bits) {
+	case 8:
+	case 16:
+	case 32:
+#ifdef CONFIG_64BIT
+	case 64:
+#endif
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+	int min_stride;
+
+	switch (val_bits) {
+	case 8:
+		/* The core treats 0 as 1 */
+		min_stride = 0;
+		return 0;
+	case 16:
+		min_stride = 2;
+		break;
+	case 32:
+		min_stride = 4;
+		break;
+#ifdef CONFIG_64BIT
+	case 64:
+		min_stride = 8;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	return min_stride;
+}
+
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+				unsigned int reg,
+				unsigned int val)
+{
+	writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite32be(val, ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writeq(val, ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct regmap_mmio_context *ctx = context;
+	int ret;
+
+	if (!IS_ERR(ctx->clk)) {
+		ret = clk_enable(ctx->clk);
+		if (ret < 0)
+			return ret;
+	}
+
+	ctx->reg_write(ctx, reg, val);
+
+	if (!IS_ERR(ctx->clk))
+		clk_disable(ctx->clk);
+
+	return 0;
+}
+
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+				      unsigned int reg)
+{
+	return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread32be(ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readq(ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct regmap_mmio_context *ctx = context;
+	int ret;
+
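+	/*
+	 * Editor's note: the bus clock is optional. regmap_mmio_gen_context()
+	 * initialises ctx->clk to ERR_PTR(-ENODEV) when no clock id is given,
+	 * so the IS_ERR() checks below skip clock handling in that case.
+	 */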
if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + *val = ctx->reg_read(ctx, reg); + + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return 0; +} + +static void regmap_mmio_free_context(void *context) +{ + struct regmap_mmio_context *ctx = context; + + if (!IS_ERR(ctx->clk)) { + clk_unprepare(ctx->clk); + if (!ctx->attached_clk) + clk_put(ctx->clk); + } + kfree(context); +} + +static const struct regmap_bus regmap_mmio = { + .fast_io = true, + .reg_write = regmap_mmio_write, + .reg_read = regmap_mmio_read, + .free_context = regmap_mmio_free_context, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config) +{ + struct regmap_mmio_context *ctx; + int min_stride; + int ret; + + ret = regmap_mmio_regbits_check(config->reg_bits); + if (ret) + return ERR_PTR(ret); + + if (config->pad_bits) + return ERR_PTR(-EINVAL); + + min_stride = regmap_mmio_get_min_stride(config->val_bits); + if (min_stride < 0) + return ERR_PTR(min_stride); + + if (config->reg_stride < min_stride) + return ERR_PTR(-EINVAL); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->regs = regs; + ctx->val_bytes = config->val_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); + + switch (regmap_get_val_endian(dev, ®map_mmio, config)) { + case REGMAP_ENDIAN_DEFAULT: + case REGMAP_ENDIAN_LITTLE: +#ifdef __LITTLE_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + switch (config->val_bits) { + case 8: + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + break; + case 16: + ctx->reg_read = regmap_mmio_read16le; + ctx->reg_write = regmap_mmio_write16le; + break; + case 32: + ctx->reg_read = regmap_mmio_read32le; + ctx->reg_write = regmap_mmio_write32le; + break; +#ifdef CONFIG_64BIT + case 64: + ctx->reg_read = regmap_mmio_read64le; + ctx->reg_write = regmap_mmio_write64le; + break; +#endif + default: + ret = -EINVAL; + goto err_free; + } + break; + case REGMAP_ENDIAN_BIG: +#ifdef __BIG_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + switch (config->val_bits) { + case 8: + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + break; + case 16: + ctx->reg_read = regmap_mmio_read16be; + ctx->reg_write = regmap_mmio_write16be; + break; + case 32: + ctx->reg_read = regmap_mmio_read32be; + ctx->reg_write = regmap_mmio_write32be; + break; + default: + ret = -EINVAL; + goto err_free; + } + break; + default: + ret = -EINVAL; + goto err_free; + } + + if (clk_id == NULL) + return ctx; + + ctx->clk = clk_get(dev, clk_id); + if (IS_ERR(ctx->clk)) { + ret = PTR_ERR(ctx->clk); + goto err_free; + } + + ret = clk_prepare(ctx->clk); + if (ret < 0) { + clk_put(ctx->clk); + goto err_free; + } + + return ctx; + +err_free: + kfree(ctx); + + return ERR_PTR(ret); +} + +struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap_mmio_context *ctx; + + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __regmap_init(dev, ®map_mmio, ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk); + +struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct 
lock_class_key *lock_key,
+					   const char *lock_name)
+{
+	struct regmap_mmio_context *ctx;
+
+	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+	struct regmap_mmio_context *ctx = map->bus_context;
+
+	ctx->clk = clk;
+	ctx->attached_clk = true;
+
+	return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+	struct regmap_mmio_context *ctx = map->bus_context;
+
+	clk_unprepare(ctx->clk);
+
+	ctx->attached_clk = false;
+	ctx->clk = NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+
+MODULE_LICENSE("GPL v2");
diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.12/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/5.12/dmaengine_pcm.h
new file mode 100644
index 0000000..96666ef
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/5.12/dmaengine_pcm.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		return DMA_MEM_TO_DEV;
+	else
+		return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+	const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+	struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+	dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and sets the DMA transfer size to undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and for catching corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
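How a DAI driver feeds this structure to the dmaengine PCM layer, as an editor's sketch (the foo_* names and the FIFO address macro are hypothetical):

	static struct snd_dmaengine_dai_dma_data foo_playback_dma_data;

	static int foo_dai_probe(struct snd_soc_dai *dai)
	{
		foo_playback_dma_data.addr = FOO_FIFO_PHYS_ADDR;
		foo_playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		foo_playback_dma_data.maxburst = 8;	/* 8 words per burst */
		foo_playback_dma_data.flags = SND_DMAENGINE_PCM_DAI_FLAG_PACK;

		/* no capture stream in this sketch */
		snd_soc_dai_init_dma_data(dai, &foo_playback_dma_data, NULL);
		return 0;
	}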
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
+ */
+struct snd_dmaengine_dai_dma_data {
+	dma_addr_t addr;
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+	unsigned int slave_id;
+	void *filter_data;
+	const char *chan_name;
+	unsigned int fifo_size;
+	unsigned int flags;
+	void *peripheral_config;
+	size_t peripheral_size;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+	const struct snd_pcm_substream *substream,
+	const struct snd_dmaengine_dai_dma_data *dma_data,
+	struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+	struct snd_pcm_substream *substream,
+	struct snd_dmaengine_dai_dma_data *dma_data,
+	struct snd_pcm_hardware *hw,
+	struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */
+#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0)
+/*
+ * Don't try to request the DMA channels through devicetree. This flag only
+ * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
+ */
+#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
+/*
+ * The PCM is half duplex and the DMA channel is shared between capture and
+ * playback.
+ */
+#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
+
+/**
+ * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
+ * @prepare_slave_config: Callback used to fill in the DMA slave_config for a
+ * PCM substream. Will be called from the PCM driver's hwparams callback.
+ * @compat_request_channel: Callback to request a DMA channel for platforms
+ * which do not use devicetree.
+ * @process: Callback used to apply processing on samples transferred from/to
+ * user space.
+ * @compat_filter_fn: Will be used as the filter function when requesting a
+ * channel for platforms which do not use devicetree. The filter parameter
+ * will be the DAI's DMA data.
+ * @dma_dev: If set, request DMA channel on this device rather than the DAI
+ * device.
+ * @chan_names: If set, these custom DMA channel names will be requested at
+ * registration time.
+ * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM.
+ * @prealloc_buffer_size: Size of the preallocated audio buffer.
+ *
+ * Note: If both compat_request_channel and compat_filter_fn are set,
+ * compat_request_channel will be used to request the channel and
+ * compat_filter_fn will be ignored. Otherwise the channel will be requested
+ * using dma_request_channel with compat_filter_fn as the filter function.
+ */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +struct dmaengine_pcm { + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; + const struct snd_dmaengine_pcm_config *config; + struct snd_soc_component component; + unsigned int flags; +}; + +static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) +{ + return container_of(p, struct dmaengine_pcm, component); +} +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.12/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/5.12/pcm_dmaengine.c new file mode 100644 index 0000000..1fc2fa0 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.12/pcm_dmaengine.c @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. + * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. 
+ */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and + * slave_id fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->slave_id = dma_data->slave_id; + slave_config->peripheral_config = dma_data->peripheral_config; + slave_config->peripheral_size = dma_data->peripheral_size; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + prtd->pos += snd_pcm_lib_period_bytes(substream); + if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) + prtd->pos = 0; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		ret = dmaengine_pcm_prepare_and_submit(substream);
+		if (ret)
+			return ret;
+		dma_async_issue_pending(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		dmaengine_resume(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		dmaengine_pause(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned int buf_size;
+	unsigned int pos = 0;
+
+	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+		buf_size = snd_pcm_lib_buffer_bytes(substream);
+		if (state.residue > 0 && state.residue <= buf_size)
+			pos = buf_size - state.residue;
+
+		runtime->delay = bytes_to_frames(runtime,
+						 state.in_flight_bytes);
+	}
+
+	return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for use with a dmaengine based PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+
+	return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use the private_data field of the substream's runtime.
So it + * is not available to your pcm driver implementation. + */ +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan) +{ + struct dmaengine_pcm_runtime_data *prtd; + int ret; + + if (!chan) + return -ENXIO; + + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + + prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); + if (!prtd) + return -ENOMEM; + + prtd->dma_chan = chan; + + substream->runtime->private_data = prtd; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); + +/** + * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel + * @substream: PCM substream + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * Returns 0 on success, a negative error code otherwise. + * + * This function will request a DMA channel using the passed filter function and + * data. The function should usually be called from the pcm open callback. Note + * that this function will use private_data field of the substream's runtime. So + * it is not available to your pcm driver implementation. + */ +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data) +{ + return snd_dmaengine_pcm_open(substream, + snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan); + +/** + * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream + * @substream: PCM substream + */ +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); + +/** + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM + * substream and release channel + * @substream: PCM substream + * + * Releases the DMA channel associated with the PCM substream. + */ +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + dma_release_channel(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); + +/** + * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params + * @substream: PCM substream + * @dma_data: DAI DMA data + * @hw: PCM hw params + * @chan: DMA channel to use for data transfers + * + * Returns 0 on success, a negative error code otherwise. + * + * This function will query DMA capability, then refine the pcm hardware + * parameters. 
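+ *
+ * It is typically called from the PCM open path, with the refined @hw then
+ * applied to the runtime, e.g. via snd_soc_set_runtime_hwparams()
+ * (editor's note).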
+ */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space can not use the + * format which produces corrupted audio. + * In case the dma driver does not implement the slave_caps the + * default assumption is that it supports 1, 2 and 4 bytes + * widths. + */ + pcm_for_each_format(i) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.14/internal.h b/snd-alpx-dkms/snd-alpx/core/generic/5.14/internal.h new file mode 100644 index 0000000..37dfbdf --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.14/internal.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Register map access API internal header + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + */ + +#ifndef _REGMAP_INTERNAL_H +#define _REGMAP_INTERNAL_H + +#include <linux/device.h> +#include <linux/regmap.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/wait.h> + +struct regmap; +struct regcache_ops; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + s8 reg_shift; + void (*format_write)(struct regmap *map, + unsigned int reg, unsigned int val); + void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); + void (*format_val)(void *buf, unsigned int val, unsigned int shift); + unsigned int (*parse_val)(const void *buf); + void (*parse_inplace)(void *buf); +}; + +struct regmap_async { + struct list_head list; + struct regmap *map; + void *work_buf; +}; + +struct regmap { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + unsigned long spinlock_flags; + }; + struct { + raw_spinlock_t raw_spinlock; + unsigned 
long raw_spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; /* This is passed to lock/unlock functions */ + gfp_t alloc_flags; + unsigned int reg_base; + + struct device *dev; /* Device we do I/O on */ + void *work_buf; /* Scratch buffer used to format I/O */ + struct regmap_format format; /* Buffer format */ + const struct regmap_bus *bus; + void *bus_context; + const char *name; + + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + +#ifdef CONFIG_DEBUG_FS + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + + struct list_head debugfs_off_cache; + struct mutex cache_lock; +#endif + + unsigned int max_register; + bool (*writeable_reg)(struct device *dev, unsigned int reg); + bool (*readable_reg)(struct device *dev, unsigned int reg); + bool (*volatile_reg)(struct device *dev, unsigned int reg); + bool (*precious_reg)(struct device *dev, unsigned int reg); + bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); + bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); + /* Bulk read/write */ + int (*read)(void *context, const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + int (*write)(void *context, const void *data, size_t count); + + bool defer_caching; + + unsigned long read_flag_mask; + unsigned long write_flag_mask; + + /* number of bits to (left) shift the reg value when formatting*/ + int reg_shift; + int reg_stride; + int reg_stride_order; + + /* regcache specific members */ + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + + /* number of bytes in reg_defaults_raw */ + unsigned int cache_size_raw; + /* number of bytes per word in reg_defaults_raw */ + unsigned int cache_word_size; + /* number of entries in reg_defaults */ + unsigned int num_reg_defaults; + /* number of entries in reg_defaults_raw */ + unsigned int num_reg_defaults_raw; + + /* if set, only the cache is modified not the HW */ + bool cache_only; + /* if set, only the HW is modified not the cache */ + bool cache_bypass; + /* if set, remember to free reg_defaults_raw */ + bool cache_free; + + struct reg_default *reg_defaults; + const void *reg_defaults_raw; + void *cache; + /* if set, the cache contains newer data than the HW */ + bool cache_dirty; + /* if set, the HW registers are known to match map->reg_defaults */ + bool no_sync_defaults; + + struct reg_sequence *patch; + int patch_regs; + + /* if set, converts bulk read to single read */ + bool use_single_read; + /* if set, converts bulk write to single write */ + bool use_single_write; + /* if set, the device supports multi write mode */ + bool can_multi_write; + + /* if set, raw reads/writes are limited to this size */ + size_t max_raw_read; + size_t max_raw_write; + + struct rb_root range_tree; + void 
*selector_work_buf; /* Scratch buffer used for selector */ + + struct hwspinlock *hwlock; + + /* if set, the regmap core can sleep */ + bool can_sleep; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap *map); + int (*exit)(struct regmap *map); +#ifdef CONFIG_DEBUG_FS + void (*debugfs_init)(struct regmap *map); +#endif + int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); + int (*write)(struct regmap *map, unsigned int reg, unsigned int value); + int (*sync)(struct regmap *map, unsigned int min, unsigned int max); + int (*drop)(struct regmap *map, unsigned int min, unsigned int max); +}; + +bool regmap_cached(struct regmap *map, unsigned int reg); +bool regmap_writeable(struct regmap *map, unsigned int reg); +bool regmap_readable(struct regmap *map, unsigned int reg); +bool regmap_volatile(struct regmap *map, unsigned int reg); +bool regmap_precious(struct regmap *map, unsigned int reg); +bool regmap_writeable_noinc(struct regmap *map, unsigned int reg); +bool regmap_readable_noinc(struct regmap *map, unsigned int reg); + +int _regmap_write(struct regmap *map, unsigned int reg, + unsigned int val); + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap *map; + + unsigned int range_min; + unsigned int range_max; + + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + /* lsb */ + unsigned int shift; + unsigned int reg; + + unsigned int id_size; + unsigned int id_offset; +}; + +#ifdef CONFIG_DEBUG_FS +extern void regmap_debugfs_initcall(void); +extern void regmap_debugfs_init(struct regmap *map); +extern void regmap_debugfs_exit(struct regmap *map); + +static inline void regmap_debugfs_disable(struct regmap *map) +{ + map->debugfs_disable = true; +} + +#else +static inline void regmap_debugfs_initcall(void) { } +static inline void regmap_debugfs_init(struct regmap *map) { } +static inline void regmap_debugfs_exit(struct regmap *map) { } +static inline void regmap_debugfs_disable(struct regmap *map) { } +#endif + +/* regcache core declarations */ +int regcache_init(struct regmap *map, const struct regmap_config *config); +void regcache_exit(struct regmap *map); +int regcache_read(struct regmap *map, + unsigned int reg, unsigned int *value); +int regcache_write(struct regmap *map, + unsigned int reg, unsigned int value); +int regcache_sync(struct regmap *map); +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end); +bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, + unsigned int val); + +static inline const void *regcache_get_val_addr(struct regmap *map, + const void *base, + unsigned int idx) +{ + return base + (map->cache_word_size * idx); +} + +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx); +bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, + unsigned int val); +int regcache_lookup_reg(struct regmap *map, unsigned int reg); +int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val); + +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len, bool noinc); + +void regmap_async_complete_cb(struct regmap_async *async, int ret); + +enum regmap_endian regmap_get_val_endian(struct device *dev, + 
const struct regmap_bus *bus,
+					 const struct regmap_config *config);
+
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_maple_ops;
+extern struct regcache_ops regcache_flat_ops;
+
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+					     unsigned int index)
+{
+	if (map->reg_stride_order >= 0)
+		return index << map->reg_stride_order;
+	else
+		return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+						       unsigned int reg)
+{
+	return reg >> map->reg_stride_order;
+}
+
+struct regmap_ram_data {
+	unsigned int *vals;  /* Allocated by caller */
+	bool *read;
+	bool *written;
+	enum regmap_endian reg_endian;
+};
+
+/*
+ * Create a test register map with data stored in RAM, not intended
+ * for practical use.
+ */
+struct regmap *__regmap_init_ram(const struct regmap_config *config,
+				 struct regmap_ram_data *data,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
+
+#define regmap_init_ram(config, data)					\
+	__regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+
+struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+				     struct regmap_ram_data *data,
+				     struct lock_class_key *lock_key,
+				     const char *lock_name);
+
+#define regmap_init_raw_ram(config, data)				\
+	__regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+
+#endif
diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.14/regmap-mmio.c b/snd-alpx-dkms/snd-alpx/core/generic/5.14/regmap-mmio.c
new file mode 100644
index 0000000..3ccdd86
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/5.14/regmap-mmio.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
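Compared with the 4.19 copy earlier in this commit, this 5.14 version adds relaxed and io*() accessor variants, explicit big-endian byte swapping, and noinc (FIFO-style) block writes. Consumers are unaffected: access goes through the usual regmap calls, as in this editor's sketch (register offsets are hypothetical):

	unsigned int chip_id;

	regmap_read(map, 0x00, &chip_id);		/* identification register */
	regmap_write(map, 0x10, 0x1);			/* set a control bit */
	regmap_update_bits(map, 0x10, 0x6, 0x2);	/* read-modify-write a field */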
+ +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/swab.h> + +#include "internal.h" + +struct regmap_mmio_context { + void __iomem *regs; + unsigned int val_bytes; + bool big_endian; + + bool attached_clk; + struct clk *clk; + + void (*reg_write)(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val); + unsigned int (*reg_read)(struct regmap_mmio_context *ctx, + unsigned int reg); +}; + +static int regmap_mmio_regbits_check(size_t reg_bits) +{ + switch (reg_bits) { + case 8: + case 16: + case 32: + return 0; + default: + return -EINVAL; + } +} + +static int regmap_mmio_get_min_stride(size_t val_bits) +{ + int min_stride; + + switch (val_bits) { + case 8: + /* The core treats 0 as 1 */ + min_stride = 0; + break; + case 16: + min_stride = 2; + break; + case 32: + min_stride = 4; + break; + default: + return -EINVAL; + } + + return min_stride; +} + +static void regmap_mmio_write8(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeb(val, ctx->regs + reg); +} + +static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeb_relaxed(val, ctx->regs + reg); +} + +static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val) +{ + iowrite8(val, ctx->regs + reg); +} + +static void regmap_mmio_write16le(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writew(val, ctx->regs + reg); +} + +static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writew_relaxed(val, ctx->regs + reg); +} + +static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val) +{ + iowrite16(val, ctx->regs + reg); +} + +static void regmap_mmio_write16be(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writew(swab16(val), ctx->regs + reg); +} + +static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val) +{ + iowrite16be(val, ctx->regs + reg); +} + +static void regmap_mmio_write32le(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writel(val, ctx->regs + reg); +} + +static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writel_relaxed(val, ctx->regs + reg); +} + +static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val) +{ + iowrite32(val, ctx->regs + reg); +} + +static void regmap_mmio_write32be(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writel(swab32(val), ctx->regs + reg); +} + +static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val) +{ + iowrite32be(val, ctx->regs + reg); +} + +static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val) +{ + struct regmap_mmio_context *ctx = context; + int ret; + + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + ctx->reg_write(ctx, reg, val); + + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return 0; +} + +static int regmap_mmio_noinc_write(void *context, unsigned int reg, + const void *val, size_t val_count) +{ + struct regmap_mmio_context *ctx = context; + int ret = 0; + int i; + + if (!IS_ERR(ctx->clk)) { + ret = 
clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + /* + * There are no native, assembly-optimized write single register + * operations for big endian, so fall back to emulation if this + * is needed. (Single bytes are fine, they are not affected by + * endianness.) + */ + if (ctx->big_endian && (ctx->val_bytes > 1)) { + switch (ctx->val_bytes) { + case 2: + { + const u16 *valp = (const u16 *)val; + for (i = 0; i < val_count; i++) + writew(swab16(valp[i]), ctx->regs + reg); + goto out_clk; + } + case 4: + { + const u32 *valp = (const u32 *)val; + for (i = 0; i < val_count; i++) + writel(swab32(valp[i]), ctx->regs + reg); + goto out_clk; + } +#ifdef CONFIG_64BIT + case 8: + { + const u64 *valp = (const u64 *)val; + for (i = 0; i < val_count; i++) + writeq(swab64(valp[i]), ctx->regs + reg); + goto out_clk; + } +#endif + default: + ret = -EINVAL; + goto out_clk; + } + } + + switch (ctx->val_bytes) { + case 1: + writesb(ctx->regs + reg, (const u8 *)val, val_count); + break; + case 2: + writesw(ctx->regs + reg, (const u16 *)val, val_count); + break; + case 4: + writesl(ctx->regs + reg, (const u32 *)val, val_count); + break; +#ifdef CONFIG_64BIT + case 8: + writesq(ctx->regs + reg, (const u64 *)val, val_count); + break; +#endif + default: + ret = -EINVAL; + break; + } + +out_clk: + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return ret; +} + +static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readb(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readb_relaxed(ctx->regs + reg); +} + +static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread8(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readw(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readw_relaxed(ctx->regs + reg); +} + +static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread16(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return swab16(readw(ctx->regs + reg)); +} + +static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread16be(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readl(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readl_relaxed(ctx->regs + reg); +} + +static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread32(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return swab32(readl(ctx->regs + reg)); +} + +static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread32be(ctx->regs + reg); +} + +static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val) +{ + struct regmap_mmio_context *ctx = context; + int ret; + + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + *val = ctx->reg_read(ctx, reg); + + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return 
0; +} + +static int regmap_mmio_noinc_read(void *context, unsigned int reg, + void *val, size_t val_count) +{ + struct regmap_mmio_context *ctx = context; + int ret = 0; + + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + switch (ctx->val_bytes) { + case 1: + readsb(ctx->regs + reg, (u8 *)val, val_count); + break; + case 2: + readsw(ctx->regs + reg, (u16 *)val, val_count); + break; + case 4: + readsl(ctx->regs + reg, (u32 *)val, val_count); + break; +#ifdef CONFIG_64BIT + case 8: + readsq(ctx->regs + reg, (u64 *)val, val_count); + break; +#endif + default: + ret = -EINVAL; + goto out_clk; + } + + /* + * There are no native, assembly-optimized read single register + * operations for big endian, so fall back to emulation if this + * is needed. (Single bytes are fine, they are not affected by + * endianness.) + */ + if (ctx->big_endian && (ctx->val_bytes > 1)) { + switch (ctx->val_bytes) { + case 2: + swab16_array(val, val_count); + break; + case 4: + swab32_array(val, val_count); + break; +#ifdef CONFIG_64BIT + case 8: + swab64_array(val, val_count); + break; +#endif + default: + ret = -EINVAL; + break; + } + } + +out_clk: + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return ret; +} + + +static void regmap_mmio_free_context(void *context) +{ + struct regmap_mmio_context *ctx = context; + + if (!IS_ERR(ctx->clk)) { + clk_unprepare(ctx->clk); + if (!ctx->attached_clk) + clk_put(ctx->clk); + } + kfree(context); +} + +static const struct regmap_bus regmap_mmio = { + .fast_io = true, + .reg_write = regmap_mmio_write, + .reg_read = regmap_mmio_read, + .reg_noinc_write = regmap_mmio_noinc_write, + .reg_noinc_read = regmap_mmio_noinc_read, + .free_context = regmap_mmio_free_context, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config) +{ + struct regmap_mmio_context *ctx; + int min_stride; + int ret; + + ret = regmap_mmio_regbits_check(config->reg_bits); + if (ret) + return ERR_PTR(ret); + + if (config->pad_bits) + return ERR_PTR(-EINVAL); + + min_stride = regmap_mmio_get_min_stride(config->val_bits); + if (min_stride < 0) + return ERR_PTR(min_stride); + + if (config->reg_stride < min_stride) + return ERR_PTR(-EINVAL); + + if (config->use_relaxed_mmio && config->io_port) + return ERR_PTR(-EINVAL); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->regs = regs; + ctx->val_bytes = config->val_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); + + switch (regmap_get_val_endian(dev, &regmap_mmio, config)) { + case REGMAP_ENDIAN_DEFAULT: + case REGMAP_ENDIAN_LITTLE: +#ifdef __LITTLE_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + switch (config->val_bits) { + case 8: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread8; + ctx->reg_write = regmap_mmio_iowrite8; + } else if (config->use_relaxed_mmio) { + ctx->reg_read = regmap_mmio_read8_relaxed; + ctx->reg_write = regmap_mmio_write8_relaxed; + } else { + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + } + break; + case 16: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread16le; + ctx->reg_write = regmap_mmio_iowrite16le; + } else if (config->use_relaxed_mmio) { + ctx->reg_read = regmap_mmio_read16le_relaxed; + ctx->reg_write = regmap_mmio_write16le_relaxed; + } else { + ctx->reg_read = regmap_mmio_read16le; + ctx->reg_write = regmap_mmio_write16le; + } + break;
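+		/*
+		 * The 32-bit little-endian case below follows the same
+		 * selection order as the 8- and 16-bit cases above: port I/O
+		 * accessors (ioreadX/iowriteX), relaxed MMIO accessors
+		 * without memory barriers, or the default readX/writeX.
+		 */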
case 32: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread32le; + ctx->reg_write = regmap_mmio_iowrite32le; + } else if (config->use_relaxed_mmio) { + ctx->reg_read = regmap_mmio_read32le_relaxed; + ctx->reg_write = regmap_mmio_write32le_relaxed; + } else { + ctx->reg_read = regmap_mmio_read32le; + ctx->reg_write = regmap_mmio_write32le; + } + break; + default: + ret = -EINVAL; + goto err_free; + } + break; + case REGMAP_ENDIAN_BIG: +#ifdef __BIG_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + ctx->big_endian = true; + switch (config->val_bits) { + case 8: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread8; + ctx->reg_write = regmap_mmio_iowrite8; + } else { + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + } + break; + case 16: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread16be; + ctx->reg_write = regmap_mmio_iowrite16be; + } else { + ctx->reg_read = regmap_mmio_read16be; + ctx->reg_write = regmap_mmio_write16be; + } + break; + case 32: + if (config->io_port) { + ctx->reg_read = regmap_mmio_ioread32be; + ctx->reg_write = regmap_mmio_iowrite32be; + } else { + ctx->reg_read = regmap_mmio_read32be; + ctx->reg_write = regmap_mmio_write32be; + } + break; + default: + ret = -EINVAL; + goto err_free; + } + break; + default: + ret = -EINVAL; + goto err_free; + } + + if (clk_id == NULL) + return ctx; + + ctx->clk = clk_get(dev, clk_id); + if (IS_ERR(ctx->clk)) { + ret = PTR_ERR(ctx->clk); + goto err_free; + } + + ret = clk_prepare(ctx->clk); + if (ret < 0) { + clk_put(ctx->clk); + goto err_free; + } + + return ctx; + +err_free: + kfree(ctx); + + return ERR_PTR(ret); +} + +struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap_mmio_context *ctx; + + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __regmap_init(dev, &regmap_mmio, ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk); + +struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap_mmio_context *ctx; + + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __devm_regmap_init(dev, &regmap_mmio, ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk); + +int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk) +{ + struct regmap_mmio_context *ctx = map->bus_context; + + ctx->clk = clk; + ctx->attached_clk = true; + + return clk_prepare(ctx->clk); +} +EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk); + +void regmap_mmio_detach_clk(struct regmap *map) +{ + struct regmap_mmio_context *ctx = map->bus_context; + + clk_unprepare(ctx->clk); + + ctx->attached_clk = false; + ctx->clk = NULL; +} +EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk); + +MODULE_LICENSE("GPL v2"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.17/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/5.17/dmaengine_pcm.h new file mode 100644 index 0000000..38ea046 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.17/dmaengine_pcm.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef __SOUND_DMAENGINE_PCM_H__ +#define __SOUND_DMAENGINE_PCM_H__ + +#include <sound/pcm.h> +#include <sound/soc.h> +#include <linux/dmaengine.h> + +/** + * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM + * substream + * @substream: PCM substream + */ +static inline enum dma_transfer_direction +snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return DMA_MEM_TO_DEV; + else + return DMA_DEV_TO_MEM; +} + +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config); +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan); +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data); +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream); + +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data); +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); + +/* + * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word. + * If this flag is set, the dmaengine driver won't put any restriction on + * the supported sample formats and will set the DMA transfer size to undefined. + * The DAI driver is responsible for disabling any unsupported formats in its + * configuration and catching corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + +/** + * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data + * @addr: Address of the DAI data source or destination register. + * @addr_width: Width of the DAI data source or destination register. + * @maxburst: Maximum number of words (note: words, as in units of the + * src_addr_width member, not bytes) that can be sent to or received from the + * DAI in one burst. + * @filter_data: Custom DMA channel filter data, this will usually be used when + * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + * @peripheral_config: peripheral configuration for programming peripheral + * for dmaengine transfer + * @peripheral_size: peripheral configuration buffer size + */ +struct snd_dmaengine_dai_dma_data { + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + void *filter_data; + const char *chan_name; + unsigned int fifo_size; + unsigned int flags; + void *peripheral_config; + size_t peripheral_size; +}; + +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *config); + +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan); + +/* + * Try to request the DMA channel using compat_request_channel or + * compat_filter_fn if it couldn't be requested through devicetree. + */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well. + */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM driver's hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set, + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function.
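+ *
+ * Example (an illustrative sketch only; "my_pcm_hardware" stands in for a
+ * driver-provided snd_pcm_hardware definition and is not part of this API):
+ *
+ *	static const struct snd_dmaengine_pcm_config my_cfg = {
+ *		.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+ *		.pcm_hardware = &my_pcm_hardware,
+ *		.prealloc_buffer_size = 64 * 1024,
+ *	};
+ *
+ *	ret = devm_snd_dmaengine_pcm_register(dev, &my_cfg, 0);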
+ */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +struct dmaengine_pcm { + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; + const struct snd_dmaengine_pcm_config *config; + struct snd_soc_component component; + unsigned int flags; +}; + +static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) +{ + return container_of(p, struct dmaengine_pcm, component); +} +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.17/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/5.17/pcm_dmaengine.c new file mode 100644 index 0000000..af6f717 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.17/pcm_dmaengine.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. + * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. 
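+ *
+ * A typical call site is a driver's prepare_slave_config callback (sketch;
+ * "my_prepare_slave_config" is an assumed driver function):
+ *
+ *	static int my_prepare_slave_config(struct snd_pcm_substream *substream,
+ *					   struct snd_pcm_hw_params *params,
+ *					   struct dma_slave_config *slave_config)
+ *	{
+ *		int ret;
+ *
+ *		ret = snd_hwparams_to_dma_slave_config(substream, params,
+ *						       slave_config);
+ *		if (ret)
+ *			return ret;
+ *		(then fill in device-specific addresses and maxbursts)
+ *		return 0;
+ *	}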
+ */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width + * fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
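+ *
+ * In practice this is usually called right after
+ * snd_hwparams_to_dma_slave_config(), e.g. (sketch; "dma_data" is assumed
+ * to have been fetched from the DAI by the surrounding driver code):
+ *
+ *	snd_hwparams_to_dma_slave_config(substream, params, slave_config);
+ *	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
+ *						   slave_config);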
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->peripheral_config = dma_data->peripheral_config; + slave_config->peripheral_size = dma_data->peripheral_size; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + prtd->pos += snd_pcm_lib_period_bytes(substream); + if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) + prtd->pos = 0; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
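+ *
+ * It is usually hooked up verbatim in the driver's PCM callbacks, e.g.
+ * (sketch of an assumed ops table):
+ *
+ *	.trigger = snd_dmaengine_pcm_trigger,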
+ */ +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + int ret; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + ret = dmaengine_pcm_prepare_and_submit(substream); + if (ret) + return ret; + dma_async_issue_pending(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dmaengine_resume(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (runtime->info & SNDRV_PCM_INFO_PAUSE) + dmaengine_pause(prtd->dma_chan); + else + dmaengine_terminate_async(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dmaengine_pause(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_STOP: + dmaengine_terminate_async(prtd->dma_chan); + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger); + +/** + * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function is deprecated and should not be used by new drivers, as its + * results may be unreliable. + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + return bytes_to_frames(substream->runtime, prtd->pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue); + +/** + * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function can be used as the PCM pointer callback for dmaengine based PCM + * driver implementations. + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct dma_tx_state state; + enum dma_status status; + unsigned int buf_size; + unsigned int pos = 0; + + status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state); + if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) { + buf_size = snd_pcm_lib_buffer_bytes(substream); + if (state.residue > 0 && state.residue <= buf_size) + pos = buf_size - state.residue; + + runtime->delay = bytes_to_frames(runtime, + state.in_flight_bytes); + } + + return bytes_to_frames(runtime, pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer); + +/** + * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * Returns NULL or the requested DMA channel. + * + * This function requests a DMA channel for use with dmaengine PCM. + */ +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + return dma_request_channel(mask, filter_fn, filter_data); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel); + +/** + * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream + * @substream: PCM substream + * @chan: DMA channel to use for data transfers + * + * Returns 0 on success, a negative error code otherwise. + * + * The function should usually be called from the pcm open callback. Note that + * this function will use the private_data field of the substream's runtime.
So it + * is not available to your pcm driver implementation. + */ +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan) +{ + struct dmaengine_pcm_runtime_data *prtd; + int ret; + + if (!chan) + return -ENXIO; + + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + + prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); + if (!prtd) + return -ENOMEM; + + prtd->dma_chan = chan; + + substream->runtime->private_data = prtd; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); + +/** + * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel + * @substream: PCM substream + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * Returns 0 on success, a negative error code otherwise. + * + * This function will request a DMA channel using the passed filter function and + * data. The function should usually be called from the pcm open callback. Note + * that this function will use private_data field of the substream's runtime. So + * it is not available to your pcm driver implementation. + */ +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data) +{ + return snd_dmaengine_pcm_open(substream, + snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan); + +/** + * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream + * @substream: PCM substream + */ +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); + +/** + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM + * substream and release channel + * @substream: PCM substream + * + * Releases the DMA channel associated with the PCM substream. + */ +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + dma_release_channel(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); + +/** + * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params + * @substream: PCM substream + * @dma_data: DAI DMA data + * @hw: PCM hw params + * @chan: DMA channel to use for data transfers + * + * Returns 0 on success, a negative error code otherwise. + * + * This function will query DMA capability, then refine the pcm hardware + * parameters. 
+ */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set, keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space cannot use a + * format which would produce corrupted audio. + * In case the dma driver does not implement slave_caps, the + * default assumption is that it supports 1, 2 and 4 byte + * widths. + */ + pcm_for_each_format(i) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.2/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/5.2/dmaengine_pcm.h new file mode 100644 index 0000000..c679f61 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.2/dmaengine_pcm.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef __SOUND_DMAENGINE_PCM_H__ +#define __SOUND_DMAENGINE_PCM_H__ + +#include <sound/pcm.h> +#include <sound/soc.h> +#include <linux/dmaengine.h> + +/** + * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM + * substream + * @substream: PCM substream + */ +static inline enum dma_transfer_direction +snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return DMA_MEM_TO_DEV; + else + return DMA_DEV_TO_MEM; +} + +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config); +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan); +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data); +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream); + +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data); +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); + +/* + * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word. + * If this flag is set, the dmaengine driver won't put any restriction on + * the supported sample formats and will set the DMA transfer size to undefined. + * The DAI driver is responsible for disabling any unsupported formats in its + * configuration and catching corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + +/** + * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data + * @addr: Address of the DAI data source or destination register. + * @addr_width: Width of the DAI data source or destination register. + * @maxburst: Maximum number of words (note: words, as in units of the + * src_addr_width member, not bytes) that can be sent to or received from the + * DAI in one burst. + * @slave_id: Slave requester id for the DMA channel. + * @filter_data: Custom DMA channel filter data, this will usually be used when + * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. + * @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + */ +struct snd_dmaengine_dai_dma_data { + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + unsigned int slave_id; + void *filter_data; + const char *chan_name; + unsigned int fifo_size; + unsigned int flags; +}; + +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *config); + + +/* + * Try to request the DMA channel using compat_request_channel or + * compat_filter_fn if it couldn't be requested through devicetree. + */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well.
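+ *
+ * For example (sketch; "cfg" is an assumed snd_dmaengine_pcm_config), a
+ * platform without devicetree DMA bindings might register with:
+ *
+ *	devm_snd_dmaengine_pcm_register(dev, &cfg,
+ *			SND_DMAENGINE_PCM_FLAG_COMPAT |
+ *			SND_DMAENGINE_PCM_FLAG_NO_DT);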
+ */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM driver's hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set, + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function. + */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.2/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/5.2/pcm_dmaengine.c new file mode 100644 index 0000000..89a0592 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.2/pcm_dmaengine.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. + */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and + * slave_id fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->slave_id = dma_data->slave_id; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + prtd->pos += snd_pcm_lib_period_bytes(substream); + if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) + prtd->pos = 0; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
+ */ +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + int ret; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + ret = dmaengine_pcm_prepare_and_submit(substream); + if (ret) + return ret; + dma_async_issue_pending(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dmaengine_resume(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_SUSPEND: + if (runtime->info & SNDRV_PCM_INFO_PAUSE) + dmaengine_pause(prtd->dma_chan); + else + dmaengine_terminate_async(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dmaengine_pause(prtd->dma_chan); + break; + case SNDRV_PCM_TRIGGER_STOP: + dmaengine_terminate_async(prtd->dma_chan); + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger); + +/** + * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function is deprecated and should not be used by new drivers, as its + * results may be unreliable. + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + return bytes_to_frames(substream->runtime, prtd->pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue); + +/** + * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation + * @substream: PCM substream + * + * This function can be used as the PCM pointer callback for dmaengine based PCM + * driver implementations. + */ +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_tx_state state; + enum dma_status status; + unsigned int buf_size; + unsigned int pos = 0; + + status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state); + if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) { + buf_size = snd_pcm_lib_buffer_bytes(substream); + if (state.residue > 0 && state.residue <= buf_size) + pos = buf_size - state.residue; + } + + return bytes_to_frames(substream->runtime, pos); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer); + +/** + * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * Returns NULL or the requested DMA channel. + * + * This function requests a DMA channel for use with dmaengine PCM. + */ +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + return dma_request_channel(mask, filter_fn, filter_data); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel); + +/** + * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream + * @substream: PCM substream + * @chan: DMA channel to use for data transfers + * + * Returns 0 on success, a negative error code otherwise. + * + * The function should usually be called from the pcm open callback. Note that + * this function will use the private_data field of the substream's runtime. So it + * is not available to your pcm driver implementation.
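+ *
+ * Example open callback (sketch; "my_dma_chan" is an assumed channel that
+ * the driver requested earlier):
+ *
+ *	static int my_pcm_open(struct snd_pcm_substream *substream)
+ *	{
+ *		return snd_dmaengine_pcm_open(substream, my_dma_chan);
+ *	}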
+ */ +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan) +{ + struct dmaengine_pcm_runtime_data *prtd; + int ret; + + if (!chan) + return -ENXIO; + + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + + prtd = kzalloc(sizeof(*prtd), GFP_KERNEL); + if (!prtd) + return -ENOMEM; + + prtd->dma_chan = chan; + + substream->runtime->private_data = prtd; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open); + +/** + * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel + * @substream: PCM substream + * @filter_fn: Filter function used to request the DMA channel + * @filter_data: Data passed to the DMA filter function + * + * Returns 0 on success, a negative error code otherwise. + * + * This function will request a DMA channel using the passed filter function and + * data. The function should usually be called from the pcm open callback. Note + * that this function will use the private_data field of the substream's runtime. So + * it is not available to your pcm driver implementation. + */ +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data) +{ + return snd_dmaengine_pcm_open(substream, + snd_dmaengine_pcm_request_channel(filter_fn, filter_data)); +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan); + +/** + * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream + * @substream: PCM substream + */ +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); + +/** + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel + * @substream: PCM substream + * + * Releases the DMA channel associated with the PCM substream. + */ +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + dmaengine_synchronize(prtd->dma_chan); + dma_release_channel(prtd->dma_chan); + kfree(prtd); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.5/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/5.5/dmaengine_pcm.h new file mode 100644 index 0000000..b652206 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.5/dmaengine_pcm.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef __SOUND_DMAENGINE_PCM_H__ +#define __SOUND_DMAENGINE_PCM_H__ + +#include <sound/pcm.h> +#include <sound/soc.h> +#include <linux/dmaengine.h> + +/** + * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM + * substream + * @substream: PCM substream + */ +static inline enum dma_transfer_direction +snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + return DMA_MEM_TO_DEV; + else + return DMA_DEV_TO_MEM; +} + +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config); +int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream); +snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream, + struct dma_chan *chan); +int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream); + +int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream, + dma_filter_fn filter_fn, void *filter_data); +int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream); + +struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn, + void *filter_data); +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream); + +/* + * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word. + * If this flag is set, the dmaengine driver won't put any restriction on + * the supported sample formats and will set the DMA transfer size to undefined. + * The DAI driver is responsible for disabling any unsupported formats in its + * configuration and catching corner cases that are not already handled in + * the ALSA core. + */ +#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0) + +/** + * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data + * @addr: Address of the DAI data source or destination register. + * @addr_width: Width of the DAI data source or destination register. + * @maxburst: Maximum number of words (note: words, as in units of the + * src_addr_width member, not bytes) that can be sent to or received from the + * DAI in one burst. + * @slave_id: Slave requester id for the DMA channel. + * @filter_data: Custom DMA channel filter data, this will usually be used when + * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. + * @fifo_size: FIFO size of the DAI controller in bytes + * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now + */ +struct snd_dmaengine_dai_dma_data { + dma_addr_t addr; + enum dma_slave_buswidth addr_width; + u32 maxburst; + unsigned int slave_id; + void *filter_data; + const char *chan_name; + unsigned int fifo_size; + unsigned int flags; +}; + +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *config); + +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan); + +/* + * Try to request the DMA channel using compat_request_channel or + * compat_filter_fn if it couldn't be requested through devicetree.
+ */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well. + */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM driver's hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set, + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function. + */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.5/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/5.5/pcm_dmaengine.c new file mode 100644 index 0000000..5749a8a --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.5/pcm_dmaengine.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc.
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. + */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and + * slave_id fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
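+ *
+ * A sketch of the usual hw_params flow built on this helper (illustrative
+ * only; in an ASoC driver dma_data would come from snd_soc_dai_get_dma_data()
+ * and error handling is omitted here):
+ *
+ *	struct dma_slave_config slave_config;
+ *
+ *	snd_hwparams_to_dma_slave_config(substream, params, &slave_config);
+ *	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
+ *						   &slave_config);
+ *	dmaengine_slave_config(chan, &slave_config);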
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->slave_id = dma_data->slave_id; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + prtd->pos += snd_pcm_lib_period_bytes(substream); + if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) + prtd->pos = 0; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
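+ *
+ * For illustration, a driver usually wires this helper, together with the
+ * other helpers in this file, into its PCM callbacks; the foo_* names are
+ * hypothetical:
+ *
+ *	static const struct snd_pcm_ops foo_pcm_ops = {
+ *		.open		= foo_pcm_open,
+ *		.close		= snd_dmaengine_pcm_close_release_chan,
+ *		.trigger	= snd_dmaengine_pcm_trigger,
+ *		.pointer	= snd_dmaengine_pcm_pointer,
+ *	};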
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		ret = dmaengine_pcm_prepare_and_submit(substream);
+		if (ret)
+			return ret;
+		dma_async_issue_pending(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		dmaengine_resume(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		dmaengine_pause(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned int buf_size;
+	unsigned int pos = 0;
+
+	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+		buf_size = snd_pcm_lib_buffer_bytes(substream);
+		if (state.residue > 0 && state.residue <= buf_size)
+			pos = buf_size - state.residue;
+	}
+
+	return bytes_to_frames(substream->runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for use with the dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+
+	return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use the private_data field of the substream's runtime. So
+ * it is not available to your pcm driver implementation.
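+ *
+ * A minimal open callback built on this helper; the foo_* names and the
+ * channel stored in the driver data at probe time are assumptions made for
+ * the example:
+ *
+ *	static int foo_pcm_open(struct snd_pcm_substream *substream)
+ *	{
+ *		struct foo_priv *priv = snd_pcm_substream_chip(substream);
+ *
+ *		return snd_dmaengine_pcm_open(substream, priv->dma_chan);
+ *	}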
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+	struct dma_chan *chan)
+{
+	struct dmaengine_pcm_runtime_data *prtd;
+	int ret;
+
+	if (!chan)
+		return -ENXIO;
+
+	ret = snd_pcm_hw_constraint_integer(substream->runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		return ret;
+
+	prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+	if (!prtd)
+		return -ENOMEM;
+
+	prtd->dma_chan = chan;
+
+	substream->runtime->private_data = prtd;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use the private_data field of the substream's
+ * runtime. So it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+	dma_filter_fn filter_fn, void *filter_data)
+{
+	return snd_dmaengine_pcm_open(substream,
+		snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	dma_release_channel(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query the DMA capabilities, then refine the pcm hardware
+ * parameters.
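+ *
+ * Illustrative use from a PCM open callback, starting from a driver-provided
+ * template (the foo_* names are hypothetical):
+ *
+ *	struct snd_pcm_hardware hw = foo_pcm_hardware;
+ *	int ret;
+ *
+ *	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, dma_data,
+ *							&hw, chan);
+ *	if (ret)
+ *		return ret;
+ *	return snd_soc_set_runtime_hwparams(substream, &hw);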
+ */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space can not use the + * format which produces corrupted audio. + * In case the dma driver does not implement the slave_caps the + * default assumption is that it supports 1, 2 and 4 bytes + * widths. + */ + for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.7/dmaengine_pcm.h b/snd-alpx-dkms/snd-alpx/core/generic/5.7/dmaengine_pcm.h new file mode 100644 index 0000000..b652206 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.7/dmaengine_pcm.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2012, Analog Devices Inc. 
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef __SOUND_DMAENGINE_PCM_H__
+#define __SOUND_DMAENGINE_PCM_H__
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <linux/dmaengine.h>
+
+/**
+ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
+ * substream
+ * @substream: PCM substream
+ *
+ * Return: DMA transfer direction
+ */
+static inline enum dma_transfer_direction
+snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
+{
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		return DMA_MEM_TO_DEV;
+	else
+		return DMA_DEV_TO_MEM;
+}
+
+int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
+	const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+	struct dma_chan *chan);
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
+
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+	dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
+
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data);
+struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and will set the DMA transfer size to
+ * undefined. The DAI driver is responsible for disabling any unsupported
+ * formats in its configuration and for catching corner cases that are not
+ * already handled in the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
+/**
+ * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
+ * @addr: Address of the DAI data source or destination register.
+ * @addr_width: Width of the DAI data source or destination register.
+ * @maxburst: Maximum number of words (note: words, as in units of the
+ * src_addr_width member, not bytes) that can be sent to or received from the
+ * DAI in one burst.
+ * @slave_id: Slave requester id for the DMA channel.
+ * @filter_data: Custom DMA channel filter data, this will usually be used when
+ * requesting the DMA channel.
+ * @chan_name: Custom channel name to use when requesting DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
+ */
+struct snd_dmaengine_dai_dma_data {
+	dma_addr_t addr;
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+	unsigned int slave_id;
+	void *filter_data;
+	const char *chan_name;
+	unsigned int fifo_size;
+	unsigned int flags;
+};
+
+void snd_dmaengine_pcm_set_config_from_dai_data(
+	const struct snd_pcm_substream *substream,
+	const struct snd_dmaengine_dai_dma_data *dma_data,
+	struct dma_slave_config *config);
+
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+	struct snd_pcm_substream *substream,
+	struct snd_dmaengine_dai_dma_data *dma_data,
+	struct snd_pcm_hardware *hw,
+	struct dma_chan *chan);
+
+/*
+ * Try to request the DMA channel using compat_request_channel or
+ * compat_filter_fn if it couldn't be requested through devicetree.
+ */ +#define SND_DMAENGINE_PCM_FLAG_COMPAT BIT(0) +/* + * Don't try to request the DMA channels through devicetree. This flag only + * makes sense if SND_DMAENGINE_PCM_FLAG_COMPAT is set as well. + */ +#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1) +/* + * The PCM is half duplex and the DMA channel is shared between capture and + * playback. + */ +#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) + +/** + * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM + * @prepare_slave_config: Callback used to fill in the DMA slave_config for a + * PCM substream. Will be called from the PCM drivers hwparams callback. + * @compat_request_channel: Callback to request a DMA channel for platforms + * which do not use devicetree. + * @process: Callback used to apply processing on samples transferred from/to + * user space. + * @compat_filter_fn: Will be used as the filter function when requesting a + * channel for platforms which do not use devicetree. The filter parameter + * will be the DAI's DMA data. + * @dma_dev: If set, request DMA channel on this device rather than the DAI + * device. + * @chan_names: If set, these custom DMA channel names will be requested at + * registration time. + * @pcm_hardware: snd_pcm_hardware struct to be used for the PCM. + * @prealloc_buffer_size: Size of the preallocated audio buffer. + * + * Note: If both compat_request_channel and compat_filter_fn are set + * compat_request_channel will be used to request the channel and + * compat_filter_fn will be ignored. Otherwise the channel will be requested + * using dma_request_channel with compat_filter_fn as the filter function. + */ +struct snd_dmaengine_pcm_config { + int (*prepare_slave_config)(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + struct dma_chan *(*compat_request_channel)( + struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_substream *substream); + int (*process)(struct snd_pcm_substream *substream, + int channel, unsigned long hwoff, + void *buf, unsigned long bytes); + dma_filter_fn compat_filter_fn; + struct device *dma_dev; + const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; + + const struct snd_pcm_hardware *pcm_hardware; + unsigned int prealloc_buffer_size; +}; + +int snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); +void snd_dmaengine_pcm_unregister(struct device *dev); + +int devm_snd_dmaengine_pcm_register(struct device *dev, + const struct snd_dmaengine_pcm_config *config, + unsigned int flags); + +int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config); + +#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/5.7/pcm_dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/5.7/pcm_dmaengine.c new file mode 100644 index 0000000..4d059ff --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/5.7/pcm_dmaengine.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012, Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Based on: + * imx-pcm-dma-mx2.c, Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> + * mxs-pcm.c, Copyright (C) 2011 Freescale Semiconductor, Inc. 
+ * ep93xx-pcm.c, Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dmaengine.h> +#include <linux/slab.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> + +#include <sound/dmaengine_pcm.h> + +struct dmaengine_pcm_runtime_data { + struct dma_chan *dma_chan; + dma_cookie_t cookie; + + unsigned int pos; +}; + +static inline struct dmaengine_pcm_runtime_data *substream_to_prtd( + const struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + +struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + return prtd->dma_chan; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan); + +/** + * snd_hwparams_to_dma_slave_config - Convert hw_params to dma_slave_config + * @substream: PCM substream + * @params: hw_params + * @slave_config: DMA slave config + * + * This function can be used to initialize a dma_slave_config from a substream + * and hw_params in a dmaengine based PCM driver implementation. + */ +int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream, + const struct snd_pcm_hw_params *params, + struct dma_slave_config *slave_config) +{ + enum dma_slave_buswidth buswidth; + int bits; + + bits = params_physical_width(params); + if (bits < 8 || bits > 64) + return -EINVAL; + else if (bits == 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (bits == 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else if (bits == 24) + buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES; + else if (bits <= 32) + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->direction = DMA_MEM_TO_DEV; + slave_config->dst_addr_width = buswidth; + } else { + slave_config->direction = DMA_DEV_TO_MEM; + slave_config->src_addr_width = buswidth; + } + + slave_config->device_fc = false; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_hwparams_to_dma_slave_config); + +/** + * snd_dmaengine_pcm_set_config_from_dai_data() - Initializes a dma slave config + * using DAI DMA data. + * @substream: PCM substream + * @dma_data: DAI DMA data + * @slave_config: DMA slave configuration + * + * Initializes the {dst,src}_addr, {dst,src}_maxburst, {dst,src}_addr_width and + * slave_id fields of the DMA slave config from the same fields of the DAI DMA + * data struct. The src and dst fields will be initialized depending on the + * direction of the substream. If the substream is a playback stream the dst + * fields will be initialized, if it is a capture stream the src fields will be + * initialized. The {dst,src}_addr_width field will only be initialized if the + * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of + * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If + * both conditions are met the latter takes priority. 
+ */ +void snd_dmaengine_pcm_set_config_from_dai_data( + const struct snd_pcm_substream *substream, + const struct snd_dmaengine_dai_dma_data *dma_data, + struct dma_slave_config *slave_config) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + slave_config->dst_addr = dma_data->addr; + slave_config->dst_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->dst_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->dst_addr_width = dma_data->addr_width; + } else { + slave_config->src_addr = dma_data->addr; + slave_config->src_maxburst = dma_data->maxburst; + if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK) + slave_config->src_addr_width = + DMA_SLAVE_BUSWIDTH_UNDEFINED; + if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED) + slave_config->src_addr_width = dma_data->addr_width; + } + + slave_config->slave_id = dma_data->slave_id; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); + +static void dmaengine_pcm_dma_complete(void *arg) +{ + struct snd_pcm_substream *substream = arg; + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + + prtd->pos += snd_pcm_lib_period_bytes(substream); + if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) + prtd->pos = 0; + + snd_pcm_period_elapsed(substream); +} + +static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) +{ + struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct dma_chan *chan = prtd->dma_chan; + struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction direction; + unsigned long flags = DMA_CTRL_ACK; + + direction = snd_pcm_substream_to_dma_direction(substream); + + if (!substream->runtime->no_period_wakeup) + flags |= DMA_PREP_INTERRUPT; + + prtd->pos = 0; + desc = dmaengine_prep_dma_cyclic(chan, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), direction, flags); + + if (!desc) + return -ENOMEM; + + desc->callback = dmaengine_pcm_dma_complete; + desc->callback_param = substream; + prtd->cookie = dmaengine_submit(desc); + + return 0; +} + +/** + * snd_dmaengine_pcm_trigger - dmaengine based PCM trigger implementation + * @substream: PCM substream + * @cmd: Trigger command + * + * Returns 0 on success, a negative error code otherwise. + * + * This function can be used as the PCM trigger callback for dmaengine based PCM + * driver implementations. 
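+ *
+ * Note that SNDRV_PCM_TRIGGER_SUSPEND only pauses the channel when the
+ * runtime advertises SNDRV_PCM_INFO_PAUSE; otherwise the transfer is
+ * terminated. As an illustrative sketch (hw being the driver's
+ * snd_pcm_hardware template), a driver can gate that flag on the engine's
+ * capabilities, which is also what
+ * snd_dmaengine_pcm_refine_runtime_hwparams() below does among other
+ * refinements:
+ *
+ *	struct dma_slave_caps caps;
+ *
+ *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause &&
+ *	    caps.cmd_resume)
+ *		hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;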
+ */
+int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		ret = dmaengine_pcm_prepare_and_submit(substream);
+		if (ret)
+			return ret;
+		dma_async_issue_pending(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		dmaengine_resume(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		dmaengine_pause(prtd->dma_chan);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		dmaengine_terminate_async(prtd->dma_chan);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
+
+/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
+ * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function can be used as the PCM pointer callback for dmaengine based PCM
+ * driver implementations.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct dma_tx_state state;
+	enum dma_status status;
+	unsigned int buf_size;
+	unsigned int pos = 0;
+
+	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+		buf_size = snd_pcm_lib_buffer_bytes(substream);
+		if (state.residue > 0 && state.residue <= buf_size)
+			pos = buf_size - state.residue;
+
+		runtime->delay = bytes_to_frames(runtime,
+						 state.in_flight_bytes);
+	}
+
+	return bytes_to_frames(runtime, pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
+
+/**
+ * snd_dmaengine_pcm_request_channel - Request channel for the dmaengine PCM
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns NULL or the requested DMA channel.
+ *
+ * This function requests a DMA channel for use with the dmaengine PCM.
+ */
+struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
+	void *filter_data)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+
+	return dma_request_channel(mask, filter_fn, filter_data);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
+
+/**
+ * snd_dmaengine_pcm_open - Open a dmaengine based PCM substream
+ * @substream: PCM substream
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * The function should usually be called from the pcm open callback. Note that
+ * this function will use the private_data field of the substream's runtime.
+ * So it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+	struct dma_chan *chan)
+{
+	struct dmaengine_pcm_runtime_data *prtd;
+	int ret;
+
+	if (!chan)
+		return -ENXIO;
+
+	ret = snd_pcm_hw_constraint_integer(substream->runtime,
+					    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		return ret;
+
+	prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+	if (!prtd)
+		return -ENOMEM;
+
+	prtd->dma_chan = chan;
+
+	substream->runtime->private_data = prtd;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
+
+/**
+ * snd_dmaengine_pcm_open_request_chan - Open a dmaengine based PCM substream and request channel
+ * @substream: PCM substream
+ * @filter_fn: Filter function used to request the DMA channel
+ * @filter_data: Data passed to the DMA filter function
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will request a DMA channel using the passed filter function and
+ * data. The function should usually be called from the pcm open callback. Note
+ * that this function will use the private_data field of the substream's
+ * runtime. So it is not available to your pcm driver implementation.
+ */
+int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+	dma_filter_fn filter_fn, void *filter_data)
+{
+	return snd_dmaengine_pcm_open(substream,
+		snd_dmaengine_pcm_request_channel(filter_fn, filter_data));
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
+/**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+ */
+int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+/**
+ * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM substream and release channel
+ * @substream: PCM substream
+ *
+ * Releases the DMA channel associated with the PCM substream.
+ */
+int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+{
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+
+	dmaengine_synchronize(prtd->dma_chan);
+	dma_release_channel(prtd->dma_chan);
+	kfree(prtd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query the DMA capabilities, then refine the pcm hardware
+ * parameters.
+ */ +int snd_dmaengine_pcm_refine_runtime_hwparams( + struct snd_pcm_substream *substream, + struct snd_dmaengine_dai_dma_data *dma_data, + struct snd_pcm_hardware *hw, + struct dma_chan *chan) +{ + struct dma_slave_caps dma_caps; + u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + snd_pcm_format_t i; + int ret = 0; + + if (!hw || !chan || !dma_data) + return -EINVAL; + + ret = dma_get_slave_caps(chan, &dma_caps); + if (ret == 0) { + if (dma_caps.cmd_pause && dma_caps.cmd_resume) + hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; + if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) + hw->info |= SNDRV_PCM_INFO_BATCH; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + addr_widths = dma_caps.dst_addr_widths; + else + addr_widths = dma_caps.src_addr_widths; + } + + /* + * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep + * hw.formats set to 0, meaning no restrictions are in place. + * In this case it's the responsibility of the DAI driver to + * provide the supported format information. + */ + if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)) + /* + * Prepare formats mask for valid/allowed sample types. If the + * dma does not have support for the given physical word size, + * it needs to be masked out so user space can not use the + * format which produces corrupted audio. + * In case the dma driver does not implement the slave_caps the + * default assumption is that it supports 1, 2 and 4 bytes + * widths. + */ + pcm_for_each_format(i) { + int bits = snd_pcm_format_physical_width(i); + + /* + * Enable only samples with DMA supported physical + * widths + */ + switch (bits) { + case 8: + case 16: + case 24: + case 32: + case 64: + if (addr_widths & (1 << (bits / 8))) + hw->formats |= pcm_format_to_bits(i); + break; + default: + /* Unsupported types */ + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams); + +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/amd_xdma.h b/snd-alpx-dkms/snd-alpx/core/generic/6.2/amd_xdma.h new file mode 100644 index 0000000..b5e23e1 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/amd_xdma.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef _PLATDATA_AMD_XDMA_H +#define _PLATDATA_AMD_XDMA_H + +#include <linux/dmaengine.h> + +/** + * struct xdma_chan_info - DMA channel information + * This information is used to match channel when request dma channel + * @dir: Channel transfer direction + */ +struct xdma_chan_info { + enum dma_transfer_direction dir; +}; + +#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info)) + +struct dma_slave_map; + +/** + * struct xdma_platdata - platform specific data for XDMA engine + * @max_dma_channels: Maximum dma channels in each direction + */ +struct xdma_platdata { + u32 max_dma_channels; + u32 device_map_cnt; + struct dma_slave_map *device_map; +}; + +#endif /* _PLATDATA_AMD_XDMA_H */ diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.c b/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.c new file mode 100644 index 0000000..8a6e6b6 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.c @@ -0,0 +1,1652 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + */ + +/* + * This code implements the DMA subsystem. 
It provides a HW-neutral interface
+ * for other kernel code to use asynchronous memory copy capabilities,
+ * if present, and allows different HW DMA drivers to register as providing
+ * this capability.
+ *
+ * Because we are accelerating what is already a relatively fast operation,
+ * the code goes to great lengths to avoid additional overhead, such as
+ * locking.
+ *
+ * LOCKING:
+ *
+ * The subsystem keeps a global list of dma_device structs, which is
+ * protected by a mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it needs an exclusive channel it can call
+ * dma_request_channel(). Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
+ *
+ * Each device has a channels list, which runs unlocked but is never modified
+ * once the device is registered; it is just set up by the driver.
+ *
+ * See Documentation/driver-api/dmaengine for more details
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/rculist.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+#include <linux/of_dma.h>
+#include <linux/mempool.h>
+#include <linux/numa.h>
+
+#include "dmaengine.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDA(dma_ida);
+static LIST_HEAD(dma_device_list);
+static long dmaengine_ref_count;
+
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+						   rootdir);
+	if (IS_ERR(dma_dev->dbg_dev_root))
+		dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+	debugfs_remove_recursive(dma_dev->dbg_dev_root);
+	dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+				       struct dma_device *dma_dev)
+{
+	struct dma_chan *chan;
+
+	list_for_each_entry(chan, &dma_dev->channels, device_node) {
+		if (chan->client_count) {
+			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+				   chan->dbg_client_name ?: "in-use");
+
+			if (chan->router)
+				seq_printf(s, " (via router: %s)\n",
+					   dev_name(chan->router->dev));
+			else
+				seq_puts(s, "\n");
+		}
+	}
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+	struct dma_device *dma_dev = NULL;
+
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+		seq_printf(s, "dma%d (%s): number of channels: %u\n",
+			   dma_dev->dev_id, dev_name(dma_dev->dev),
+			   dma_dev->chancnt);
+
+		if (dma_dev->dbg_summary_show)
+			dma_dev->dbg_summary_show(s, dma_dev);
+		else
+			dmaengine_dbg_summary_show(s, dma_dev);
+
+		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+			seq_puts(s, "\n");
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+	rootdir = debugfs_create_dir("dmaengine", NULL);
+
+	/* /sys/kernel/debug/dmaengine/summary */
+
debugfs_create_file("summary", 0444, rootdir, NULL, + &dmaengine_summary_fops); +} +#else +static inline void dmaengine_debugfs_init(void) { } +static inline int dmaengine_debug_register(struct dma_device *dma_dev) +{ + return 0; +} + +static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { } +#endif /* DEBUG_FS */ + +/* --- sysfs implementation --- */ + +#define DMA_SLAVE_NAME "slave" + +/** + * dev_to_dma_chan - convert a device pointer to its sysfs container object + * @dev: device node + * + * Must be called under dma_list_mutex. + */ +static struct dma_chan *dev_to_dma_chan(struct device *dev) +{ + struct dma_chan_dev *chan_dev; + + chan_dev = container_of(dev, typeof(*chan_dev), device); + return chan_dev->chan; +} + +static ssize_t memcpy_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_chan *chan; + unsigned long count = 0; + int i; + int err; + + mutex_lock(&dma_list_mutex); + chan = dev_to_dma_chan(dev); + if (chan) { + for_each_possible_cpu(i) + count += per_cpu_ptr(chan->local, i)->memcpy_count; + err = sprintf(buf, "%lu\n", count); + } else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(memcpy_count); + +static ssize_t bytes_transferred_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_chan *chan; + unsigned long count = 0; + int i; + int err; + + mutex_lock(&dma_list_mutex); + chan = dev_to_dma_chan(dev); + if (chan) { + for_each_possible_cpu(i) + count += per_cpu_ptr(chan->local, i)->bytes_transferred; + err = sprintf(buf, "%lu\n", count); + } else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(bytes_transferred); + +static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dma_chan *chan; + int err; + + mutex_lock(&dma_list_mutex); + chan = dev_to_dma_chan(dev); + if (chan) + err = sprintf(buf, "%d\n", chan->client_count); + else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(in_use); + +static struct attribute *dma_dev_attrs[] = { + &dev_attr_memcpy_count.attr, + &dev_attr_bytes_transferred.attr, + &dev_attr_in_use.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_dev); + +static void chan_dev_release(struct device *dev) +{ + struct dma_chan_dev *chan_dev; + + chan_dev = container_of(dev, typeof(*chan_dev), device); + kfree(chan_dev); +} + +static struct class dma_devclass = { + .name = "dma", + .dev_groups = dma_dev_groups, + .dev_release = chan_dev_release, +}; + +/* --- client and device registration --- */ + +/* enable iteration over all operation types */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * struct dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan: associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/* percpu lookup table for memory-to-memory offload providers */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + 
for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine dma_channel_table_init failure: %d\n", err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(dma_channel_table_init); + +/** + * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU + * @chan: DMA channel to test + * @cpu: CPU index which the channel should be close to + * + * Returns true if the channel is in the same NUMA-node as the CPU. + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - finds the channel with min count and in the same NUMA-node as the CPU + * @cap: capability to match + * @cpu: CPU index which the channel should be close to + * + * If some channels are close to the given CPU, the one with the lowest + * reference count is returned. Otherwise, CPU is ignored and only the + * reference count is taken into account. + * + * Must be called under dma_list_mutex. + */ +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * dma_channel_rebalance - redistribute the available channels + * + * Optimize for CPU isolation (each CPU gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. + * + * Must be called under dma_list_mutex. 
+ */ +static void dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) +{ + dma_cap_mask_t has; + + bitmap_and(has.bits, want->bits, device->cap_mask.bits, + DMA_TX_TYPE_END); + return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); +} + +static struct module *dma_chan_to_owner(struct dma_chan *chan) +{ + return chan->device->owner; +} + +/** + * balance_ref_count - catch up the channel reference count + * @chan: channel to balance ->client_count versus dmaengine_ref_count + * + * Must be called under dma_list_mutex. + */ +static void balance_ref_count(struct dma_chan *chan) +{ + struct module *owner = dma_chan_to_owner(chan); + + while (chan->client_count < dmaengine_ref_count) { + __module_get(owner); + chan->client_count++; + } +} + +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + +/** + * dma_chan_get - try to grab a DMA channel's parent driver module + * @chan: channel to grab + * + * Must be called under dma_list_mutex. + */ +static int dma_chan_get(struct dma_chan *chan) +{ + struct module *owner = dma_chan_to_owner(chan); + int ret; + + /* The channel is already in use, update client count */ + if (chan->client_count) { + __module_get(owner); + chan->client_count++; + return 0; + } + + if (!try_module_get(owner)) + return -ENODEV; + + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + + /* allocate upon first client reference */ + if (chan->device->device_alloc_chan_resources) { + ret = chan->device->device_alloc_chan_resources(chan); + if (ret < 0) + goto err_out; + } + + chan->client_count++; + + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) + balance_ref_count(chan); + + return 0; + +err_out: + dma_device_put(chan->device); +module_put_out: + module_put(owner); + return ret; +} + +/** + * dma_chan_put - drop a reference to a DMA channel's parent driver module + * @chan: channel to release + * + * Must be called under dma_list_mutex. 
+ */ +static void dma_chan_put(struct dma_chan *chan) +{ + /* This channel is not in use, bail out */ + if (!chan->client_count) + return; + + chan->client_count--; + + /* This channel is not in use anymore, free it */ + if (!chan->client_count && chan->device->device_free_chan_resources) { + /* Make sure all operations have completed */ + dmaengine_synchronize(chan); + chan->device->device_free_chan_resources(chan); + } + + /* If the channel is used via a DMA request router, free the mapping */ + if (chan->router && chan->router->route_free) { + chan->router->route_free(chan->router->dev, chan->route_data); + chan->router = NULL; + chan->route_data = NULL; + } + + dma_device_put(chan->device); + module_put(dma_chan_to_owner(chan)); +} + +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) +{ + enum dma_status status; + unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); + + dma_async_issue_pending(chan); + do { + status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); + if (time_after_eq(jiffies, dma_sync_wait_timeout)) { + dev_err(chan->device->dev, "%s: timeout!\n", __func__); + return DMA_ERROR; + } + if (status != DMA_IN_PROGRESS) + break; + cpu_relax(); + } while (1); + + return status; +} +EXPORT_SYMBOL(dma_sync_wait); + +/** + * dma_find_channel - find a channel to carry out the operation + * @tx_type: transaction type + */ +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) +{ + return this_cpu_read(channel_table[tx_type]->chan); +} +EXPORT_SYMBOL(dma_find_channel); + +/** + * dma_issue_pending_all - flush all pending operations across all channels + */ +void dma_issue_pending_all(void) +{ + struct dma_device *device; + struct dma_chan *chan; + + rcu_read_lock(); + list_for_each_entry_rcu(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + if (chan->client_count) + device->device_issue_pending(chan); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(dma_issue_pending_all); + +int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) +{ + struct dma_device *device; + + if (!chan || !caps) + return -EINVAL; + + device = chan->device; + + /* check if the channel supports slave transactions */ + if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || + test_bit(DMA_CYCLIC, device->cap_mask.bits))) + return -ENXIO; + + /* + * Check whether it reports it uses the generic slave + * capabilities, if not, that means it doesn't support any + * kind of slave capabilities reporting. + */ + if (!device->directions) + return -ENXIO; + + caps->src_addr_widths = device->src_addr_widths; + caps->dst_addr_widths = device->dst_addr_widths; + caps->directions = device->directions; + caps->min_burst = device->min_burst; + caps->max_burst = device->max_burst; + caps->max_sg_burst = device->max_sg_burst; + caps->residue_granularity = device->residue_granularity; + caps->descriptor_reuse = device->descriptor_reuse; + caps->cmd_pause = !!device->device_pause; + caps->cmd_resume = !!device->device_resume; + caps->cmd_terminate = !!device->device_terminate_all; + + /* + * DMA engine device might be configured with non-uniformly + * distributed slave capabilities per device channels. In this + * case the corresponding driver may provide the device_caps + * callback to override the generic capabilities with + * channel-specific ones. 
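	 *
	 * As an illustrative example (use_residue_based_position() stands in
	 * for driver logic), a client that needs fine-grained position
	 * reporting might check the returned granularity before relying on
	 * residue values:
	 *
	 *	struct dma_slave_caps caps;
	 *
	 *	if (!dma_get_slave_caps(chan, &caps) &&
	 *	    caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_BURST)
	 *		use_residue_based_position();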
+ */ + if (device->device_caps) + device->device_caps(chan, caps); + + return 0; +} +EXPORT_SYMBOL_GPL(dma_get_slave_caps); + +static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, + struct dma_device *dev, + dma_filter_fn fn, void *fn_param) +{ + struct dma_chan *chan; + + if (mask && !dma_device_satisfies_mask(dev, mask)) { + dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); + return NULL; + } + /* devices with multiple channels need special handling as we need to + * ensure that all channels are either private or public. + */ + if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) + list_for_each_entry(chan, &dev->channels, device_node) { + /* some channels are already publicly allocated */ + if (chan->client_count) + return NULL; + } + + list_for_each_entry(chan, &dev->channels, device_node) { + if (chan->client_count) { + dev_dbg(dev->dev, "%s: %s busy\n", + __func__, dma_chan_name(chan)); + continue; + } + if (fn && !fn(chan, fn_param)) { + dev_dbg(dev->dev, "%s: %s filter said false\n", + __func__, dma_chan_name(chan)); + continue; + } + return chan; + } + + return NULL; +} + +static struct dma_chan *find_candidate(struct dma_device *device, + const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) +{ + struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); + int err; + + if (chan) { + /* Found a suitable channel, try to grab, prep, and return it. + * We first set DMA_PRIVATE to disable balance_ref_count as this + * channel will not be published in the general-purpose + * allocator + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; + err = dma_chan_get(chan); + + if (err) { + if (err == -ENODEV) { + dev_dbg(device->dev, "%s: %s module removed\n", + __func__, dma_chan_name(chan)); + list_del_rcu(&device->global_node); + } else + dev_dbg(device->dev, + "%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + + chan = ERR_PTR(err); + } + } + + return chan ? chan : ERR_PTR(-EPROBE_DEFER); +} + +/** + * dma_get_slave_channel - try to get specific channel exclusively + * @chan: target channel + */ +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) +{ + /* lock against __dma_request_channel */ + mutex_lock(&dma_list_mutex); + + if (chan->client_count == 0) { + struct dma_device *device = chan->device; + int err; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; + err = dma_chan_get(chan); + if (err) { + dev_dbg(chan->device->dev, + "%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } + } else + chan = NULL; + + mutex_unlock(&dma_list_mutex); + + + return chan; +} +EXPORT_SYMBOL_GPL(dma_get_slave_channel); + +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* lock against __dma_request_channel */ + mutex_lock(&dma_list_mutex); + + chan = find_candidate(device, &mask, NULL, NULL); + + mutex_unlock(&dma_list_mutex); + + return IS_ERR(chan) ? 
NULL : chan; +} +EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); + +/** + * __dma_request_channel - try to allocate an exclusive channel + * @mask: capabilities that the channel must satisfy + * @fn: optional callback to disposition available channels + * @fn_param: opaque parameter to pass to dma_filter_fn() + * @np: device node to look for DMA channels + * + * Returns pointer to appropriate DMA channel on success or NULL. + */ +struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param, + struct device_node *np) +{ + struct dma_device *device, *_d; + struct dma_chan *chan = NULL; + + /* Find a channel */ + mutex_lock(&dma_list_mutex); + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + /* Finds a DMA controller with matching device node */ + if (np && device->dev->of_node && np != device->dev->of_node) + continue; + + chan = find_candidate(device, mask, fn, fn_param); + if (!IS_ERR(chan)) + break; + + chan = NULL; + } + mutex_unlock(&dma_list_mutex); + + pr_debug("%s: %s (%s)\n", + __func__, + chan ? "success" : "fail", + chan ? dma_chan_name(chan) : NULL); + + return chan; +} +EXPORT_SYMBOL_GPL(__dma_request_channel); + +static const struct dma_slave_map *dma_filter_match(struct dma_device *device, + const char *name, + struct device *dev) +{ + int i; + + if (!device->filter.mapcnt) + return NULL; + + for (i = 0; i < device->filter.mapcnt; i++) { + const struct dma_slave_map *map = &device->filter.map[i]; + + if (!strcmp(map->devname, dev_name(dev)) && + !strcmp(map->slave, name)) + return map; + } + + return NULL; +} + +/** + * dma_request_chan - try to allocate an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + * + * Returns pointer to appropriate DMA channel on success or an error pointer. 
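+ *
+ * A typical probe-time call, shown for illustration with deferred probing
+ * handled by the caller (dev_err_probe() is one common pattern, not
+ * mandated by this API; the "tx" channel name is hypothetical):
+ *
+ *	chan = dma_request_chan(&pdev->dev, "tx");
+ *	if (IS_ERR(chan))
+ *		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
+ *				     "failed to request TX channel\n");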
+ */ +struct dma_chan *dma_request_chan(struct device *dev, const char *name) +{ + struct dma_device *d, *_d; + struct dma_chan *chan = NULL; + + /* If device-tree is present get slave info from here */ + if (dev->of_node) + chan = of_dma_request_slave_channel(dev->of_node, name); + + /* If device was enumerated by ACPI get slave info from here */ + if (has_acpi_companion(dev) && !chan) + chan = acpi_dma_request_slave_chan_by_name(dev, name); + + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; + + /* Try to find the channel via the DMA filter map(s) */ + mutex_lock(&dma_list_mutex); + list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { + dma_cap_mask_t mask; + const struct dma_slave_map *map = dma_filter_match(d, name, dev); + + if (!map) + continue; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = find_candidate(d, &mask, d->filter.fn, map->param); + if (!IS_ERR(chan)) + break; + } + mutex_unlock(&dma_list_mutex); + + if (IS_ERR(chan)) + return chan; + if (!chan) + return ERR_PTR(-EPROBE_DEFER); + +found: +#ifdef CONFIG_DEBUG_FS + chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), + name); +#endif + + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return chan; + chan->slave = dev; + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name); + + return chan; +} +EXPORT_SYMBOL_GPL(dma_request_chan); + +/** + * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities + * @mask: capabilities that the channel must satisfy + * + * Returns pointer to appropriate DMA channel on success or an error pointer. 
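+ *
+ * A short sketch of building and using a capability mask (illustrative
+ * only, not code from this file):
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_MEMCPY, mask);
+ *	chan = dma_request_chan_by_mask(&mask);
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);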
+ */ +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) +{ + struct dma_chan *chan; + + if (!mask) + return ERR_PTR(-ENODEV); + + chan = __dma_request_channel(mask, NULL, NULL, NULL); + if (!chan) { + mutex_lock(&dma_list_mutex); + if (list_empty(&dma_device_list)) + chan = ERR_PTR(-EPROBE_DEFER); + else + chan = ERR_PTR(-ENODEV); + mutex_unlock(&dma_list_mutex); + } + + return chan; +} +EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); + +void dma_release_channel(struct dma_chan *chan) +{ + mutex_lock(&dma_list_mutex); + WARN_ONCE(chan->client_count != 1, + "chan reference count %d != 1\n", chan->client_count); + dma_chan_put(chan); + /* drop PRIVATE cap enabled by __dma_request_channel() */ + if (--chan->device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + + if (chan->slave) { + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + +#ifdef CONFIG_DEBUG_FS + kfree(chan->dbg_client_name); + chan->dbg_client_name = NULL; +#endif + mutex_unlock(&dma_list_mutex); +} +EXPORT_SYMBOL_GPL(dma_release_channel); + +/** + * dmaengine_get - register interest in dma_channels + */ +void dmaengine_get(void) +{ + struct dma_device *device, *_d; + struct dma_chan *chan; + int err; + + mutex_lock(&dma_list_mutex); + dmaengine_ref_count++; + + /* try to grab channels */ + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + err = dma_chan_get(chan); + if (err == -ENODEV) { + /* module removed before we could use it */ + list_del_rcu(&device->global_node); + break; + } else if (err) + dev_dbg(chan->device->dev, + "%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + } + } + + /* if this is the first reference and there were channels + * waiting we need to rebalance to get those channels + * incorporated into the channel table + */ + if (dmaengine_ref_count == 1) + dma_channel_rebalance(); + mutex_unlock(&dma_list_mutex); +} +EXPORT_SYMBOL(dmaengine_get); + +/** + * dmaengine_put - let DMA drivers be removed when ref_count == 0 + */ +void dmaengine_put(void) +{ + struct dma_device *device, *_d; + struct dma_chan *chan; + + mutex_lock(&dma_list_mutex); + dmaengine_ref_count--; + BUG_ON(dmaengine_ref_count < 0); + /* drop channel references */ + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + dma_chan_put(chan); + } + mutex_unlock(&dma_list_mutex); +} +EXPORT_SYMBOL(dmaengine_put); + +static bool device_has_all_tx_types(struct dma_device *device) +{ + /* A device that satisfies this test has channels that will never cause + * an async_tx channel switch event as all possible operation types can + * be handled. 
+ */ + #ifdef CONFIG_ASYNC_TX_DMA + if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) + return false; + #endif + + #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) + if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) + return false; + #endif + + #if IS_ENABLED(CONFIG_ASYNC_XOR) + if (!dma_has_cap(DMA_XOR, device->cap_mask)) + return false; + + #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA + if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) + return false; + #endif + #endif + + #if IS_ENABLED(CONFIG_ASYNC_PQ) + if (!dma_has_cap(DMA_PQ, device->cap_mask)) + return false; + + #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA + if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) + return false; + #endif + #endif + + return true; +} + +static int get_dma_id(struct dma_device *device) +{ + int rc = ida_alloc(&dma_ida, GFP_KERNEL); + + if (rc < 0) + return rc; + device->dev_id = rc; + return 0; +} + +static int __dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + chan->local = alloc_percpu(typeof(*chan->local)); + if (!chan->local) + return -ENOMEM; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + rc = -ENOMEM; + goto err_free_local; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); + if (chan->chan_id < 0) { + pr_err("%s: unable to alloc ida for chan: %d\n", + __func__, chan->chan_id); + rc = chan->chan_id; + goto err_free_dev; + } + + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->dev_id = device->dev_id; + dev_set_name(&chan->dev->device, "dma%dchan%d", + device->dev_id, chan->chan_id); + rc = device_register(&chan->dev->device); + if (rc) + goto err_out_ida; + chan->client_count = 0; + device->chancnt++; + + return 0; + + err_out_ida: + ida_free(&device->chan_ida, chan->chan_id); + err_free_dev: + kfree(chan->dev); + err_free_local: + free_percpu(chan->local); + chan->local = NULL; + return rc; +} + +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + +static void __dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", + __func__, chan->client_count); + mutex_lock(&dma_list_mutex); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + ida_free(&device->chan_ida, chan->chan_id); + device_unregister(&chan->dev->device); + free_percpu(chan->local); +} + +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + +/** + * dma_async_device_register - registers DMA devices found + * @device: pointer to &struct dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. 
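+ *
+ * A condensed registration sketch (hypothetical driver fields, shown only
+ * to illustrate the mandatory hooks validated below):
+ *
+ *	dma_cap_set(DMA_SLAVE, mydev->ddev.cap_mask);
+ *	mydev->ddev.dev = &pdev->dev;
+ *	mydev->ddev.device_tx_status = mydrv_tx_status;
+ *	mydev->ddev.device_issue_pending = mydrv_issue_pending;
+ *	rc = dma_async_device_register(&mydev->ddev);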
+ */
+int dma_async_device_register(struct dma_device *device)
+{
+	int rc;
+	struct dma_chan *chan;
+
+	if (!device)
+		return -ENODEV;
+
+	/* validate device routines */
+	if (!device->dev) {
+		pr_err("DMA device must have dev\n");
+		return -EIO;
+	}
+
+	device->owner = device->dev->driver->owner;
+
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}
+
+	if (!device->device_release)
+		dev_dbg(device->dev,
+			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+	kref_init(&device->ref);
+
+	/* note: this only matters in the
+	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
+	 */
+	if (device_has_all_tx_types(device))
+		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
+	rc = get_dma_id(device);
+	if (rc != 0)
+		return rc;
+
+	ida_init(&device->chan_ida);
+
+	/* represent channels in sysfs.
Probably want devs too */ + list_for_each_entry(chan, &device->channels, device_node) { + rc = __dma_async_device_channel_register(device, chan); + if (rc < 0) + goto err_out; + } + + mutex_lock(&dma_list_mutex); + /* take references on public channels */ + if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) + list_for_each_entry(chan, &device->channels, device_node) { + /* if clients are already waiting for channels we need + * to take references on their behalf + */ + if (dma_chan_get(chan) == -ENODEV) { + /* note we can only get here for the first + * channel as the remaining channels are + * guaranteed to get a reference + */ + rc = -ENODEV; + mutex_unlock(&dma_list_mutex); + goto err_out; + } + } + list_add_tail_rcu(&device->global_node, &dma_device_list); + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + device->privatecnt++; /* Always private */ + dma_channel_rebalance(); + mutex_unlock(&dma_list_mutex); + + dmaengine_debug_register(device); + + return 0; + +err_out: + /* if we never registered a channel just release the idr */ + if (!device->chancnt) { + ida_free(&dma_ida, device->dev_id); + return rc; + } + + list_for_each_entry(chan, &device->channels, device_node) { + if (chan->local == NULL) + continue; + mutex_lock(&dma_list_mutex); + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); + } + return rc; +} +EXPORT_SYMBOL(dma_async_device_register); + +/** + * dma_async_device_unregister - unregister a DMA device + * @device: pointer to &struct dma_device + * + * This routine is called by dma driver exit routines, dmaengine holds module + * references to prevent it being called while channels are in use. + */ +void dma_async_device_unregister(struct dma_device *device) +{ + struct dma_chan *chan, *n; + + dmaengine_debug_unregister(device); + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __dma_async_device_channel_unregister(device, chan); + + mutex_lock(&dma_list_mutex); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); + dma_channel_rebalance(); + ida_free(&dma_ida, device->dev_id); + dma_device_put(device); + mutex_unlock(&dma_list_mutex); +} +EXPORT_SYMBOL(dma_async_device_unregister); + +static void dmam_device_release(struct device *dev, void *res) +{ + struct dma_device *device; + + device = *(struct dma_device **)res; + dma_async_device_unregister(device); +} + +/** + * dmaenginem_async_device_register - registers DMA devices found + * @device: pointer to &struct dma_device + * + * The operation is managed and will be undone on driver detach. 
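+ *
+ * Typically a one-liner in a probe routine (sketch, hypothetical names);
+ * no matching unregister call is needed on the remove path:
+ *
+ *	rc = dmaenginem_async_device_register(&mydev->ddev);
+ *	if (rc)
+ *		return rc;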
+ */ +int dmaenginem_async_device_register(struct dma_device *device) +{ + void *p; + int ret; + + p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); + if (!p) + return -ENOMEM; + + ret = dma_async_device_register(device); + if (!ret) { + *(struct dma_device **)p = device; + devres_add(device->dev, p); + } else { + devres_free(p); + } + + return ret; +} +EXPORT_SYMBOL(dmaenginem_async_device_register); + +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } +static struct dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), + #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), + #endif +}; + +static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 1: + return &unmap_pool[0]; +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; +#endif + default: + BUG(); + return NULL; + } +} + +static void dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + } + cnt = unmap->map_cnt; + mempool_free(unmap, __get_unmap_pool(cnt)->pool); +} + +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, dmaengine_unmap); +} +EXPORT_SYMBOL_GPL(dmaengine_unmap_put); + +static void dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + + mempool_destroy(p->pool); + p->pool = NULL; + kmem_cache_destroy(p->cache); + p->cache = NULL; + } +} + +static int __init dmaengine_init_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; + + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; + } + + if (i == ARRAY_SIZE(unmap_pool)) + return 0; + + dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} + +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + unmap->map_cnt = nr; + + return unmap; +} +EXPORT_SYMBOL(dmaengine_get_unmap_data); + +void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan) +{ + tx->chan = chan; + #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH + spin_lock_init(&tx->lock); + #endif +} +EXPORT_SYMBOL(dma_async_tx_descriptor_init); + +static inline int 
desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + +/** + * dma_wait_for_async_tx - spin wait for a transaction to complete + * @tx: in-flight transaction to wait on + */ +enum dma_status +dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); + + if (!tx) + return DMA_COMPLETE; + + while (tx->cookie == -EBUSY) { + if (time_after_eq(jiffies, dma_sync_wait_timeout)) { + dev_err(tx->chan->device->dev, + "%s timeout waiting for descriptor submission\n", + __func__); + return DMA_ERROR; + } + cpu_relax(); + } + return dma_sync_wait(tx->chan, tx->cookie); +} +EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); + +/** + * dma_run_dependencies - process dependent operations on the target channel + * @tx: transaction with dependencies + * + * Helper routine for DMA drivers to process (start) dependent operations + * on their target channel. 
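+ *
+ * Drivers usually call this from their descriptor clean-up path, right
+ * after completing the cookie (sketch, the descriptor name is
+ * hypothetical):
+ *
+ *	dma_cookie_complete(&desc->txd);
+ *	dma_run_dependencies(&desc->txd);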
+ */ +void dma_run_dependencies(struct dma_async_tx_descriptor *tx) +{ + struct dma_async_tx_descriptor *dep = txd_next(tx); + struct dma_async_tx_descriptor *dep_next; + struct dma_chan *chan; + + if (!dep) + return; + + /* we'll submit tx->next now, so clear the link */ + txd_clear_next(tx); + chan = dep->chan; + + /* keep submitting up until a channel switch is detected + * in that case we will be called again as a result of + * processing the interrupt from async_tx_channel_switch + */ + for (; dep; dep = dep_next) { + txd_lock(dep); + txd_clear_parent(dep); + dep_next = txd_next(dep); + if (dep_next && dep_next->chan == chan) + txd_clear_next(dep); /* ->next will be submitted */ + else + dep_next = NULL; /* submit current dep and terminate */ + txd_unlock(dep); + + dep->tx_submit(dep); + } + + chan->device->device_issue_pending(chan); +} +EXPORT_SYMBOL_GPL(dma_run_dependencies); + +static int __init dma_bus_init(void) +{ + int err = dmaengine_init_unmap_pool(); + + if (err) + return err; + + err = class_register(&dma_devclass); + if (!err) + dmaengine_debugfs_init(); + + return err; +} +arch_initcall(dma_bus_init); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.h b/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.h new file mode 100644 index 0000000..53f16d3 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/dmaengine.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The contents of this file are private to DMA engine drivers, and is not + * part of the API to be used by DMA engine users. + */ +#ifndef DMAENGINE_H +#define DMAENGINE_H + +#include <linux/bug.h> +#include <linux/dmaengine.h> + +/** + * dma_cookie_init - initialize the cookies for a DMA channel + * @chan: dma channel to initialize + */ +static inline void dma_cookie_init(struct dma_chan *chan) +{ + chan->cookie = DMA_MIN_COOKIE; + chan->completed_cookie = DMA_MIN_COOKIE; +} + +/** + * dma_cookie_assign - assign a DMA engine cookie to the descriptor + * @tx: descriptor needing cookie + * + * Assign a unique non-zero per-channel cookie to the descriptor. + * Note: caller is expected to hold a lock to prevent concurrency. + */ +static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *chan = tx->chan; + dma_cookie_t cookie; + + cookie = chan->cookie + 1; + if (cookie < DMA_MIN_COOKIE) + cookie = DMA_MIN_COOKIE; + tx->cookie = chan->cookie = cookie; + + return cookie; +} + +/** + * dma_cookie_complete - complete a descriptor + * @tx: descriptor to complete + * + * Mark this descriptor complete by updating the channels completed + * cookie marker. Zero the descriptors cookie to prevent accidental + * repeated completions. + * + * Note: caller is expected to hold a lock to prevent concurrency. + */ +static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) +{ + BUG_ON(tx->cookie < DMA_MIN_COOKIE); + tx->chan->completed_cookie = tx->cookie; + tx->cookie = 0; +} + +/** + * dma_cookie_status - report cookie status + * @chan: dma channel + * @cookie: cookie we are interested in + * @state: dma_tx_state structure to return last/used cookies + * + * Report the status of the cookie, filling in the state structure if + * non-NULL. No locking is required. 
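+ *
+ * A typical device_tx_status implementation wraps this helper and only
+ * computes the residue for transfers still in flight (sketch,
+ * my_get_residue() is hypothetical):
+ *
+ *	ret = dma_cookie_status(chan, cookie, state);
+ *	if (ret == DMA_COMPLETE)
+ *		return ret;
+ *	dma_set_residue(state, my_get_residue(chan, cookie));
+ *	return ret;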
+ */ +static inline enum dma_status dma_cookie_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + dma_cookie_t used, complete; + + used = chan->cookie; + complete = chan->completed_cookie; + barrier(); + if (state) { + state->last = complete; + state->used = used; + state->residue = 0; + state->in_flight_bytes = 0; + } + return dma_async_is_complete(cookie, complete, used); +} + +static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) +{ + if (state) + state->residue = residue; +} + +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +/** + * dmaengine_desc_get_callback - get the passed in callback function + * @tx: tx descriptor + * @cb: temp struct to hold the callback info + * + * Fill the passed in cb struct with what's available in the passed in + * tx descriptor struct + * No locking is required. + */ +static inline void +dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, + struct dmaengine_desc_callback *cb) +{ + cb->callback = tx->callback; + cb->callback_result = tx->callback_result; + cb->callback_param = tx->callback_param; +} + +/** + * dmaengine_desc_callback_invoke - call the callback function in cb struct + * @cb: temp struct that is holding the callback info + * @result: transaction result + * + * Call the callback function provided in the cb struct with the parameter + * in the cb struct. + * Locking is dependent on the driver. + */ +static inline void +dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, + const struct dmaengine_result *result) +{ + struct dmaengine_result dummy_result = { + .result = DMA_TRANS_NOERROR, + .residue = 0 + }; + + if (cb->callback_result) { + if (!result) + result = &dummy_result; + cb->callback_result(cb->callback_param, result); + } else if (cb->callback) { + cb->callback(cb->callback_param); + } +} + +/** + * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and + * then immediately call the callback. + * @tx: dma async tx descriptor + * @result: transaction result + * + * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() + * in a single function since no work is necessary in between for the driver. + * Locking is dependent on the driver. + */ +static inline void +dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, + const struct dmaengine_result *result) +{ + struct dmaengine_desc_callback cb; + + dmaengine_desc_get_callback(tx, &cb); + dmaengine_desc_callback_invoke(&cb, result); +} + +/** + * dmaengine_desc_callback_valid - verify the callback is valid in cb + * @cb: callback info struct + * + * Return a bool that verifies whether callback in cb is valid or not. + * No locking is required. 
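+ *
+ * Commonly used to skip needless work when the client installed no
+ * callback at all (sketch):
+ *
+ *	dmaengine_desc_get_callback(&vd->tx, &cb);
+ *	if (dmaengine_desc_callback_valid(&cb))
+ *		dmaengine_desc_callback_invoke(&cb, NULL);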
+ */ +static inline bool +dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) +{ + return cb->callback || cb->callback_result; +} + +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> + +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) { + return dma_dev->dbg_dev_root; +} +#else +struct dentry; +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) +{ + return NULL; +} +#endif /* CONFIG_DEBUG_FS */ + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.c b/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.c new file mode 100644 index 0000000..a6f4265 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_move_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +/** + * vchan_tx_desc_free - free a reusable descriptor + * @tx: the transfer + * + * This function frees a previously allocated reusable descriptor. The only + * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the + * transfer. + * + * Returns 0 upon success + */ +int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + list_del(&vd->node); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", + vc, vd, vd->tx.cookie); + vc->desc_free(vd); + return 0; +} +EXPORT_SYMBOL_GPL(vchan_tx_desc_free); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
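+ * Cyclic descriptors are treated specially: the current cyclic descriptor
+ * is picked up through vc->cyclic and only has its callback invoked; it is
+ * never freed here, so the same descriptor can complete once per period.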
+ */ +static void vchan_complete(struct tasklet_struct *t) +{ + struct virt_dma_chan *vc = from_tasklet(vc, t, task); + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + + list_for_each_entry_safe(vd, _vd, &head, node) { + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + struct virt_dma_desc *vd, *_vd; + + list_for_each_entry_safe(vd, _vd, head, node) { + list_del(&vd->node); + vchan_vdesc_fini(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_allocated); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); + + tasklet_setup(&vc->task, vchan_complete); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.h b/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.h new file mode 100644 index 0000000..e9f5250 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/virt-dma.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); +extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); +extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *); + +/** + * vchan_tx_prep - prepare a descriptor + * @vc: virtual channel allocating this descriptor + * @vd: virtual descriptor to prepare + * @tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + unsigned long flags; + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 
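+	/* Wire the generic descriptor to this channel's submit and free
+	 * hooks before it is handed back to the client.
+	 */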
+ vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + vd->tx.desc_free = vchan_tx_desc_free; + + vd->tx_result.result = DMA_TRANS_NOERROR; + vd->tx_result.residue = 0; + + spin_lock_irqsave(&vc->lock, flags); + list_add_tail(&vd->node, &vc->desc_allocated); + spin_unlock_irqrestore(&vc->lock, flags); + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * @vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * @vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + dma_cookie_t cookie; + + cookie = vd->tx.cookie; + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_vdesc_fini - Free or reuse a descriptor + * @vd: virtual descriptor to free/reuse + */ +static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + list_add(&vd->node, &vc->desc_allocated); + spin_unlock_irqrestore(&vc->lock, flags); + } else { + vc->desc_free(vd); + } +} + +/** + * vchan_cyclic_callback - report the completion of a period + * @vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_terminate_vdesc - Disable pending cyclic callback + * @vd: virtual descriptor to be terminated + * + * vc.lock must be held by caller + */ +static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + list_add_tail(&vd->node, &vc->desc_terminated); + + if (vc->cyclic == vd) + vc->cyclic = NULL; +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * @vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + return list_first_entry_or_null(&vc->desc_issued, + struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * @vc: virtual channel to get descriptors from + * @head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_allocated, head); + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + struct virt_dma_desc *vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + 
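+	/* Collect every descriptor the channel still owns and drop their
+	 * reuse flag under the lock; the actual freeing happens outside it.
+	 */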
vchan_get_all_descriptors(vc, &head); + list_for_each_entry(vd, &head, node) + dmaengine_desc_clear_reuse(&vd->tx); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +/** + * vchan_synchronize() - synchronize callback execution to the current context + * @vc: virtual channel to synchronize + * + * Makes sure that all scheduled or active callbacks have finished running. For + * proper operation the caller has to ensure that no new callbacks are scheduled + * after the invocation of this function started. + * Free up the terminated cyclic descriptor to prevent memory leakage. + */ +static inline void vchan_synchronize(struct virt_dma_chan *vc) +{ + LIST_HEAD(head); + unsigned long flags; + + tasklet_kill(&vc->task); + + spin_lock_irqsave(&vc->lock, flags); + + list_splice_tail_init(&vc->desc_terminated, &head); + + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h b/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h new file mode 100644 index 0000000..4ee96de --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef __DMA_XDMA_REGS_H +#define __DMA_XDMA_REGS_H + +/* The length of register space exposed to host */ +#define XDMA_REG_SPACE_LEN 65536 + +/* + * maximum number of DMA channels for each direction: + * Host to Card (H2C) or Card to Host (C2H) + */ +#define XDMA_MAX_CHANNELS 4 + +/* + * macros to define the number of descriptor blocks can be used in one + * DMA transfer request. + * the DMA engine uses a linked list of descriptor blocks that specify the + * source, destination, and length of the DMA transfers. + */ +#define XDMA_DESC_BLOCK_NUM BIT(7) +#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1) + +/* descriptor definitions */ +#define XDMA_DESC_ADJACENT 32 +#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1) +#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8) +#define XDMA_DESC_MAGIC 0xad4bUL +#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16) +#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0) +#define XDMA_DESC_STOPPED BIT(0) +#define XDMA_DESC_COMPLETED BIT(1) +#define XDMA_DESC_BLEN_BITS 28 +#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE) + +/* macros to construct the descriptor control word */ +#define XDMA_DESC_CONTROL(adjacent, flag) \ + (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \ + FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \ + FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag))) +#define XDMA_DESC_CONTROL_LAST \ + XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED) +#define XDMA_DESC_CONTROL_CYCLIC \ + XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED) + +/* + * Descriptor for a single contiguous memory block transfer. + * + * Multiple descriptors are linked by means of the next pointer. An additional + * extra adjacent number gives the amount of extra contiguous descriptors. + * + * The descriptors are in root complex memory, and the bytes in the 32-bit + * words must be in little-endian byte ordering. 
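+ *
+ * As a simplified example, a transfer split over two descriptors fills in
+ * control, bytes, src_addr and dst_addr for each of them, with the first
+ * descriptor's next_desc holding the DMA address of the second (the driver
+ * below actually chains blocks of XDMA_DESC_ADJACENT descriptors).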
+ */ +struct xdma_hw_desc { + __le32 control; + __le32 bytes; + __le64 src_addr; + __le64 dst_addr; + __le64 next_desc; +}; + +#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) +#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) +#define XDMA_DESC_BLOCK_ALIGN 32 +#define XDMA_DESC_BLOCK_BOUNDARY 4096 + +/* + * Channel registers + */ +#define XDMA_CHAN_IDENTIFIER 0x0 +#define XDMA_CHAN_CONTROL 0x4 +#define XDMA_CHAN_CONTROL_W1S 0x8 +#define XDMA_CHAN_CONTROL_W1C 0xc +#define XDMA_CHAN_STATUS 0x40 +#define XDMA_CHAN_STATUS_RC 0x44 +#define XDMA_CHAN_COMPLETED_DESC 0x48 +#define XDMA_CHAN_ALIGNMENTS 0x4c +#define XDMA_CHAN_INTR_ENABLE 0x90 +#define XDMA_CHAN_INTR_ENABLE_W1S 0x94 +#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c + +#define XDMA_CHAN_STRIDE 0x100 +#define XDMA_CHAN_H2C_OFFSET 0x0 +#define XDMA_CHAN_C2H_OFFSET 0x1000 +#define XDMA_CHAN_H2C_TARGET 0x0 +#define XDMA_CHAN_C2H_TARGET 0x1 + +/* macro to check if channel is available */ +#define XDMA_CHAN_MAGIC 0x1fc0 +#define XDMA_CHAN_CHECK_TARGET(id, target) \ + (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target)) + +/* bits of the channel control register */ +#define CHAN_CTRL_RUN_STOP BIT(0) +#define CHAN_CTRL_IE_DESC_STOPPED BIT(1) +#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2) +#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) +#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4) +#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6) +#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9) +#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14) +#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19) +#define CHAN_CTRL_NON_INCR_ADDR BIT(25) +#define CHAN_CTRL_POLL_MODE_WB BIT(26) +#define CHAN_CTRL_TRANSFER_INFO_WB BIT(27) + +#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \ + CHAN_CTRL_IE_DESC_STOPPED | \ + CHAN_CTRL_IE_DESC_COMPLETED | \ + CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + +#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START + +#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + +/* bits of the channel interrupt enable mask */ +#define CHAN_IM_DESC_ERROR BIT(19) +#define CHAN_IM_READ_ERROR BIT(9) +#define CHAN_IM_IDLE_STOPPED BIT(6) +#define CHAN_IM_MAGIC_STOPPED BIT(4) +#define CHAN_IM_DESC_COMPLETED BIT(2) +#define CHAN_IM_DESC_STOPPED BIT(1) + +#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \ + CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \ + CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED) + +/* + * Channel SGDMA registers + */ +#define XDMA_SGDMA_IDENTIFIER 0x4000 +#define XDMA_SGDMA_DESC_LO 0x4080 +#define XDMA_SGDMA_DESC_HI 0x4084 +#define XDMA_SGDMA_DESC_ADJ 0x4088 +#define XDMA_SGDMA_DESC_CREDIT 0x408c + +/* + * interrupt registers + */ +#define XDMA_IRQ_IDENTIFIER 0x2000 +#define XDMA_IRQ_USER_INT_EN 0x2004 +#define XDMA_IRQ_USER_INT_EN_W1S 0x2008 +#define XDMA_IRQ_USER_INT_EN_W1C 0x200c +#define XDMA_IRQ_CHAN_INT_EN 0x2010 +#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014 +#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018 +#define XDMA_IRQ_USER_INT_REQ 0x2040 +#define XDMA_IRQ_CHAN_INT_REQ 0x2044 +#define XDMA_IRQ_USER_INT_PEND 0x2048 +#define XDMA_IRQ_CHAN_INT_PEND 0x204c +#define XDMA_IRQ_USER_VEC_NUM 0x2080 +#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0 + +#define XDMA_IRQ_VEC_SHIFT 8 + +#endif /* __DMA_XDMA_REGS_H */ diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma.c 
b/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma.c
new file mode 100644
index 0000000..6c89145
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/6.2/xilinx/xdma.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be both in
+ * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+
+#include <linux/regmap.h>
+/* for the LINUX_VERSION_CODE / KERNEL_VERSION checks below */
+#include <linux/version.h>
+
+#if defined(CONFIG_KERNEL_REDHAT)
+	#warning RedHat kernel
+	#if KERNEL_VERSION(5, 19, 0) <= LINUX_VERSION_CODE
+		/* Use generic include */
+		#include "../../../../include/5.6/virt-dma.h"
+	#elif KERNEL_VERSION(4, 18, 0) <= LINUX_VERSION_CODE
+		/* Use generic include: the files are identical */
+		#warning RedHat 4.18 at least
+		#include "../../../../include/5.6/virt-dma.h"
+	#else
+		#error RedHat kernel NOT supported
+	#endif
+
+#else
+	/* Generic kernels */
+	#warning "Generic kernels"
+	#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE
+		#include "../virt-dma.h"
+	#elif KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+		#include "../../../../include/5.6/virt-dma.h"
+	#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+		#include "../../../../include/5.3/virt-dma.h"
+	#else
+		#include "../../../../include/4.16/virt-dma.h"
+	#endif
+
+#endif
+
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include <linux/dmaengine.h>
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/dmaengine.h"
+#else
+#include "../../../../include/4.16/dmaengine.h"
+#endif
+
+#include "../../../../include/6.2/amd_xdma.h"
+#include <linux/platform_device.h>
+#include "../amd_xdma.h"
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+	void *virt_addr;
+	dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_c2h_write_back - Write-back block, written by the XDMA.
+ * @magic_status_bit: magic value (0x52B4), written by the XDMA once the
+ *	transfer completes
+ * @length: effective transfer length (in bytes)
+ * @padding_1: padding so the structure is aligned on 32 bytes
+ * @dma_addr: DMA address of this write-back block
+ */
+struct xdma_c2h_write_back {
+	__le32 magic_status_bit;
+	__le32 length;
+	u32 padding_1[6];
+	dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ * @write_back: C2H metadata write-back block
+ */
+struct xdma_chan {
+	struct virt_dma_chan vchan;
+	void *xdev_hdl;
+	u32 base;
+	struct dma_pool *desc_pool;
+	bool busy;
+	enum dma_transfer_direction dir;
+	struct dma_slave_config cfg;
+	u32 irq;
+	struct xdma_c2h_write_back *write_back;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
+ */
+struct xdma_desc {
+	struct virt_dma_desc vdesc;
+	struct xdma_chan *chan;
+	enum dma_transfer_direction dir;
+	struct xdma_desc_block *desc_blocks;
+	u32 dblk_num;
+	u32 desc_num;
+	u32 completed_desc_num;
+	bool cyclic;
+	bool interleaved_dma;
+	u32 periods;
+	u32 period_size;
+	u32 frames_left;
+	bool error;
+};
+
+#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+	struct platform_device *pdev;
+	struct dma_device dma_dev;
+	struct regmap *rmap;
+	struct xdma_chan *h2c_chans;
+	struct xdma_chan *c2h_chans;
+	u32 h2c_chan_num;
+	u32 c2h_chan_num;
+	u32 irq_start;
+	u32 irq_num;
+	u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...)
\ + dev_err(&(xdev)->pdev->dev, fmt, ##args) +#define XDMA_CHAN_NUM(_xd) ({ \ + typeof(_xd) (xd) = (_xd); \ + ((xd)->h2c_chan_num + (xd)->c2h_chan_num); }) + +/* Get the last desc in a desc block */ +static inline void *xdma_blk_last_desc(struct xdma_desc_block *block) +{ + return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE; +} + +/** + * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer + * @sw_desc: Tx descriptor pointer + */ +static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc) +{ + struct xdma_desc_block *block; + u32 last_blk_desc, desc_control; + struct xdma_hw_desc *desc; + int i; + + desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0); + for (i = 1; i < sw_desc->dblk_num; i++) { + block = &sw_desc->desc_blocks[i - 1]; + desc = xdma_blk_last_desc(block); + + if (!(i & XDMA_DESC_BLOCK_MASK)) { + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); + continue; + } + desc->control = cpu_to_le32(desc_control); + desc->next_desc = cpu_to_le64(block[1].dma_addr); + } + + /* update the last block */ + last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; + if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) { + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2]; + desc = xdma_blk_last_desc(block); + desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0); + desc->control = cpu_to_le32(desc_control); + } + + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; + desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); +} + +/** + * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer + * @sw_desc: Tx descriptor pointer + */ +static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc) +{ + struct xdma_desc_block *block; + struct xdma_hw_desc *desc; + int i; + + block = sw_desc->desc_blocks; + for (i = 0; i < sw_desc->desc_num - 1; i++) { + desc = block->virt_addr + i * XDMA_DESC_SIZE; + desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE)); + } + desc = block->virt_addr + i * XDMA_DESC_SIZE; + desc->next_desc = cpu_to_le64(block->dma_addr); +} + +static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct xdma_chan, vchan.chan); +} + +static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct xdma_desc, vdesc); +} + +/** + * xdma_channel_init - Initialize DMA channel registers + * @chan: DMA channel pointer + */ +static int xdma_channel_init(struct xdma_chan *chan) +{ + struct xdma_device *xdev = chan->xdev_hdl; + int ret; + unsigned int reg_ctrl = 0; + + regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); + dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C, + CHAN_CTRL_NON_INCR_ADDR | CHAN_CTRL_TRANSFER_INFO_WB); + if (ret) + return ret; + + regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); + dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE, + CHAN_IM_ALL); + if (ret) + return ret; + + return 0; +} + +/** + * xdma_free_desc - Free descriptor + * @vdesc: Virtual DMA descriptor + */ +static void xdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct xdma_desc *sw_desc; + int i; + + sw_desc = to_xdma_desc(vdesc); + for (i = 0; i < sw_desc->dblk_num; i++) { + if (!sw_desc->desc_blocks[i].virt_addr) + break; + 
dma_pool_free(sw_desc->chan->desc_pool, + sw_desc->desc_blocks[i].virt_addr, + sw_desc->desc_blocks[i].dma_addr); + } + kfree(sw_desc->desc_blocks); + kfree(sw_desc); +} + +/** + * xdma_alloc_desc - Allocate descriptor + * @chan: DMA channel pointer + * @desc_num: Number of hardware descriptors + * @cyclic: Whether this is a cyclic transfer + */ +static struct xdma_desc * +xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic) +{ + struct xdma_desc *sw_desc; + struct xdma_hw_desc *desc; + dma_addr_t dma_addr; + u32 dblk_num; + u32 control; + void *addr; + int i, j; + + sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT); + if (!sw_desc) + return NULL; + + sw_desc->chan = chan; + sw_desc->desc_num = desc_num; + sw_desc->cyclic = cyclic; + sw_desc->error = false; + dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); + sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), + GFP_NOWAIT); + if (!sw_desc->desc_blocks) + goto failed; + + if (cyclic) + control = XDMA_DESC_CONTROL_CYCLIC; + else + control = XDMA_DESC_CONTROL(1, 0); + + sw_desc->dblk_num = dblk_num; + for (i = 0; i < sw_desc->dblk_num; i++) { + addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr); + if (!addr) + goto failed; + + sw_desc->desc_blocks[i].virt_addr = addr; + sw_desc->desc_blocks[i].dma_addr = dma_addr; + for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) + desc[j].control = cpu_to_le32(control); + } + + if (cyclic) + xdma_link_cyclic_desc_blocks(sw_desc); + else + xdma_link_sg_desc_blocks(sw_desc); + + return sw_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + +/** + * xdma_xfer_start - Start DMA transfer + * @xchan: DMA channel pointer + */ +static int xdma_xfer_start(struct xdma_chan *xchan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan); + struct xdma_device *xdev = xchan->xdev_hdl; + struct xdma_desc_block *block; + u32 val, completed_blocks; + struct xdma_desc *desc; + int ret; + + /* + * check if there is not any submitted descriptor or channel is busy. + * vchan lock should be held where this function is called. 
+ */
+	if (!vd || xchan->busy)
+		return -EINVAL;
+
+	/* clear run stop bit to get ready for transfer */
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+			   CHAN_CTRL_RUN_STOP);
+	if (ret)
+		return ret;
+
+	desc = to_xdma_desc(vd);
+	if (desc->dir != xchan->dir) {
+		xdma_err(xdev, "incorrect request direction");
+		return -EINVAL;
+	}
+
+	/* set DMA engine to the first descriptor block */
+	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+	block = &desc->desc_blocks[completed_blocks];
+	val = lower_32_bits(block->dma_addr);
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+	if (ret)
+		return ret;
+
+	val = upper_32_bits(block->dma_addr);
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+	if (ret)
+		return ret;
+
+	if (completed_blocks + 1 == desc->dblk_num)
+		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+	else
+		val = XDMA_DESC_ADJACENT - 1;
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+	if (ret)
+		return ret;
+
+	/* kick off the DMA transfer: force a 0 -> 1 transition of the start
+	 * bits by going through the bit clear/set (W1C/W1S) registers
+	 */
+	regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+		     CHAN_CTRL_START);
+
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1S,
+			   CHAN_CTRL_START);
+	if (ret)
+		return ret;
+
+	xchan->busy = true;
+
+	return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+	int ret;
+	u32 val;
+	struct xdma_device *xdev = xchan->xdev_hdl;
+
+	/* clear run stop bit to prevent any further auto-triggering */
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+			   CHAN_CTRL_RUN_STOP);
+	if (ret)
+		return ret;
+
+	/* Clear the channel status register */
+	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+			       enum dma_transfer_direction dir)
+{
+	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+	struct xdma_chan **chans, *xchan;
+	u32 base, identifier, target;
+	u32 *chan_num;
+	int i, j, ret;
+
+	if (dir == DMA_MEM_TO_DEV) {
+		base = XDMA_CHAN_H2C_OFFSET;
+		target = XDMA_CHAN_H2C_TARGET;
+		chans = &xdev->h2c_chans;
+		chan_num = &xdev->h2c_chan_num;
+	} else if (dir == DMA_DEV_TO_MEM) {
+		base = XDMA_CHAN_C2H_OFFSET;
+		target = XDMA_CHAN_C2H_TARGET;
+		chans = &xdev->c2h_chans;
+		chan_num = &xdev->c2h_chan_num;
+	} else {
+		xdma_err(xdev, "invalid direction specified");
+		return -EINVAL;
+	}
+
+	/* detect number of available DMA channels */
+	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+				  &identifier);
+		if (ret)
+			return ret;
+
+		/* check if it is an available DMA channel */
+		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+			(*chan_num)++;
+	}
+
+	if (!*chan_num) {
+		xdma_err(xdev, "did not probe any channel");
+		return -EINVAL;
+	}
+
+	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+			      GFP_KERNEL);
+	if (!*chans)
+		return -ENOMEM;
+
+	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+				  &identifier);
+		if (ret)
+			return ret;
+
+		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+			continue;
+
+		if (j == *chan_num) {
+			xdma_err(xdev, "invalid
channel number"); + return -EIO; + } + + /* init channel structure and hardware */ + xchan = &(*chans)[j]; + xchan->xdev_hdl = xdev; + xchan->base = base + i * XDMA_CHAN_STRIDE; + xchan->dir = dir; + + ret = xdma_channel_init(xchan); + if (ret) + return ret; + xchan->vchan.desc_free = xdma_free_desc; + vchan_init(&xchan->vchan, &xdev->dma_dev); + + j++; + } + + dev_info(&xdev->pdev->dev, "configured %d %s channels", j, + (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H"); + + return 0; +} + +/** + * xdma_issue_pending - Issue pending transactions + * @chan: DMA channel pointer + */ +static void xdma_issue_pending(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + if (vchan_issue_pending(&xdma_chan->vchan)) + xdma_xfer_start(xdma_chan); + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); +} + +/** + * xdma_terminate_all - Terminate all transactions + * @chan: DMA channel pointer + */ +static int xdma_terminate_all(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + LIST_HEAD(head); + + xdma_xfer_stop(xdma_chan); + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + + xdma_chan->busy = false; + vd = vchan_next_desc(&xdma_chan->vchan); + if (vd) { + list_del(&vd->node); + dma_cookie_complete(&vd->tx); + vchan_terminate_vdesc(vd); + } + vchan_get_all_descriptors(&xdma_chan->vchan, &head); + list_splice_tail(&head, &xdma_chan->vchan.desc_terminated); + + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); + + return 0; +} + +/** + * xdma_synchronize - Synchronize terminated transactions + * @chan: DMA channel pointer + */ +static void xdma_synchronize(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + vchan_synchronize(&xdma_chan->vchan); +} + +/** + * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk. + * More than one descriptor will be used if the size is bigger + * than XDMA_DESC_BLEN_MAX. 
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to fill in @sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+				  u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+	u32 left = size, len, desc_num = filled_descs_num;
+	struct xdma_desc_block *dblk;
+	struct xdma_hw_desc *desc;
+
+	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+	desc = dblk->virt_addr;
+	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+	do {
+		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+		/* set hardware descriptor */
+		desc->bytes = cpu_to_le32(len);
+		desc->src_addr = cpu_to_le64(src_addr);
+		desc->dst_addr = cpu_to_le64(dst_addr);
+
+		/* no struct device at hand here, so pr_debug() */
+		pr_debug("desc[%u]:%p {src:0x%llx, dst: 0x%llx, length: %u}",
+			 desc_num,
+			 desc,
+			 src_addr,
+			 dst_addr,
+			 len);
+
+		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+			desc = (++dblk)->virt_addr;
+		else
+			desc++;
+
+		src_addr += len;
+		dst_addr += len;
+		left -= len;
+	} while (left);
+
+	return desc_num - filled_descs_num;
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		    unsigned int sg_len, enum dma_transfer_direction dir,
+		    unsigned long flags, void *context)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_device *xdev = xdma_chan->xdev_hdl;
+	struct dma_async_tx_descriptor *tx_desc;
+	struct xdma_desc *sw_desc;
+	u32 desc_num = 0, i;
+	u64 addr, dev_addr, *src, *dst;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
+	if (!sw_desc)
+		return NULL;
+	sw_desc->dir = dir;
+	sw_desc->cyclic = false;
+	sw_desc->interleaved_dma = false;
+
+	if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = xdma_chan->cfg.dst_addr;
+		src = &addr;
+		dst = &dev_addr;
+	} else {
+		dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+		src = &dev_addr;
+		dst = &addr;
+	}
+
+	/* DMA_MEM_TO_DEV is host-to-card (H2C) */
+	dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, sg entries: %u}",
"C2H" : "H2C", + sw_desc, + src, + dst, + sg_dma_len(sg)); + + + desc_num = 0; + for_each_sg(sgl, sg, sg_len, i) { + addr = sg_dma_address(sg); + desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num); + dev_addr += sg_dma_len(sg); + } + + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); + if (!tx_desc) + goto failed; + + return tx_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + + return NULL; +} + +/** + * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions + * @chan: DMA channel pointer + * @address: Device DMA address to access + * @size: Total length to transfer + * @period_size: Period size to use for each transfer + * @dir: Transfer direction + * @flags: Transfer ack flags + */ +static struct dma_async_tx_descriptor * +xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, + size_t size, size_t period_size, + enum dma_transfer_direction dir, + unsigned long flags) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + unsigned int periods = size / period_size; + struct dma_async_tx_descriptor *tx_desc; + struct xdma_desc *sw_desc; + u64 addr, dev_addr, *src, *dst; + u32 desc_num = 0; + unsigned int i; + + /* + * Simplify the whole logic by preventing an abnormally high number of + * periods and periods size. + */ + if (period_size > XDMA_DESC_BLEN_MAX) { + xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX); + return NULL; + } + + if (periods > XDMA_DESC_ADJACENT) { + xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT); + return NULL; + } + + sw_desc = xdma_alloc_desc(xdma_chan, periods, true); + if (!sw_desc) + return NULL; + + sw_desc->periods = periods; + sw_desc->period_size = period_size; + sw_desc->dir = dir; + sw_desc->interleaved_dma = false; + + addr = address; + if (dir == DMA_MEM_TO_DEV) { + dev_addr = xdma_chan->cfg.dst_addr; + src = &addr; + dst = &dev_addr; + } else { + dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr; + src = &dev_addr; + dst = &addr; + } + + dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %lu}", + dir == DMA_MEM_TO_DEV ? 
"C2H" : "H2C", + sw_desc, + src, + dst, + period_size); + + for (i = 0; i < periods; i++) { + xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num++); + addr += period_size; + } + + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); + if (!tx_desc) + goto failed; + + return tx_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + + return NULL; +} + +/** + * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers + * @chan: DMA channel + * @xt: DMA transfer template + * @flags: tx flags + */ +static struct dma_async_tx_descriptor * +xdma_prep_interleaved_dma(struct dma_chan *chan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + int i; + u32 desc_num = 0, period_size = 0; + struct dma_async_tx_descriptor *tx_desc; + struct xdma_chan *xchan = to_xdma_chan(chan); + struct xdma_desc *sw_desc; + u64 src_addr, dst_addr; + + for (i = 0; i < xt->frame_size; ++i) + desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX); + + sw_desc = xdma_alloc_desc(xchan, desc_num, false); + if (!sw_desc) + return NULL; + sw_desc->dir = xt->dir; + sw_desc->interleaved_dma = true; + sw_desc->cyclic = flags & DMA_PREP_REPEAT; + sw_desc->frames_left = xt->numf; + sw_desc->periods = xt->numf; + + desc_num = 0; + src_addr = xt->src_start; + dst_addr = xt->dst_start; + for (i = 0; i < xt->frame_size; ++i) { + desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num); + src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ? + xt->sgl[i].size : 0); + dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ? + xt->sgl[i].size : 0); + period_size += xt->sgl[i].size; + } + sw_desc->period_size = period_size; + + tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags); + if (tx_desc) + return tx_desc; + + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + +/** + * xdma_device_config - Configure the DMA channel + * @chan: DMA channel + * @cfg: channel configuration + */ +static int xdma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg)); + + return 0; +} + +/** + * xdma_free_chan_resources - Free channel resources + * @chan: DMA channel + */ +static void xdma_free_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + vchan_free_chan_resources(&xdma_chan->vchan); + dma_pool_free(xdma_chan->desc_pool, + xdma_chan->write_back, + xdma_chan->write_back->dma_addr); + dma_pool_destroy(xdma_chan->desc_pool); + xdma_chan->desc_pool = NULL; +} + +/** + * xdma_alloc_chan_resources - Allocate channel resources + * @chan: DMA channel + */ +static int xdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + struct device *dev = xdev->dma_dev.dev; + dma_addr_t write_back_addr; + + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + if (!dev) { + xdma_err(xdev, "unable to find pci device"); + return -EINVAL; + } + + //Allocate the pool WITH the H2C write back + xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE + + sizeof(struct xdma_c2h_write_back), + XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY); + if (!xdma_chan->desc_pool) { + xdma_err(xdev, "unable to allocate descriptor pool"); + return -ENOMEM; + } + + /* Allocate the C2H write back out of the pool*/ + + xdma_chan->write_back = 
+
+/**
+ * xdma_free_chan_resources - Free channel resources
+ * @chan: DMA channel
+ */
+static void xdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+	vchan_free_chan_resources(&xdma_chan->vchan);
+	dma_pool_free(xdma_chan->desc_pool,
+		      xdma_chan->write_back,
+		      xdma_chan->write_back->dma_addr);
+	dma_pool_destroy(xdma_chan->desc_pool);
+	xdma_chan->desc_pool = NULL;
+}
+
+/**
+ * xdma_alloc_chan_resources - Allocate channel resources
+ * @chan: DMA channel
+ */
+static int xdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_device *xdev = xdma_chan->xdev_hdl;
+	struct device *dev = xdev->dma_dev.dev;
+	dma_addr_t write_back_addr;
+
+	while (dev && !dev_is_pci(dev))
+		dev = dev->parent;
+	if (!dev) {
+		xdma_err(xdev, "unable to find pci device");
+		return -EINVAL;
+	}
+
+	/* Allocate the pool WITH room for the C2H write back block */
+	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE +
+					       sizeof(struct xdma_c2h_write_back),
+					       XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
+	if (!xdma_chan->desc_pool) {
+		xdma_err(xdev, "unable to allocate descriptor pool");
+		return -ENOMEM;
+	}
+
+	/* Allocate the C2H write back out of the pool */
+	xdma_chan->write_back = dma_pool_alloc(xdma_chan->desc_pool, GFP_NOWAIT, &write_back_addr);
+	if (!xdma_chan->write_back) {
+		xdma_err(xdev, "unable to allocate C2H write back block");
+		return -ENOMEM;
+	}
+	xdma_chan->write_back->dma_addr = write_back_addr;
+
+	dev_dbg(dev, "C2H write_back: %p, dma_addr: %pad", xdma_chan->write_back, &write_back_addr);
+
+	return 0;
+}
+
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+				      struct dma_tx_state *state)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_desc *desc = NULL;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	unsigned int period_idx;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+	if (!vd)
+		goto out;
+
+	desc = to_xdma_desc(vd);
+	if (desc->error) {
+		ret = DMA_ERROR;
+	} else if (desc->cyclic) {
+		period_idx = desc->completed_desc_num % desc->periods;
+		residue = (desc->periods - period_idx) * desc->period_size;
+		dma_set_residue(state, residue);
+	}
+out:
+	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+	return ret;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+	struct xdma_chan *xchan = dev_id;
+	u32 complete_desc_num = 0;
+	struct xdma_device *xdev = xchan->xdev_hdl;
+	struct virt_dma_desc *vd, *next_vd;
+	struct xdma_desc *desc;
+	int ret;
+	u32 st;
+	bool repeat_tx;
+
+	spin_lock(&xchan->vchan.lock);
+
+	/* get submitted request */
+	vd = vchan_next_desc(&xchan->vchan);
+	if (!vd)
+		goto out;
+
+	/* Clear-on-read the status register */
+	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+	if (ret)
+		goto out;
+
+	desc = to_xdma_desc(vd);
+
+	st &= XDMA_CHAN_STATUS_MASK;
+	if ((st & XDMA_CHAN_ERROR_MASK) ||
+	    !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+		desc->error = true;
+		xdma_err(xdev, "channel error, status register value: 0x%x", st);
+		goto out;
+	}
+
+	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+			  &complete_desc_num);
+	if (ret)
+		goto out;
+
+	if (desc->interleaved_dma) {
+		xchan->busy = false;
+		desc->completed_desc_num += complete_desc_num;
+		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+			xdma_xfer_start(xchan);
+			goto out;
+		}
+
+		/* last desc of any frame */
+		desc->frames_left--;
+		if (desc->frames_left)
+			goto out;
+
+		/* last desc of the last frame */
+		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+		if (next_vd)
+			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+		if (repeat_tx) {
+			desc->frames_left = desc->periods;
+			desc->completed_desc_num = 0;
+			vchan_cyclic_callback(vd);
+		} else {
+			list_del(&vd->node);
+			vchan_cookie_complete(vd);
+		}
+		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+		xdma_xfer_start(xchan);
+	} else if (!desc->cyclic) {
+		xchan->busy = false;
+		desc->completed_desc_num += complete_desc_num;
+
+		/* if all data blocks are transferred, remove and complete the request */
+		if (desc->completed_desc_num == desc->desc_num) {
+			list_del(&vd->node);
+			vchan_cookie_complete(vd);
+			goto out;
+		}
+
+		if (desc->completed_desc_num > desc->desc_num ||
+		    complete_desc_num != 
XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) + goto out; + + /* transfer the rest of data */ + xdma_xfer_start(xchan); + } else { + desc->completed_desc_num = complete_desc_num; + vchan_cyclic_callback(vd); + } + +out: + spin_unlock(&xchan->vchan.lock); + return IRQ_HANDLED; +} + +/** + * xdma_irq_fini - Uninitialize IRQ + * @xdev: DMA device pointer + */ +static void xdma_irq_fini(struct xdma_device *xdev) +{ + int i; + + /* disable interrupt */ + regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0); + + /* free irq handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + for (i = 0; i < xdev->c2h_chan_num; i++) + free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]); +} + +/** + * xdma_set_vector_reg - configure hardware IRQ registers + * @xdev: DMA device pointer + * @vec_tbl_start: Start of IRQ registers + * @irq_start: Start of IRQ + * @irq_num: Number of IRQ + */ +static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, + u32 irq_start, u32 irq_num) +{ + u32 shift, i, val = 0; + int ret; + + /* Each IRQ register is 32 bit and contains 4 IRQs */ + while (irq_num > 0) { + for (i = 0; i < 4; i++) { + shift = XDMA_IRQ_VEC_SHIFT * i; + val |= irq_start << shift; + irq_start++; + irq_num--; + if (!irq_num) + break; + } + + /* write IRQ register */ + ret = regmap_write(xdev->rmap, vec_tbl_start, val); + if (ret) + return ret; + vec_tbl_start += sizeof(u32); + val = 0; + } + + return 0; +} + +/** + * xdma_irq_init - initialize IRQs + * @xdev: DMA device pointer + */ +static int xdma_irq_init(struct xdma_device *xdev) +{ + u32 irq = xdev->irq_start; + u32 user_irq_start; + int i, j, ret; + + /* return failure if there are not enough IRQs */ + if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) { + xdma_err(xdev, "not enough irq"); + return -EINVAL; + } + + /* setup H2C interrupt handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-h2c-channel", &xdev->h2c_chans[i]); + if (ret) { + xdma_err(xdev, "H2C channel%d request irq%d failed: %d", + i, irq, ret); + goto failed_init_h2c; + } + xdev->h2c_chans[i].irq = irq; + irq++; + } + + /* setup C2H interrupt handler */ + for (j = 0; j < xdev->c2h_chan_num; j++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-c2h-channel", &xdev->c2h_chans[j]); + if (ret) { + xdma_err(xdev, "C2H channel%d request irq%d failed: %d", + j, irq, ret); + goto failed_init_c2h; + } + xdev->c2h_chans[j].irq = irq; + irq++; + } + + /* config hardware IRQ registers */ + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, + XDMA_CHAN_NUM(xdev)); + if (ret) { + xdma_err(xdev, "failed to set channel vectors: %d", ret); + goto failed_init_c2h; + } + + /* config user IRQ registers if needed */ + user_irq_start = XDMA_CHAN_NUM(xdev); + if (xdev->irq_num > user_irq_start) { + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM, + user_irq_start, + xdev->irq_num - user_irq_start); + if (ret) { + xdma_err(xdev, "failed to set user vectors: %d", ret); + goto failed_init_c2h; + } + } + + /* enable interrupt */ + ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); + if (ret) + goto failed_init_c2h; + + return 0; + +failed_init_c2h: + while (j--) + free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]); +failed_init_h2c: + while (i--) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + return ret; +} + +static bool xdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct 
xdma_chan_info *chan_info = param; + + return chan_info->dir == xdma_chan->dir; +} + +/** + * xdma_disable_user_irq - Disable user interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return; + } + index -= XDMA_CHAN_NUM(xdev); + + regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index); +} +EXPORT_SYMBOL(xdma_disable_user_irq); + +/** + * xdma_enable_user_irq - Enable user logic interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + int ret; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return -EINVAL; + } + index -= XDMA_CHAN_NUM(xdev); + + ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(xdma_enable_user_irq); + +/** + * xdma_get_user_irq - Get system IRQ number + * @pdev: Pointer to the platform_device structure + * @user_irq_index: User logic IRQ wire index + * + * Return: The system IRQ number allocated for the given wire index. + */ +int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq index"); + return -EINVAL; + } + + return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index; +} +EXPORT_SYMBOL(xdma_get_user_irq); + +/** + * xdma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + */ +static int xdma_remove(struct platform_device *pdev) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX) + xdma_irq_fini(xdev); + + if (xdev->status & XDMA_DEV_STATUS_REG_DMA) + dma_async_device_unregister(&xdev->dma_dev); + + return 0; +} + +/** + * xdma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + */ +static int xdma_probe(struct platform_device *pdev) +{ + struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev); + struct xdma_device *xdev; + void __iomem *reg_base; + struct resource *res; + int ret = -ENODEV; + + if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) { + dev_err(&pdev->dev, "invalid max dma channels %d", + pdata->max_dma_channels); + return -EINVAL; + } + + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + platform_set_drvdata(pdev, xdev); + xdev->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + xdma_err(xdev, "failed to get irq resource"); + goto failed; + } + xdev->irq_start = res->start; + xdev->irq_num = resource_size(res); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + xdma_err(xdev, "failed to get io resource"); + goto failed; + } + + reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg_base)) { + xdma_err(xdev, "ioremap failed"); + goto failed; + } + + dev_dbg(&pdev->dev, " %s - config: %p (%lu bytes), reg_bits:%d, reg_stride:%d, pad_bits:%d, 
val_bits:%d, &val_bits:%p", + __func__, + &xdma_regmap_config, + sizeof(struct regmap_config), + xdma_regmap_config.reg_bits, + xdma_regmap_config.reg_stride, + xdma_regmap_config.pad_bits, + xdma_regmap_config.val_bits, + &xdma_regmap_config.val_bits); + + xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base, + &xdma_regmap_config); + if (IS_ERR(xdev->rmap)) { + ret = PTR_ERR(xdev->rmap); + xdma_err(xdev, "config regmap failed: %d", ret); + goto failed; + } + + INIT_LIST_HEAD(&xdev->dma_dev.channels); + + ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV); + if (ret) { + xdma_err(xdev, "config H2C channels failed: %d", ret); + goto failed; + } + + ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM); + if (ret) { + xdma_err(xdev, "config C2H channels failed: %d", ret); + goto failed; + } + + dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask); + dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask); + + xdev->dma_dev.dev = &pdev->dev; + xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources; + xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources; + xdev->dma_dev.device_tx_status = xdma_tx_status; + xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; + xdev->dma_dev.device_config = xdma_device_config; + xdev->dma_dev.device_issue_pending = xdma_issue_pending; + xdev->dma_dev.device_terminate_all = xdma_terminate_all; + xdev->dma_dev.device_synchronize = xdma_synchronize; + xdev->dma_dev.filter.map = pdata->device_map; + xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; + xdev->dma_dev.filter.fn = xdma_filter_fn; + xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic; + xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma; + + ret = dma_async_device_register(&xdev->dma_dev); + if (ret) { + xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret); + goto failed; + } + xdev->status |= XDMA_DEV_STATUS_REG_DMA; + + ret = xdma_irq_init(xdev); + if (ret) { + xdma_err(xdev, "failed to init msix: %d", ret); + goto failed; + } + xdev->status |= XDMA_DEV_STATUS_INIT_MSIX; + + return 0; + +failed: + xdma_remove(pdev); + + return ret; +} + +static const struct platform_device_id xdma_id_table[] = { + { "xdma", 0}, + { }, +}; + +static struct platform_driver xdma_driver = { + .driver = { + .name = "xdma", + }, + .id_table = xdma_id_table, + .probe = xdma_probe, + .remove = xdma_remove, +}; + +module_platform_driver(xdma_driver); + +MODULE_DESCRIPTION("AMD XDMA driver"); +MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>"); +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/amd_xdma.h b/snd-alpx-dkms/snd-alpx/core/generic/6.3/amd_xdma.h new file mode 100644 index 0000000..b5e23e1 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/amd_xdma.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Advanced Micro Devices, Inc. 
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ *	This information is used to match a channel when requesting
+ *	a DMA channel
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+	enum dma_transfer_direction dir;
+};
+
+#define XDMA_FILTER_PARAM(chan_info)	((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: dma_slave_map table used for channel lookup
+ */
+struct xdma_platdata {
+	u32 max_dma_channels;
+	u32 device_map_cnt;
+	struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/dmaengine.h b/snd-alpx-dkms/snd-alpx/core/generic/6.3/dmaengine.h
new file mode 100644
index 0000000..53f16d3
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/dmaengine.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The contents of this file are private to DMA engine drivers, and are not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+	chan->cookie = DMA_MIN_COOKIE;
+	chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan = tx->chan;
+	dma_cookie_t cookie;
+
+	cookie = chan->cookie + 1;
+	if (cookie < DMA_MIN_COOKIE)
+		cookie = DMA_MIN_COOKIE;
+	tx->cookie = chan->cookie = cookie;
+
+	return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channel's completed
+ * cookie marker. Zero the descriptor's cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+	tx->chan->completed_cookie = tx->cookie;
+	tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL.  No locking is required.
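+ *
+ * A driver's tx_status hook typically starts with (illustrative; this is
+ * the pattern xdma_tx_status() in xdma.c follows):
+ *
+ *	ret = dma_cookie_status(chan, cookie, state);
+ *	if (ret == DMA_COMPLETE)
+ *		return ret;
+ *
+ * and only computes a residue for cookies still in flight.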
+ */ +static inline enum dma_status dma_cookie_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + dma_cookie_t used, complete; + + used = chan->cookie; + complete = chan->completed_cookie; + barrier(); + if (state) { + state->last = complete; + state->used = used; + state->residue = 0; + state->in_flight_bytes = 0; + } + return dma_async_is_complete(cookie, complete, used); +} + +static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) +{ + if (state) + state->residue = residue; +} + +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +/** + * dmaengine_desc_get_callback - get the passed in callback function + * @tx: tx descriptor + * @cb: temp struct to hold the callback info + * + * Fill the passed in cb struct with what's available in the passed in + * tx descriptor struct + * No locking is required. + */ +static inline void +dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, + struct dmaengine_desc_callback *cb) +{ + cb->callback = tx->callback; + cb->callback_result = tx->callback_result; + cb->callback_param = tx->callback_param; +} + +/** + * dmaengine_desc_callback_invoke - call the callback function in cb struct + * @cb: temp struct that is holding the callback info + * @result: transaction result + * + * Call the callback function provided in the cb struct with the parameter + * in the cb struct. + * Locking is dependent on the driver. + */ +static inline void +dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, + const struct dmaengine_result *result) +{ + struct dmaengine_result dummy_result = { + .result = DMA_TRANS_NOERROR, + .residue = 0 + }; + + if (cb->callback_result) { + if (!result) + result = &dummy_result; + cb->callback_result(cb->callback_param, result); + } else if (cb->callback) { + cb->callback(cb->callback_param); + } +} + +/** + * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and + * then immediately call the callback. + * @tx: dma async tx descriptor + * @result: transaction result + * + * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() + * in a single function since no work is necessary in between for the driver. + * Locking is dependent on the driver. + */ +static inline void +dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, + const struct dmaengine_result *result) +{ + struct dmaengine_desc_callback cb; + + dmaengine_desc_get_callback(tx, &cb); + dmaengine_desc_callback_invoke(&cb, result); +} + +/** + * dmaengine_desc_callback_valid - verify the callback is valid in cb + * @cb: callback info struct + * + * Return a bool that verifies whether callback in cb is valid or not. + * No locking is required. 
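+ *
+ * For instance, a completion path can skip work entirely when no callback
+ * was set (illustrative, where "vd" is some driver-owned descriptor):
+ *
+ *	dmaengine_desc_get_callback(&vd->tx, &cb);
+ *	if (dmaengine_desc_callback_valid(&cb))
+ *		dmaengine_desc_callback_invoke(&cb, NULL);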
+ */ +static inline bool +dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) +{ + return cb->callback || cb->callback_result; +} + +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> + +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) { + return dma_dev->dbg_dev_root; +} +#else +struct dentry; +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) +{ + return NULL; +} +#endif /* CONFIG_DEBUG_FS */ + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.c b/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.c new file mode 100644 index 0000000..a6f4265 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_move_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +/** + * vchan_tx_desc_free - free a reusable descriptor + * @tx: the transfer + * + * This function frees a previously allocated reusable descriptor. The only + * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the + * transfer. + * + * Returns 0 upon success + */ +int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + list_del(&vd->node); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", + vc, vd, vd->tx.cookie); + vc->desc_free(vd); + return 0; +} +EXPORT_SYMBOL_GPL(vchan_tx_desc_free); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
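+ * The tasklet runs in softirq context, so callbacks invoked from here must
+ * not assume hard-irq context; descriptors queued on desc_completed by
+ * vchan_cookie_complete() are spliced off and drained in one batch.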
+ */ +static void vchan_complete(struct tasklet_struct *t) +{ + struct virt_dma_chan *vc = from_tasklet(vc, t, task); + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + + list_for_each_entry_safe(vd, _vd, &head, node) { + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + struct virt_dma_desc *vd, *_vd; + + list_for_each_entry_safe(vd, _vd, head, node) { + list_del(&vd->node); + vchan_vdesc_fini(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_allocated); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); + + tasklet_setup(&vc->task, vchan_complete); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.h b/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.h new file mode 100644 index 0000000..e9f5250 --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/virt-dma.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); +extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); +extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *); + +/** + * vchan_tx_prep - prepare a descriptor + * @vc: virtual channel allocating this descriptor + * @vd: virtual descriptor to prepare + * @tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + unsigned long flags; + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 
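+	/*
+	 * From this point on the descriptor is a live
+	 * dma_async_tx_descriptor: its tx_submit hook (vchan_tx_submit())
+	 * will move it from the desc_allocated list to desc_submitted.
+	 */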
+ vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + vd->tx.desc_free = vchan_tx_desc_free; + + vd->tx_result.result = DMA_TRANS_NOERROR; + vd->tx_result.residue = 0; + + spin_lock_irqsave(&vc->lock, flags); + list_add_tail(&vd->node, &vc->desc_allocated); + spin_unlock_irqrestore(&vc->lock, flags); + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * @vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * @vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + dma_cookie_t cookie; + + cookie = vd->tx.cookie; + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_vdesc_fini - Free or reuse a descriptor + * @vd: virtual descriptor to free/reuse + */ +static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + list_add(&vd->node, &vc->desc_allocated); + spin_unlock_irqrestore(&vc->lock, flags); + } else { + vc->desc_free(vd); + } +} + +/** + * vchan_cyclic_callback - report the completion of a period + * @vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_terminate_vdesc - Disable pending cyclic callback + * @vd: virtual descriptor to be terminated + * + * vc.lock must be held by caller + */ +static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + list_add_tail(&vd->node, &vc->desc_terminated); + + if (vc->cyclic == vd) + vc->cyclic = NULL; +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * @vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + return list_first_entry_or_null(&vc->desc_issued, + struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * @vc: virtual channel to get descriptors from + * @head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_allocated, head); + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + struct virt_dma_desc *vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + 
vchan_get_all_descriptors(vc, &head); + list_for_each_entry(vd, &head, node) + dmaengine_desc_clear_reuse(&vd->tx); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +/** + * vchan_synchronize() - synchronize callback execution to the current context + * @vc: virtual channel to synchronize + * + * Makes sure that all scheduled or active callbacks have finished running. For + * proper operation the caller has to ensure that no new callbacks are scheduled + * after the invocation of this function started. + * Free up the terminated cyclic descriptor to prevent memory leakage. + */ +static inline void vchan_synchronize(struct virt_dma_chan *vc) +{ + LIST_HEAD(head); + unsigned long flags; + + tasklet_kill(&vc->task); + + spin_lock_irqsave(&vc->lock, flags); + + list_splice_tail_init(&vc->desc_terminated, &head); + + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h b/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h new file mode 100644 index 0000000..4ee96de --- /dev/null +++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma-regs.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. + * Copyright (C) 2022, Advanced Micro Devices, Inc. + */ + +#ifndef __DMA_XDMA_REGS_H +#define __DMA_XDMA_REGS_H + +/* The length of register space exposed to host */ +#define XDMA_REG_SPACE_LEN 65536 + +/* + * maximum number of DMA channels for each direction: + * Host to Card (H2C) or Card to Host (C2H) + */ +#define XDMA_MAX_CHANNELS 4 + +/* + * macros to define the number of descriptor blocks can be used in one + * DMA transfer request. + * the DMA engine uses a linked list of descriptor blocks that specify the + * source, destination, and length of the DMA transfers. + */ +#define XDMA_DESC_BLOCK_NUM BIT(7) +#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1) + +/* descriptor definitions */ +#define XDMA_DESC_ADJACENT 32 +#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1) +#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8) +#define XDMA_DESC_MAGIC 0xad4bUL +#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16) +#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0) +#define XDMA_DESC_STOPPED BIT(0) +#define XDMA_DESC_COMPLETED BIT(1) +#define XDMA_DESC_BLEN_BITS 28 +#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE) + +/* macros to construct the descriptor control word */ +#define XDMA_DESC_CONTROL(adjacent, flag) \ + (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \ + FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \ + FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag))) +#define XDMA_DESC_CONTROL_LAST \ + XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED) +#define XDMA_DESC_CONTROL_CYCLIC \ + XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED) + +/* + * Descriptor for a single contiguous memory block transfer. + * + * Multiple descriptors are linked by means of the next pointer. An additional + * extra adjacent number gives the amount of extra contiguous descriptors. + * + * The descriptors are in root complex memory, and the bytes in the 32-bit + * words must be in little-endian byte ordering. 
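+ *
+ * Each descriptor is thus 32 bytes:
+ *
+ *	control    magic, adjacent count and stop/completed flags
+ *	bytes      transfer length in bytes
+ *	src_addr   64-bit source bus address
+ *	dst_addr   64-bit destination bus address
+ *	next_desc  64-bit bus address of the next descriptor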
+ */ +struct xdma_hw_desc { + __le32 control; + __le32 bytes; + __le64 src_addr; + __le64 dst_addr; + __le64 next_desc; +}; + +#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) +#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) +#define XDMA_DESC_BLOCK_ALIGN 32 +#define XDMA_DESC_BLOCK_BOUNDARY 4096 + +/* + * Channel registers + */ +#define XDMA_CHAN_IDENTIFIER 0x0 +#define XDMA_CHAN_CONTROL 0x4 +#define XDMA_CHAN_CONTROL_W1S 0x8 +#define XDMA_CHAN_CONTROL_W1C 0xc +#define XDMA_CHAN_STATUS 0x40 +#define XDMA_CHAN_STATUS_RC 0x44 +#define XDMA_CHAN_COMPLETED_DESC 0x48 +#define XDMA_CHAN_ALIGNMENTS 0x4c +#define XDMA_CHAN_INTR_ENABLE 0x90 +#define XDMA_CHAN_INTR_ENABLE_W1S 0x94 +#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c + +#define XDMA_CHAN_STRIDE 0x100 +#define XDMA_CHAN_H2C_OFFSET 0x0 +#define XDMA_CHAN_C2H_OFFSET 0x1000 +#define XDMA_CHAN_H2C_TARGET 0x0 +#define XDMA_CHAN_C2H_TARGET 0x1 + +/* macro to check if channel is available */ +#define XDMA_CHAN_MAGIC 0x1fc0 +#define XDMA_CHAN_CHECK_TARGET(id, target) \ + (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target)) + +/* bits of the channel control register */ +#define CHAN_CTRL_RUN_STOP BIT(0) +#define CHAN_CTRL_IE_DESC_STOPPED BIT(1) +#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2) +#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) +#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4) +#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6) +#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9) +#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14) +#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19) +#define CHAN_CTRL_NON_INCR_ADDR BIT(25) +#define CHAN_CTRL_POLL_MODE_WB BIT(26) +#define CHAN_CTRL_TRANSFER_INFO_WB BIT(27) + +#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \ + CHAN_CTRL_IE_DESC_STOPPED | \ + CHAN_CTRL_IE_DESC_COMPLETED | \ + CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + +#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START + +#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ + CHAN_CTRL_IE_MAGIC_STOPPED | \ + CHAN_CTRL_IE_READ_ERROR | \ + CHAN_CTRL_IE_WRITE_ERROR | \ + CHAN_CTRL_IE_DESC_ERROR) + +/* bits of the channel interrupt enable mask */ +#define CHAN_IM_DESC_ERROR BIT(19) +#define CHAN_IM_READ_ERROR BIT(9) +#define CHAN_IM_IDLE_STOPPED BIT(6) +#define CHAN_IM_MAGIC_STOPPED BIT(4) +#define CHAN_IM_DESC_COMPLETED BIT(2) +#define CHAN_IM_DESC_STOPPED BIT(1) + +#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \ + CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \ + CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED) + +/* + * Channel SGDMA registers + */ +#define XDMA_SGDMA_IDENTIFIER 0x4000 +#define XDMA_SGDMA_DESC_LO 0x4080 +#define XDMA_SGDMA_DESC_HI 0x4084 +#define XDMA_SGDMA_DESC_ADJ 0x4088 +#define XDMA_SGDMA_DESC_CREDIT 0x408c + +/* + * interrupt registers + */ +#define XDMA_IRQ_IDENTIFIER 0x2000 +#define XDMA_IRQ_USER_INT_EN 0x2004 +#define XDMA_IRQ_USER_INT_EN_W1S 0x2008 +#define XDMA_IRQ_USER_INT_EN_W1C 0x200c +#define XDMA_IRQ_CHAN_INT_EN 0x2010 +#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014 +#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018 +#define XDMA_IRQ_USER_INT_REQ 0x2040 +#define XDMA_IRQ_CHAN_INT_REQ 0x2044 +#define XDMA_IRQ_USER_INT_PEND 0x2048 +#define XDMA_IRQ_CHAN_INT_PEND 0x204c +#define XDMA_IRQ_USER_VEC_NUM 0x2080 +#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0 + +#define XDMA_IRQ_VEC_SHIFT 8 + +#endif /* __DMA_XDMA_REGS_H */ diff --git a/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma.c 
b/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma.c
new file mode 100644
index 0000000..ad42f05
--- /dev/null
+++ b/snd-alpx-dkms/snd-alpx/core/generic/6.3/xilinx/xdma.c
@@ -0,0 +1,1403 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be both in
+ * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+
+#include <linux/regmap.h>
+
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include <linux/dmaengine.h>
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/dmaengine.h"
+#else
+#include "../../../../include/4.16/dmaengine.h"
+#endif
+#include "../../../../include/6.3/amd_xdma.h"
+#include <linux/platform_device.h>
+#include "../amd_xdma.h"
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE
+#include "../virt-dma.h"
+#elif KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.6/virt-dma.h"
+#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
+#include "../../../../include/5.3/virt-dma.h"
+#else
+#include "../../../../include/4.16/virt-dma.h"
+#endif
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+	void *virt_addr;
+	dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_c2h_write_back - Write back block, written by the XDMA.
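+ * (If a C2H transfer is prepared without a cfg.src_addr, the driver points
+ * the engine at this per-channel block, allocated from the descriptor pool
+ * in xdma_alloc_chan_resources(), so the write back lands in driver memory.)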
+ * @magic_status_bit: magic (0x52B4) written by the engine on completion
+ * @length: effective transfer length (in bytes)
+ * @padding_1: padding to align the block on 32 bytes
+ * @dma_addr: DMA address of this write back block
+ */
+struct xdma_c2h_write_back {
+	__le32 magic_status_bit;
+	__le32 length;
+	u32 padding_1[6];
+	dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ * @write_back: C2H metadata write back block
+ */
+struct xdma_chan {
+	struct virt_dma_chan vchan;
+	void *xdev_hdl;
+	u32 base;
+	struct dma_pool *desc_pool;
+	bool busy;
+	enum dma_transfer_direction dir;
+	struct dma_slave_config cfg;
+	u32 irq;
+	struct xdma_c2h_write_back *write_back;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
+ */
+struct xdma_desc {
+	struct virt_dma_desc vdesc;
+	struct xdma_chan *chan;
+	enum dma_transfer_direction dir;
+	struct xdma_desc_block *desc_blocks;
+	u32 dblk_num;
+	u32 desc_num;
+	u32 completed_desc_num;
+	bool cyclic;
+	bool interleaved_dma;
+	u32 periods;
+	u32 period_size;
+	u32 frames_left;
+	bool error;
+};
+
+#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+	struct platform_device *pdev;
+	struct dma_device dma_dev;
+	struct regmap *rmap;
+	struct xdma_chan *h2c_chans;
+	struct xdma_chan *c2h_chans;
+	u32 h2c_chan_num;
+	u32 c2h_chan_num;
+	u32 irq_start;
+	u32 irq_num;
+	u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...) 
\ + dev_err(&(xdev)->pdev->dev, fmt, ##args) +#define XDMA_CHAN_NUM(_xd) ({ \ + typeof(_xd) (xd) = (_xd); \ + ((xd)->h2c_chan_num + (xd)->c2h_chan_num); }) + +/* Get the last desc in a desc block */ +static inline void *xdma_blk_last_desc(struct xdma_desc_block *block) +{ + return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE; +} + +/** + * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer + * @sw_desc: Tx descriptor pointer + */ +static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc) +{ + struct xdma_desc_block *block; + u32 last_blk_desc, desc_control; + struct xdma_hw_desc *desc; + int i; + + desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0); + for (i = 1; i < sw_desc->dblk_num; i++) { + block = &sw_desc->desc_blocks[i - 1]; + desc = xdma_blk_last_desc(block); + + if (!(i & XDMA_DESC_BLOCK_MASK)) { + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); + continue; + } + desc->control = cpu_to_le32(desc_control); + desc->next_desc = cpu_to_le64(block[1].dma_addr); + } + + /* update the last block */ + last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; + if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) { + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2]; + desc = xdma_blk_last_desc(block); + desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0); + desc->control = cpu_to_le32(desc_control); + } + + block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; + desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; + desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); +} + +/** + * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer + * @sw_desc: Tx descriptor pointer + */ +static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc) +{ + struct xdma_desc_block *block; + struct xdma_hw_desc *desc; + int i; + + block = sw_desc->desc_blocks; + for (i = 0; i < sw_desc->desc_num - 1; i++) { + desc = block->virt_addr + i * XDMA_DESC_SIZE; + desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE)); + } + desc = block->virt_addr + i * XDMA_DESC_SIZE; + desc->next_desc = cpu_to_le64(block->dma_addr); +} + +static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct xdma_chan, vchan.chan); +} + +static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct xdma_desc, vdesc); +} + +/** + * xdma_channel_init - Initialize DMA channel registers + * @chan: DMA channel pointer + */ +static int xdma_channel_init(struct xdma_chan *chan) +{ + struct xdma_device *xdev = chan->xdev_hdl; + int ret; + unsigned int reg_ctrl = 0; + + regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); + dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C, + CHAN_CTRL_NON_INCR_ADDR | CHAN_CTRL_TRANSFER_INFO_WB); + if (ret) + return ret; + + regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); + dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); + + ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE, + CHAN_IM_ALL); + if (ret) + return ret; + + return 0; +} + +/** + * xdma_free_desc - Free descriptor + * @vdesc: Virtual DMA descriptor + */ +static void xdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct xdma_desc *sw_desc; + int i; + + sw_desc = to_xdma_desc(vdesc); + for (i = 0; i < sw_desc->dblk_num; i++) { + if (!sw_desc->desc_blocks[i].virt_addr) + break; + 
dma_pool_free(sw_desc->chan->desc_pool, + sw_desc->desc_blocks[i].virt_addr, + sw_desc->desc_blocks[i].dma_addr); + } + kfree(sw_desc->desc_blocks); + kfree(sw_desc); +} + +/** + * xdma_alloc_desc - Allocate descriptor + * @chan: DMA channel pointer + * @desc_num: Number of hardware descriptors + * @cyclic: Whether this is a cyclic transfer + */ +static struct xdma_desc * +xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic) +{ + struct xdma_desc *sw_desc; + struct xdma_hw_desc *desc; + dma_addr_t dma_addr; + u32 dblk_num; + u32 control; + void *addr; + int i, j; + + sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT); + if (!sw_desc) + return NULL; + + sw_desc->chan = chan; + sw_desc->desc_num = desc_num; + sw_desc->cyclic = cyclic; + sw_desc->error = false; + dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); + sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), + GFP_NOWAIT); + if (!sw_desc->desc_blocks) + goto failed; + + if (cyclic) + control = XDMA_DESC_CONTROL_CYCLIC; + else + control = XDMA_DESC_CONTROL(1, 0); + + sw_desc->dblk_num = dblk_num; + for (i = 0; i < sw_desc->dblk_num; i++) { + addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr); + if (!addr) + goto failed; + + sw_desc->desc_blocks[i].virt_addr = addr; + sw_desc->desc_blocks[i].dma_addr = dma_addr; + for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) + desc[j].control = cpu_to_le32(control); + } + + if (cyclic) + xdma_link_cyclic_desc_blocks(sw_desc); + else + xdma_link_sg_desc_blocks(sw_desc); + + return sw_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + +/** + * xdma_xfer_start - Start DMA transfer + * @xchan: DMA channel pointer + */ +static int xdma_xfer_start(struct xdma_chan *xchan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan); + struct xdma_device *xdev = xchan->xdev_hdl; + struct xdma_desc_block *block; + u32 val, completed_blocks; + struct xdma_desc *desc; + int ret; + + /* + * check if there is not any submitted descriptor or channel is busy. + * vchan lock should be held where this function is called. 
+ */
+ if (!vd || xchan->busy)
+ return -EINVAL;
+
+ /* clear run stop bit to get ready for transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ desc = to_xdma_desc(vd);
+ if (desc->dir != xchan->dir) {
+ xdma_err(xdev, "incorrect request direction");
+ return -EINVAL;
+ }
+
+ /* set DMA engine to the first descriptor block */
+ completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+ block = &desc->desc_blocks[completed_blocks];
+ val = lower_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+ if (ret)
+ return ret;
+
+ val = upper_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+ if (ret)
+ return ret;
+
+ if (completed_blocks + 1 == desc->dblk_num)
+ val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ else
+ val = XDMA_DESC_ADJACENT - 1;
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Kick off the DMA transfer. Clearing then setting CTRL_START through
+ * the bit clear/set registers forces the 0 -> 1 transition the engine
+ * needs to (re)start.
+ */
+ regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_START);
+
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1S,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ xchan->busy = true;
+
+ return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ int ret;
+ u32 val;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* clear run stop bit to prevent any further auto-triggering */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ /* Clear the channel status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+ enum dma_transfer_direction dir)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+ struct xdma_chan **chans, *xchan;
+ u32 base, identifier, target;
+ u32 *chan_num;
+ int i, j, ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ base = XDMA_CHAN_H2C_OFFSET;
+ target = XDMA_CHAN_H2C_TARGET;
+ chans = &xdev->h2c_chans;
+ chan_num = &xdev->h2c_chan_num;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ base = XDMA_CHAN_C2H_OFFSET;
+ target = XDMA_CHAN_C2H_TARGET;
+ chans = &xdev->c2h_chans;
+ chan_num = &xdev->c2h_chan_num;
+ } else {
+ xdma_err(xdev, "invalid direction specified");
+ return -EINVAL;
+ }
+
+ /* detect number of available DMA channels */
+ for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ /* check whether this DMA channel is available */
+ if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+ (*chan_num)++;
+ }
+
+ if (!*chan_num) {
+ xdma_err(xdev, "no available channel detected");
+ return -EINVAL;
+ }
+
+ *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+ GFP_KERNEL);
+ if (!*chans)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+ continue;
+
+ if (j == *chan_num) {
+ xdma_err(xdev, "invalid 
channel number"); + return -EIO; + } + + /* init channel structure and hardware */ + xchan = &(*chans)[j]; + xchan->xdev_hdl = xdev; + xchan->base = base + i * XDMA_CHAN_STRIDE; + xchan->dir = dir; + + ret = xdma_channel_init(xchan); + if (ret) + return ret; + xchan->vchan.desc_free = xdma_free_desc; + vchan_init(&xchan->vchan, &xdev->dma_dev); + + j++; + } + + dev_info(&xdev->pdev->dev, "configured %d %s channels", j, + (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H"); + + return 0; +} + +/** + * xdma_issue_pending - Issue pending transactions + * @chan: DMA channel pointer + */ +static void xdma_issue_pending(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + if (vchan_issue_pending(&xdma_chan->vchan)) + xdma_xfer_start(xdma_chan); + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); +} + +/** + * xdma_terminate_all - Terminate all transactions + * @chan: DMA channel pointer + */ +static int xdma_terminate_all(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + LIST_HEAD(head); + + xdma_xfer_stop(xdma_chan); + + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); + + xdma_chan->busy = false; + vd = vchan_next_desc(&xdma_chan->vchan); + if (vd) { + list_del(&vd->node); + dma_cookie_complete(&vd->tx); + vchan_terminate_vdesc(vd); + } + vchan_get_all_descriptors(&xdma_chan->vchan, &head); + list_splice_tail(&head, &xdma_chan->vchan.desc_terminated); + + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); + + return 0; +} + +/** + * xdma_synchronize - Synchronize terminated transactions + * @chan: DMA channel pointer + */ +static void xdma_synchronize(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + vchan_synchronize(&xdma_chan->vchan); +} + +/** + * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk. + * More than one descriptor will be used if the size is bigger + * than XDMA_DESC_BLEN_MAX. 
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
+ *
+ * Return: Number of hardware descriptors filled for this chunk.
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+
+ pr_debug("desc[%u]:%p {src: 0x%llx, dst: 0x%llx, length: %u}",
+ desc_num, desc, src_addr, dst_addr, len);
+
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_desc *sw_desc;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ dev_dbg(&xdev->pdev->dev, "desc[%s]:%p, dev_addr: 0x%llx",
+ dir == DMA_MEM_TO_DEV ? "H2C" : "C2H", sw_desc, dev_addr);
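+
+ /*
+ * The scatterlist walk below is direction-agnostic: *src and *dst
+ * alias either the per-segment memory address (addr) or the device
+ * address (dev_addr) set up above, so a single fill loop serves both
+ * H2C and C2H transfers.
+ */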
"C2H" : "H2C", + sw_desc, + src, + dst, + sg_dma_len(sg)); + + + desc_num = 0; + for_each_sg(sgl, sg, sg_len, i) { + addr = sg_dma_address(sg); + desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num); + dev_addr += sg_dma_len(sg); + } + + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); + if (!tx_desc) + goto failed; + + return tx_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + + return NULL; +} + +/** + * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions + * @chan: DMA channel pointer + * @address: Device DMA address to access + * @size: Total length to transfer + * @period_size: Period size to use for each transfer + * @dir: Transfer direction + * @flags: Transfer ack flags + */ +static struct dma_async_tx_descriptor * +xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, + size_t size, size_t period_size, + enum dma_transfer_direction dir, + unsigned long flags) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + unsigned int periods = size / period_size; + struct dma_async_tx_descriptor *tx_desc; + struct xdma_desc *sw_desc; + u64 addr, dev_addr, *src, *dst; + u32 desc_num = 0; + unsigned int i; + + /* + * Simplify the whole logic by preventing an abnormally high number of + * periods and periods size. + */ + if (period_size > XDMA_DESC_BLEN_MAX) { + xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX); + return NULL; + } + + if (periods > XDMA_DESC_ADJACENT) { + xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT); + return NULL; + } + + sw_desc = xdma_alloc_desc(xdma_chan, periods, true); + if (!sw_desc) + return NULL; + + sw_desc->periods = periods; + sw_desc->period_size = period_size; + sw_desc->dir = dir; + sw_desc->interleaved_dma = false; + + addr = address; + if (dir == DMA_MEM_TO_DEV) { + dev_addr = xdma_chan->cfg.dst_addr; + src = &addr; + dst = &dev_addr; + } else { + dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr; + src = &dev_addr; + dst = &addr; + } + + dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %lu}", + dir == DMA_MEM_TO_DEV ? 
"C2H" : "H2C", + sw_desc, + src, + dst, + period_size); + + for (i = 0; i < periods; i++) { + xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num++); + addr += period_size; + } + + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); + if (!tx_desc) + goto failed; + + return tx_desc; + +failed: + xdma_free_desc(&sw_desc->vdesc); + + return NULL; +} + +/** + * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers + * @chan: DMA channel + * @xt: DMA transfer template + * @flags: tx flags + */ +static struct dma_async_tx_descriptor * +xdma_prep_interleaved_dma(struct dma_chan *chan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + int i; + u32 desc_num = 0, period_size = 0; + struct dma_async_tx_descriptor *tx_desc; + struct xdma_chan *xchan = to_xdma_chan(chan); + struct xdma_desc *sw_desc; + u64 src_addr, dst_addr; + + for (i = 0; i < xt->frame_size; ++i) + desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX); + + sw_desc = xdma_alloc_desc(xchan, desc_num, false); + if (!sw_desc) + return NULL; + sw_desc->dir = xt->dir; + sw_desc->interleaved_dma = true; + sw_desc->cyclic = flags & DMA_PREP_REPEAT; + sw_desc->frames_left = xt->numf; + sw_desc->periods = xt->numf; + + desc_num = 0; + src_addr = xt->src_start; + dst_addr = xt->dst_start; + for (i = 0; i < xt->frame_size; ++i) { + desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num); + src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ? + xt->sgl[i].size : 0); + dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ? + xt->sgl[i].size : 0); + period_size += xt->sgl[i].size; + } + sw_desc->period_size = period_size; + + tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags); + if (tx_desc) + return tx_desc; + + xdma_free_desc(&sw_desc->vdesc); + return NULL; +} + +/** + * xdma_device_config - Configure the DMA channel + * @chan: DMA channel + * @cfg: channel configuration + */ +static int xdma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg)); + + return 0; +} + +/** + * xdma_free_chan_resources - Free channel resources + * @chan: DMA channel + */ +static void xdma_free_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + + vchan_free_chan_resources(&xdma_chan->vchan); + dma_pool_free(xdma_chan->desc_pool, + xdma_chan->write_back, + xdma_chan->write_back->dma_addr); + dma_pool_destroy(xdma_chan->desc_pool); + xdma_chan->desc_pool = NULL; +} + +/** + * xdma_alloc_chan_resources - Allocate channel resources + * @chan: DMA channel + */ +static int xdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct xdma_device *xdev = xdma_chan->xdev_hdl; + struct device *dev = xdev->dma_dev.dev; + dma_addr_t write_back_addr; + + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + if (!dev) { + xdma_err(xdev, "unable to find pci device"); + return -EINVAL; + } + + //Allocate the pool WITH the H2C write back + xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE + + sizeof(struct xdma_c2h_write_back), + XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY); + if (!xdma_chan->desc_pool) { + xdma_err(xdev, "unable to allocate descriptor pool"); + return -ENOMEM; + } + + /* Allocate the C2H write back out of the pool*/ + + xdma_chan->write_back = 
+ if (!xdma_chan->write_back) {
+ xdma_err(xdev, "unable to allocate C2H write back block");
+ return -ENOMEM;
+ }
+ xdma_chan->write_back->dma_addr = write_back_addr;
+
+ dev_dbg(dev, "C2H write_back: %p, dma_addr: %pad", xdma_chan->write_back, &write_back_addr);
+
+ return 0;
+}
+
+static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_desc *desc = NULL;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int period_idx;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return ret;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+ struct xdma_chan *xchan = dev_id;
+ u32 complete_desc_num = 0;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
+ struct xdma_desc *desc;
+ int ret;
+ u32 st;
+ bool repeat_tx;
+
+ spin_lock(&xchan->vchan.lock);
+
+ /* get submitted request */
+ vd = vchan_next_desc(&xchan->vchan);
+ if (!vd)
+ goto out;
+
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
+ desc = to_xdma_desc(vd);
+
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+ &complete_desc_num);
+ if (ret)
+ goto out;
+
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
+ goto out;
+ }
+
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
+
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
+
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != 
XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) + goto out; + + /* transfer the rest of data */ + xdma_xfer_start(xchan); + } else { + desc->completed_desc_num = complete_desc_num; + vchan_cyclic_callback(vd); + } + +out: + spin_unlock(&xchan->vchan.lock); + return IRQ_HANDLED; +} + +/** + * xdma_irq_fini - Uninitialize IRQ + * @xdev: DMA device pointer + */ +static void xdma_irq_fini(struct xdma_device *xdev) +{ + int i; + + /* disable interrupt */ + regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0); + + /* free irq handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + for (i = 0; i < xdev->c2h_chan_num; i++) + free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]); +} + +/** + * xdma_set_vector_reg - configure hardware IRQ registers + * @xdev: DMA device pointer + * @vec_tbl_start: Start of IRQ registers + * @irq_start: Start of IRQ + * @irq_num: Number of IRQ + */ +static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, + u32 irq_start, u32 irq_num) +{ + u32 shift, i, val = 0; + int ret; + + /* Each IRQ register is 32 bit and contains 4 IRQs */ + while (irq_num > 0) { + for (i = 0; i < 4; i++) { + shift = XDMA_IRQ_VEC_SHIFT * i; + val |= irq_start << shift; + irq_start++; + irq_num--; + if (!irq_num) + break; + } + + /* write IRQ register */ + ret = regmap_write(xdev->rmap, vec_tbl_start, val); + if (ret) + return ret; + vec_tbl_start += sizeof(u32); + val = 0; + } + + return 0; +} + +/** + * xdma_irq_init - initialize IRQs + * @xdev: DMA device pointer + */ +static int xdma_irq_init(struct xdma_device *xdev) +{ + u32 irq = xdev->irq_start; + u32 user_irq_start; + int i, j, ret; + + /* return failure if there are not enough IRQs */ + if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) { + xdma_err(xdev, "not enough irq"); + return -EINVAL; + } + + /* setup H2C interrupt handler */ + for (i = 0; i < xdev->h2c_chan_num; i++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-h2c-channel", &xdev->h2c_chans[i]); + if (ret) { + xdma_err(xdev, "H2C channel%d request irq%d failed: %d", + i, irq, ret); + goto failed_init_h2c; + } + xdev->h2c_chans[i].irq = irq; + irq++; + } + + /* setup C2H interrupt handler */ + for (j = 0; j < xdev->c2h_chan_num; j++) { + ret = request_irq(irq, xdma_channel_isr, 0, + "xdma-c2h-channel", &xdev->c2h_chans[j]); + if (ret) { + xdma_err(xdev, "C2H channel%d request irq%d failed: %d", + j, irq, ret); + goto failed_init_c2h; + } + xdev->c2h_chans[j].irq = irq; + irq++; + } + + /* config hardware IRQ registers */ + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, + XDMA_CHAN_NUM(xdev)); + if (ret) { + xdma_err(xdev, "failed to set channel vectors: %d", ret); + goto failed_init_c2h; + } + + /* config user IRQ registers if needed */ + user_irq_start = XDMA_CHAN_NUM(xdev); + if (xdev->irq_num > user_irq_start) { + ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM, + user_irq_start, + xdev->irq_num - user_irq_start); + if (ret) { + xdma_err(xdev, "failed to set user vectors: %d", ret); + goto failed_init_c2h; + } + } + + /* enable interrupt */ + ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); + if (ret) + goto failed_init_c2h; + + return 0; + +failed_init_c2h: + while (j--) + free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]); +failed_init_h2c: + while (i--) + free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); + + return ret; +} + +static bool xdma_filter_fn(struct dma_chan *chan, void *param) +{ + struct xdma_chan *xdma_chan = to_xdma_chan(chan); + struct 
xdma_chan_info *chan_info = param; + + return chan_info->dir == xdma_chan->dir; +} + +/** + * xdma_disable_user_irq - Disable user interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return; + } + index -= XDMA_CHAN_NUM(xdev); + + regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index); +} +EXPORT_SYMBOL(xdma_disable_user_irq); + +/** + * xdma_enable_user_irq - Enable user logic interrupt + * @pdev: Pointer to the platform_device structure + * @irq_num: System IRQ number + */ +int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + u32 index; + int ret; + + index = irq_num - xdev->irq_start; + if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq number"); + return -EINVAL; + } + index -= XDMA_CHAN_NUM(xdev); + + ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(xdma_enable_user_irq); + +/** + * xdma_get_user_irq - Get system IRQ number + * @pdev: Pointer to the platform_device structure + * @user_irq_index: User logic IRQ wire index + * + * Return: The system IRQ number allocated for the given wire index. + */ +int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) { + xdma_err(xdev, "invalid user irq index"); + return -EINVAL; + } + + return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index; +} +EXPORT_SYMBOL(xdma_get_user_irq); + +/** + * xdma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + */ +static void xdma_remove(struct platform_device *pdev) +{ + struct xdma_device *xdev = platform_get_drvdata(pdev); + + if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX) + xdma_irq_fini(xdev); + + if (xdev->status & XDMA_DEV_STATUS_REG_DMA) + dma_async_device_unregister(&xdev->dma_dev); +} + +/** + * xdma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + */ +static int xdma_probe(struct platform_device *pdev) +{ + struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev); + struct xdma_device *xdev; + void __iomem *reg_base; + struct resource *res; + int ret = -ENODEV; + + if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) { + dev_err(&pdev->dev, "invalid max dma channels %d", + pdata->max_dma_channels); + return -EINVAL; + } + + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + platform_set_drvdata(pdev, xdev); + xdev->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + xdma_err(xdev, "failed to get irq resource"); + goto failed; + } + xdev->irq_start = res->start; + xdev->irq_num = resource_size(res); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + xdma_err(xdev, "failed to get io resource"); + goto failed; + } + + reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg_base)) { + xdma_err(xdev, "ioremap failed"); + goto failed; + } + + xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base, + &xdma_regmap_config); + if (IS_ERR(xdev->rmap)) { 
+ ret = PTR_ERR(xdev->rmap);
+ xdma_err(xdev, "config regmap failed: %d", ret);
+ goto failed;
+ }
+
+ INIT_LIST_HEAD(&xdev->dma_dev.channels);
+
+ ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ xdma_err(xdev, "config H2C channels failed: %d", ret);
+ goto failed;
+ }
+
+ ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ xdma_err(xdev, "config C2H channels failed: %d", ret);
+ goto failed;
+ }
+
+ dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
+
+ xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
+ xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
+ xdev->dma_dev.device_tx_status = xdma_tx_status;
+ xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
+ xdev->dma_dev.device_config = xdma_device_config;
+ xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
+ xdev->dma_dev.filter.map = pdata->device_map;
+ xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
+ xdev->dma_dev.filter.fn = xdma_filter_fn;
+ xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
+
+ ret = dma_async_device_register(&xdev->dma_dev);
+ if (ret) {
+ xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_REG_DMA;
+
+ ret = xdma_irq_init(xdev);
+ if (ret) {
+ xdma_err(xdev, "failed to init msix: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
+
+ return 0;
+
+failed:
+ xdma_remove(pdev);
+
+ return ret;
+}
+
+static const struct platform_device_id xdma_id_table[] = {
+ { "xdma", 0},
+ { },
+};
+
+static struct platform_driver xdma_driver = {
+ .driver = {
+ .name = "xdma",
+ },
+ .id_table = xdma_id_table,
+ .probe = xdma_probe,
+ .remove_new = xdma_remove,
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("AMD XDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
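+
+/*
+ * Editor's note - minimal usage sketch (not part of the driver): how a
+ * client might obtain one of the channels registered above and run a
+ * cyclic transfer through the standard dmaengine API. The identifiers
+ * client_dev, fifo_addr, buf_dma, BUF_LEN and PERIOD_LEN are
+ * hypothetical placeholders, and the channel name comes from the
+ * pdata->device_map slave map installed by the parent device.
+ *
+ *	struct dma_slave_config cfg = { .dst_addr = fifo_addr };
+ *	struct dma_async_tx_descriptor *tx;
+ *	struct dma_chan *chan;
+ *
+ *	chan = dma_request_chan(client_dev, "h2c");
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, BUF_LEN, PERIOD_LEN,
+ *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */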