author:    Christian Pointner <equinox@helsinki.at>  2024-05-10 18:52:23 (GMT)
committer: Christian Pointner <equinox@helsinki.at>  2024-05-10 18:52:23 (GMT)
commit:    a641800acf13b5fb1463d4280c3ee7fc267143fb (patch)
tree:      248b647a682f71d9eb90d14d24081368ea905a42 /snd-alpx/core/generic/6.2
parent:    cc4badffe0e02d159c21eb90ea080a6a2f90cb4b (diff)
import whole driver package
Diffstat (limited to 'snd-alpx/core/generic/6.2')
-rw-r--r--  snd-alpx/core/generic/6.2/amd_xdma.h         |   34 -
-rw-r--r--  snd-alpx/core/generic/6.2/dmaengine.c        | 1652 -
-rw-r--r--  snd-alpx/core/generic/6.2/dmaengine.h        |  201 -
-rw-r--r--  snd-alpx/core/generic/6.2/virt-dma.c         |  142 -
-rw-r--r--  snd-alpx/core/generic/6.2/virt-dma.h         |  227 -
-rw-r--r--  snd-alpx/core/generic/6.2/xilinx/xdma-regs.h |  169 -
-rw-r--r--  snd-alpx/core/generic/6.2/xilinx/xdma.c      | 1437 -
7 files changed, 0 insertions, 3862 deletions
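
The files removed below vendor the kernel's DMAengine core (dmaengine.c/.h), the virt-dma helpers and the Xilinx XDMA driver for 6.2-generation kernels. As a reader's aid, here is a minimal sketch (not part of this commit) of how a client driver consumes the DMAengine API these files implement; the function name example_start_rx, the "rx" channel mapping and the fifo parameter are illustrative assumptions:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static int example_start_rx(struct device *dev, dma_addr_t fifo,
			    dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,		/* device FIFO bus address (assumed) */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/* resolved via DT/ACPI or the dma_slave_map filter path in dmaengine.c */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto err;
	}

	cookie = dmaengine_submit(desc);	/* provider assigns a cookie (dmaengine.h) */
	ret = dma_submit_error(cookie);
	if (ret)
		goto err;

	dma_async_issue_pending(chan);		/* actually starts the queued transfer */
	return 0;

err:
	dma_release_channel(chan);
	return ret;
}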
diff --git a/snd-alpx/core/generic/6.2/amd_xdma.h b/snd-alpx/core/generic/6.2/amd_xdma.h deleted file mode 100644 index b5e23e1..0000000 --- a/snd-alpx/core/generic/6.2/amd_xdma.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2022, Advanced Micro Devices, Inc. - */ - -#ifndef _PLATDATA_AMD_XDMA_H -#define _PLATDATA_AMD_XDMA_H - -#include <linux/dmaengine.h> - -/** - * struct xdma_chan_info - DMA channel information - * This information is used to match channel when request dma channel - * @dir: Channel transfer direction - */ -struct xdma_chan_info { - enum dma_transfer_direction dir; -}; - -#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info)) - -struct dma_slave_map; - -/** - * struct xdma_platdata - platform specific data for XDMA engine - * @max_dma_channels: Maximum dma channels in each direction - */ -struct xdma_platdata { - u32 max_dma_channels; - u32 device_map_cnt; - struct dma_slave_map *device_map; -}; - -#endif /* _PLATDATA_AMD_XDMA_H */ diff --git a/snd-alpx/core/generic/6.2/dmaengine.c b/snd-alpx/core/generic/6.2/dmaengine.c deleted file mode 100644 index 8a6e6b6..0000000 --- a/snd-alpx/core/generic/6.2/dmaengine.c +++ /dev/null @@ -1,1652 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. - */ - -/* - * This code implements the DMA subsystem. It provides a HW-neutral interface - * for other kernel code to use asynchronous memory copy capabilities, - * if present, and allows different HW DMA drivers to register as providing - * this capability. - * - * Due to the fact we are accelerating what is already a relatively fast - * operation, the code goes to great lengths to avoid additional overhead, - * such as locking. - * - * LOCKING: - * - * The subsystem keeps a global list of dma_device structs it is protected by a - * mutex, dma_list_mutex. - * - * A subsystem can get access to a channel by calling dmaengine_get() followed - * by dma_find_channel(), or if it has need for an exclusive channel it can call - * dma_request_channel(). Once a channel is allocated a reference is taken - * against its corresponding driver to disable removal. - * - * Each device has a channels list, which runs unlocked but is never modified - * once the device is registered, it's just setup by the driver. 
- * - * See Documentation/driver-api/dmaengine for more details - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/platform_device.h> -#include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/device.h> -#include <linux/dmaengine.h> -#include <linux/hardirq.h> -#include <linux/spinlock.h> -#include <linux/percpu.h> -#include <linux/rcupdate.h> -#include <linux/mutex.h> -#include <linux/jiffies.h> -#include <linux/rculist.h> -#include <linux/idr.h> -#include <linux/slab.h> -#include <linux/acpi.h> -#include <linux/acpi_dma.h> -#include <linux/of_dma.h> -#include <linux/mempool.h> -#include <linux/numa.h> - -#include "dmaengine.h" - -static DEFINE_MUTEX(dma_list_mutex); -static DEFINE_IDA(dma_ida); -static LIST_HEAD(dma_device_list); -static long dmaengine_ref_count; - -/* --- debugfs implementation --- */ -#ifdef CONFIG_DEBUG_FS -#include <linux/debugfs.h> - -static struct dentry *rootdir; - -static void dmaengine_debug_register(struct dma_device *dma_dev) -{ - dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev), - rootdir); - if (IS_ERR(dma_dev->dbg_dev_root)) - dma_dev->dbg_dev_root = NULL; -} - -static void dmaengine_debug_unregister(struct dma_device *dma_dev) -{ - debugfs_remove_recursive(dma_dev->dbg_dev_root); - dma_dev->dbg_dev_root = NULL; -} - -static void dmaengine_dbg_summary_show(struct seq_file *s, - struct dma_device *dma_dev) -{ - struct dma_chan *chan; - - list_for_each_entry(chan, &dma_dev->channels, device_node) { - if (chan->client_count) { - seq_printf(s, " %-13s| %s", dma_chan_name(chan), - chan->dbg_client_name ?: "in-use"); - - if (chan->router) - seq_printf(s, " (via router: %s)\n", - dev_name(chan->router->dev)); - else - seq_puts(s, "\n"); - } - } -} - -static int dmaengine_summary_show(struct seq_file *s, void *data) -{ - struct dma_device *dma_dev = NULL; - - mutex_lock(&dma_list_mutex); - list_for_each_entry(dma_dev, &dma_device_list, global_node) { - seq_printf(s, "dma%d (%s): number of channels: %u\n", - dma_dev->dev_id, dev_name(dma_dev->dev), - dma_dev->chancnt); - - if (dma_dev->dbg_summary_show) - dma_dev->dbg_summary_show(s, dma_dev); - else - dmaengine_dbg_summary_show(s, dma_dev); - - if (!list_is_last(&dma_dev->global_node, &dma_device_list)) - seq_puts(s, "\n"); - } - mutex_unlock(&dma_list_mutex); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(dmaengine_summary); - -static void __init dmaengine_debugfs_init(void) -{ - rootdir = debugfs_create_dir("dmaengine", NULL); - - /* /sys/kernel/debug/dmaengine/summary */ - debugfs_create_file("summary", 0444, rootdir, NULL, - &dmaengine_summary_fops); -} -#else -static inline void dmaengine_debugfs_init(void) { } -static inline int dmaengine_debug_register(struct dma_device *dma_dev) -{ - return 0; -} - -static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { } -#endif /* DEBUG_FS */ - -/* --- sysfs implementation --- */ - -#define DMA_SLAVE_NAME "slave" - -/** - * dev_to_dma_chan - convert a device pointer to its sysfs container object - * @dev: device node - * - * Must be called under dma_list_mutex. 
- */ -static struct dma_chan *dev_to_dma_chan(struct device *dev) -{ - struct dma_chan_dev *chan_dev; - - chan_dev = container_of(dev, typeof(*chan_dev), device); - return chan_dev->chan; -} - -static ssize_t memcpy_count_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dma_chan *chan; - unsigned long count = 0; - int i; - int err; - - mutex_lock(&dma_list_mutex); - chan = dev_to_dma_chan(dev); - if (chan) { - for_each_possible_cpu(i) - count += per_cpu_ptr(chan->local, i)->memcpy_count; - err = sprintf(buf, "%lu\n", count); - } else - err = -ENODEV; - mutex_unlock(&dma_list_mutex); - - return err; -} -static DEVICE_ATTR_RO(memcpy_count); - -static ssize_t bytes_transferred_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dma_chan *chan; - unsigned long count = 0; - int i; - int err; - - mutex_lock(&dma_list_mutex); - chan = dev_to_dma_chan(dev); - if (chan) { - for_each_possible_cpu(i) - count += per_cpu_ptr(chan->local, i)->bytes_transferred; - err = sprintf(buf, "%lu\n", count); - } else - err = -ENODEV; - mutex_unlock(&dma_list_mutex); - - return err; -} -static DEVICE_ATTR_RO(bytes_transferred); - -static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct dma_chan *chan; - int err; - - mutex_lock(&dma_list_mutex); - chan = dev_to_dma_chan(dev); - if (chan) - err = sprintf(buf, "%d\n", chan->client_count); - else - err = -ENODEV; - mutex_unlock(&dma_list_mutex); - - return err; -} -static DEVICE_ATTR_RO(in_use); - -static struct attribute *dma_dev_attrs[] = { - &dev_attr_memcpy_count.attr, - &dev_attr_bytes_transferred.attr, - &dev_attr_in_use.attr, - NULL, -}; -ATTRIBUTE_GROUPS(dma_dev); - -static void chan_dev_release(struct device *dev) -{ - struct dma_chan_dev *chan_dev; - - chan_dev = container_of(dev, typeof(*chan_dev), device); - kfree(chan_dev); -} - -static struct class dma_devclass = { - .name = "dma", - .dev_groups = dma_dev_groups, - .dev_release = chan_dev_release, -}; - -/* --- client and device registration --- */ - -/* enable iteration over all operation types */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * struct dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan: associated channel for this entry - */ -struct dma_chan_tbl_ent { - struct dma_chan *chan; -}; - -/* percpu lookup table for memory-to-memory offload providers */ -static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; - -static int __init dma_channel_table_init(void) -{ - enum dma_transaction_type cap; - int err = 0; - - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* 'interrupt', 'private', and 'slave' are channel capabilities, - * but are not associated with an operation so they do not need - * an entry in the channel_table - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); - clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); - if (!channel_table[cap]) { - err = -ENOMEM; - break; - } - } - - if (err) { - pr_err("dmaengine dma_channel_table_init failure: %d\n", err); - for_each_dma_cap_mask(cap, dma_cap_mask_all) - free_percpu(channel_table[cap]); - } - - return err; -} -arch_initcall(dma_channel_table_init); - -/** - * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU - * @chan: DMA channel to test - * @cpu: CPU index which the channel should be close to 
- * - * Returns true if the channel is in the same NUMA-node as the CPU. - */ -static bool dma_chan_is_local(struct dma_chan *chan, int cpu) -{ - int node = dev_to_node(chan->device->dev); - return node == NUMA_NO_NODE || - cpumask_test_cpu(cpu, cpumask_of_node(node)); -} - -/** - * min_chan - finds the channel with min count and in the same NUMA-node as the CPU - * @cap: capability to match - * @cpu: CPU index which the channel should be close to - * - * If some channels are close to the given CPU, the one with the lowest - * reference count is returned. Otherwise, CPU is ignored and only the - * reference count is taken into account. - * - * Must be called under dma_list_mutex. - */ -static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) -{ - struct dma_device *device; - struct dma_chan *chan; - struct dma_chan *min = NULL; - struct dma_chan *localmin = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (!dma_has_cap(cap, device->cap_mask) || - dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - if (!chan->client_count) - continue; - if (!min || chan->table_count < min->table_count) - min = chan; - - if (dma_chan_is_local(chan, cpu)) - if (!localmin || - chan->table_count < localmin->table_count) - localmin = chan; - } - } - - chan = localmin ? localmin : min; - - if (chan) - chan->table_count++; - - return chan; -} - -/** - * dma_channel_rebalance - redistribute the available channels - * - * Optimize for CPU isolation (each CPU gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. - * - * Must be called under dma_list_mutex. - */ -static void dma_channel_rebalance(void) -{ - struct dma_chan *chan; - struct dma_device *device; - int cpu; - int cap; - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) - per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - chan->table_count = 0; - } - - /* don't populate the channel_table if no clients are available */ - if (!dmaengine_ref_count) - return; - - /* redistribute available channels */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - chan = min_chan(cap, cpu); - per_cpu_ptr(channel_table[cap], cpu)->chan = chan; - } -} - -static int dma_device_satisfies_mask(struct dma_device *device, - const dma_cap_mask_t *want) -{ - dma_cap_mask_t has; - - bitmap_and(has.bits, want->bits, device->cap_mask.bits, - DMA_TX_TYPE_END); - return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); -} - -static struct module *dma_chan_to_owner(struct dma_chan *chan) -{ - return chan->device->owner; -} - -/** - * balance_ref_count - catch up the channel reference count - * @chan: channel to balance ->client_count versus dmaengine_ref_count - * - * Must be called under dma_list_mutex. 
- */ -static void balance_ref_count(struct dma_chan *chan) -{ - struct module *owner = dma_chan_to_owner(chan); - - while (chan->client_count < dmaengine_ref_count) { - __module_get(owner); - chan->client_count++; - } -} - -static void dma_device_release(struct kref *ref) -{ - struct dma_device *device = container_of(ref, struct dma_device, ref); - - list_del_rcu(&device->global_node); - dma_channel_rebalance(); - - if (device->device_release) - device->device_release(device); -} - -static void dma_device_put(struct dma_device *device) -{ - lockdep_assert_held(&dma_list_mutex); - kref_put(&device->ref, dma_device_release); -} - -/** - * dma_chan_get - try to grab a DMA channel's parent driver module - * @chan: channel to grab - * - * Must be called under dma_list_mutex. - */ -static int dma_chan_get(struct dma_chan *chan) -{ - struct module *owner = dma_chan_to_owner(chan); - int ret; - - /* The channel is already in use, update client count */ - if (chan->client_count) { - __module_get(owner); - chan->client_count++; - return 0; - } - - if (!try_module_get(owner)) - return -ENODEV; - - ret = kref_get_unless_zero(&chan->device->ref); - if (!ret) { - ret = -ENODEV; - goto module_put_out; - } - - /* allocate upon first client reference */ - if (chan->device->device_alloc_chan_resources) { - ret = chan->device->device_alloc_chan_resources(chan); - if (ret < 0) - goto err_out; - } - - chan->client_count++; - - if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) - balance_ref_count(chan); - - return 0; - -err_out: - dma_device_put(chan->device); -module_put_out: - module_put(owner); - return ret; -} - -/** - * dma_chan_put - drop a reference to a DMA channel's parent driver module - * @chan: channel to release - * - * Must be called under dma_list_mutex. 
- */ -static void dma_chan_put(struct dma_chan *chan) -{ - /* This channel is not in use, bail out */ - if (!chan->client_count) - return; - - chan->client_count--; - - /* This channel is not in use anymore, free it */ - if (!chan->client_count && chan->device->device_free_chan_resources) { - /* Make sure all operations have completed */ - dmaengine_synchronize(chan); - chan->device->device_free_chan_resources(chan); - } - - /* If the channel is used via a DMA request router, free the mapping */ - if (chan->router && chan->router->route_free) { - chan->router->route_free(chan->router->dev, chan->route_data); - chan->router = NULL; - chan->route_data = NULL; - } - - dma_device_put(chan->device); - module_put(dma_chan_to_owner(chan)); -} - -enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) -{ - enum dma_status status; - unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); - - dma_async_issue_pending(chan); - do { - status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (time_after_eq(jiffies, dma_sync_wait_timeout)) { - dev_err(chan->device->dev, "%s: timeout!\n", __func__); - return DMA_ERROR; - } - if (status != DMA_IN_PROGRESS) - break; - cpu_relax(); - } while (1); - - return status; -} -EXPORT_SYMBOL(dma_sync_wait); - -/** - * dma_find_channel - find a channel to carry out the operation - * @tx_type: transaction type - */ -struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) -{ - return this_cpu_read(channel_table[tx_type]->chan); -} -EXPORT_SYMBOL(dma_find_channel); - -/** - * dma_issue_pending_all - flush all pending operations across all channels - */ -void dma_issue_pending_all(void) -{ - struct dma_device *device; - struct dma_chan *chan; - - rcu_read_lock(); - list_for_each_entry_rcu(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - if (chan->client_count) - device->device_issue_pending(chan); - } - rcu_read_unlock(); -} -EXPORT_SYMBOL(dma_issue_pending_all); - -int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) -{ - struct dma_device *device; - - if (!chan || !caps) - return -EINVAL; - - device = chan->device; - - /* check if the channel supports slave transactions */ - if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || - test_bit(DMA_CYCLIC, device->cap_mask.bits))) - return -ENXIO; - - /* - * Check whether it reports it uses the generic slave - * capabilities, if not, that means it doesn't support any - * kind of slave capabilities reporting. - */ - if (!device->directions) - return -ENXIO; - - caps->src_addr_widths = device->src_addr_widths; - caps->dst_addr_widths = device->dst_addr_widths; - caps->directions = device->directions; - caps->min_burst = device->min_burst; - caps->max_burst = device->max_burst; - caps->max_sg_burst = device->max_sg_burst; - caps->residue_granularity = device->residue_granularity; - caps->descriptor_reuse = device->descriptor_reuse; - caps->cmd_pause = !!device->device_pause; - caps->cmd_resume = !!device->device_resume; - caps->cmd_terminate = !!device->device_terminate_all; - - /* - * DMA engine device might be configured with non-uniformly - * distributed slave capabilities per device channels. In this - * case the corresponding driver may provide the device_caps - * callback to override the generic capabilities with - * channel-specific ones. 
- */ - if (device->device_caps) - device->device_caps(chan, caps); - - return 0; -} -EXPORT_SYMBOL_GPL(dma_get_slave_caps); - -static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, - struct dma_device *dev, - dma_filter_fn fn, void *fn_param) -{ - struct dma_chan *chan; - - if (mask && !dma_device_satisfies_mask(dev, mask)) { - dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); - return NULL; - } - /* devices with multiple channels need special handling as we need to - * ensure that all channels are either private or public. - */ - if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) - list_for_each_entry(chan, &dev->channels, device_node) { - /* some channels are already publicly allocated */ - if (chan->client_count) - return NULL; - } - - list_for_each_entry(chan, &dev->channels, device_node) { - if (chan->client_count) { - dev_dbg(dev->dev, "%s: %s busy\n", - __func__, dma_chan_name(chan)); - continue; - } - if (fn && !fn(chan, fn_param)) { - dev_dbg(dev->dev, "%s: %s filter said false\n", - __func__, dma_chan_name(chan)); - continue; - } - return chan; - } - - return NULL; -} - -static struct dma_chan *find_candidate(struct dma_device *device, - const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) -{ - struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); - int err; - - if (chan) { - /* Found a suitable channel, try to grab, prep, and return it. - * We first set DMA_PRIVATE to disable balance_ref_count as this - * channel will not be published in the general-purpose - * allocator - */ - dma_cap_set(DMA_PRIVATE, device->cap_mask); - device->privatecnt++; - err = dma_chan_get(chan); - - if (err) { - if (err == -ENODEV) { - dev_dbg(device->dev, "%s: %s module removed\n", - __func__, dma_chan_name(chan)); - list_del_rcu(&device->global_node); - } else - dev_dbg(device->dev, - "%s: failed to get %s: (%d)\n", - __func__, dma_chan_name(chan), err); - - if (--device->privatecnt == 0) - dma_cap_clear(DMA_PRIVATE, device->cap_mask); - - chan = ERR_PTR(err); - } - } - - return chan ? chan : ERR_PTR(-EPROBE_DEFER); -} - -/** - * dma_get_slave_channel - try to get specific channel exclusively - * @chan: target channel - */ -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) -{ - /* lock against __dma_request_channel */ - mutex_lock(&dma_list_mutex); - - if (chan->client_count == 0) { - struct dma_device *device = chan->device; - int err; - - dma_cap_set(DMA_PRIVATE, device->cap_mask); - device->privatecnt++; - err = dma_chan_get(chan); - if (err) { - dev_dbg(chan->device->dev, - "%s: failed to get %s: (%d)\n", - __func__, dma_chan_name(chan), err); - chan = NULL; - if (--device->privatecnt == 0) - dma_cap_clear(DMA_PRIVATE, device->cap_mask); - } - } else - chan = NULL; - - mutex_unlock(&dma_list_mutex); - - - return chan; -} -EXPORT_SYMBOL_GPL(dma_get_slave_channel); - -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) -{ - dma_cap_mask_t mask; - struct dma_chan *chan; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - /* lock against __dma_request_channel */ - mutex_lock(&dma_list_mutex); - - chan = find_candidate(device, &mask, NULL, NULL); - - mutex_unlock(&dma_list_mutex); - - return IS_ERR(chan) ? 
NULL : chan; -} -EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); - -/** - * __dma_request_channel - try to allocate an exclusive channel - * @mask: capabilities that the channel must satisfy - * @fn: optional callback to disposition available channels - * @fn_param: opaque parameter to pass to dma_filter_fn() - * @np: device node to look for DMA channels - * - * Returns pointer to appropriate DMA channel on success or NULL. - */ -struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param, - struct device_node *np) -{ - struct dma_device *device, *_d; - struct dma_chan *chan = NULL; - - /* Find a channel */ - mutex_lock(&dma_list_mutex); - list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { - /* Finds a DMA controller with matching device node */ - if (np && device->dev->of_node && np != device->dev->of_node) - continue; - - chan = find_candidate(device, mask, fn, fn_param); - if (!IS_ERR(chan)) - break; - - chan = NULL; - } - mutex_unlock(&dma_list_mutex); - - pr_debug("%s: %s (%s)\n", - __func__, - chan ? "success" : "fail", - chan ? dma_chan_name(chan) : NULL); - - return chan; -} -EXPORT_SYMBOL_GPL(__dma_request_channel); - -static const struct dma_slave_map *dma_filter_match(struct dma_device *device, - const char *name, - struct device *dev) -{ - int i; - - if (!device->filter.mapcnt) - return NULL; - - for (i = 0; i < device->filter.mapcnt; i++) { - const struct dma_slave_map *map = &device->filter.map[i]; - - if (!strcmp(map->devname, dev_name(dev)) && - !strcmp(map->slave, name)) - return map; - } - - return NULL; -} - -/** - * dma_request_chan - try to allocate an exclusive slave channel - * @dev: pointer to client device structure - * @name: slave channel name - * - * Returns pointer to appropriate DMA channel on success or an error pointer. 
- */ -struct dma_chan *dma_request_chan(struct device *dev, const char *name) -{ - struct dma_device *d, *_d; - struct dma_chan *chan = NULL; - - /* If device-tree is present get slave info from here */ - if (dev->of_node) - chan = of_dma_request_slave_channel(dev->of_node, name); - - /* If device was enumerated by ACPI get slave info from here */ - if (has_acpi_companion(dev) && !chan) - chan = acpi_dma_request_slave_chan_by_name(dev, name); - - if (PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - - if (!IS_ERR_OR_NULL(chan)) - goto found; - - /* Try to find the channel via the DMA filter map(s) */ - mutex_lock(&dma_list_mutex); - list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { - dma_cap_mask_t mask; - const struct dma_slave_map *map = dma_filter_match(d, name, dev); - - if (!map) - continue; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - chan = find_candidate(d, &mask, d->filter.fn, map->param); - if (!IS_ERR(chan)) - break; - } - mutex_unlock(&dma_list_mutex); - - if (IS_ERR(chan)) - return chan; - if (!chan) - return ERR_PTR(-EPROBE_DEFER); - -found: -#ifdef CONFIG_DEBUG_FS - chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), - name); -#endif - - chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); - if (!chan->name) - return chan; - chan->slave = dev; - - if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, - DMA_SLAVE_NAME)) - dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); - if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) - dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name); - - return chan; -} -EXPORT_SYMBOL_GPL(dma_request_chan); - -/** - * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities - * @mask: capabilities that the channel must satisfy - * - * Returns pointer to appropriate DMA channel on success or an error pointer. 
- */ -struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) -{ - struct dma_chan *chan; - - if (!mask) - return ERR_PTR(-ENODEV); - - chan = __dma_request_channel(mask, NULL, NULL, NULL); - if (!chan) { - mutex_lock(&dma_list_mutex); - if (list_empty(&dma_device_list)) - chan = ERR_PTR(-EPROBE_DEFER); - else - chan = ERR_PTR(-ENODEV); - mutex_unlock(&dma_list_mutex); - } - - return chan; -} -EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); - -void dma_release_channel(struct dma_chan *chan) -{ - mutex_lock(&dma_list_mutex); - WARN_ONCE(chan->client_count != 1, - "chan reference count %d != 1\n", chan->client_count); - dma_chan_put(chan); - /* drop PRIVATE cap enabled by __dma_request_channel() */ - if (--chan->device->privatecnt == 0) - dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); - - if (chan->slave) { - sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); - sysfs_remove_link(&chan->slave->kobj, chan->name); - kfree(chan->name); - chan->name = NULL; - chan->slave = NULL; - } - -#ifdef CONFIG_DEBUG_FS - kfree(chan->dbg_client_name); - chan->dbg_client_name = NULL; -#endif - mutex_unlock(&dma_list_mutex); -} -EXPORT_SYMBOL_GPL(dma_release_channel); - -/** - * dmaengine_get - register interest in dma_channels - */ -void dmaengine_get(void) -{ - struct dma_device *device, *_d; - struct dma_chan *chan; - int err; - - mutex_lock(&dma_list_mutex); - dmaengine_ref_count++; - - /* try to grab channels */ - list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - err = dma_chan_get(chan); - if (err == -ENODEV) { - /* module removed before we could use it */ - list_del_rcu(&device->global_node); - break; - } else if (err) - dev_dbg(chan->device->dev, - "%s: failed to get %s: (%d)\n", - __func__, dma_chan_name(chan), err); - } - } - - /* if this is the first reference and there were channels - * waiting we need to rebalance to get those channels - * incorporated into the channel table - */ - if (dmaengine_ref_count == 1) - dma_channel_rebalance(); - mutex_unlock(&dma_list_mutex); -} -EXPORT_SYMBOL(dmaengine_get); - -/** - * dmaengine_put - let DMA drivers be removed when ref_count == 0 - */ -void dmaengine_put(void) -{ - struct dma_device *device, *_d; - struct dma_chan *chan; - - mutex_lock(&dma_list_mutex); - dmaengine_ref_count--; - BUG_ON(dmaengine_ref_count < 0); - /* drop channel references */ - list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - dma_chan_put(chan); - } - mutex_unlock(&dma_list_mutex); -} -EXPORT_SYMBOL(dmaengine_put); - -static bool device_has_all_tx_types(struct dma_device *device) -{ - /* A device that satisfies this test has channels that will never cause - * an async_tx channel switch event as all possible operation types can - * be handled. 
- */ - #ifdef CONFIG_ASYNC_TX_DMA - if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) - return false; - #endif - - #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) - if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) - return false; - #endif - - #if IS_ENABLED(CONFIG_ASYNC_XOR) - if (!dma_has_cap(DMA_XOR, device->cap_mask)) - return false; - - #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA - if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) - return false; - #endif - #endif - - #if IS_ENABLED(CONFIG_ASYNC_PQ) - if (!dma_has_cap(DMA_PQ, device->cap_mask)) - return false; - - #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA - if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) - return false; - #endif - #endif - - return true; -} - -static int get_dma_id(struct dma_device *device) -{ - int rc = ida_alloc(&dma_ida, GFP_KERNEL); - - if (rc < 0) - return rc; - device->dev_id = rc; - return 0; -} - -static int __dma_async_device_channel_register(struct dma_device *device, - struct dma_chan *chan) -{ - int rc; - - chan->local = alloc_percpu(typeof(*chan->local)); - if (!chan->local) - return -ENOMEM; - chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); - if (!chan->dev) { - rc = -ENOMEM; - goto err_free_local; - } - - /* - * When the chan_id is a negative value, we are dynamically adding - * the channel. Otherwise we are static enumerating. - */ - chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); - if (chan->chan_id < 0) { - pr_err("%s: unable to alloc ida for chan: %d\n", - __func__, chan->chan_id); - rc = chan->chan_id; - goto err_free_dev; - } - - chan->dev->device.class = &dma_devclass; - chan->dev->device.parent = device->dev; - chan->dev->chan = chan; - chan->dev->dev_id = device->dev_id; - dev_set_name(&chan->dev->device, "dma%dchan%d", - device->dev_id, chan->chan_id); - rc = device_register(&chan->dev->device); - if (rc) - goto err_out_ida; - chan->client_count = 0; - device->chancnt++; - - return 0; - - err_out_ida: - ida_free(&device->chan_ida, chan->chan_id); - err_free_dev: - kfree(chan->dev); - err_free_local: - free_percpu(chan->local); - chan->local = NULL; - return rc; -} - -int dma_async_device_channel_register(struct dma_device *device, - struct dma_chan *chan) -{ - int rc; - - rc = __dma_async_device_channel_register(device, chan); - if (rc < 0) - return rc; - - dma_channel_rebalance(); - return 0; -} -EXPORT_SYMBOL_GPL(dma_async_device_channel_register); - -static void __dma_async_device_channel_unregister(struct dma_device *device, - struct dma_chan *chan) -{ - WARN_ONCE(!device->device_release && chan->client_count, - "%s called while %d clients hold a reference\n", - __func__, chan->client_count); - mutex_lock(&dma_list_mutex); - device->chancnt--; - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - ida_free(&device->chan_ida, chan->chan_id); - device_unregister(&chan->dev->device); - free_percpu(chan->local); -} - -void dma_async_device_channel_unregister(struct dma_device *device, - struct dma_chan *chan) -{ - __dma_async_device_channel_unregister(device, chan); - dma_channel_rebalance(); -} -EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); - -/** - * dma_async_device_register - registers DMA devices found - * @device: pointer to &struct dma_device - * - * After calling this routine the structure should not be freed except in the - * device_release() callback which will be called after - * dma_async_device_unregister() is called and no further references are taken. 
- */ -int dma_async_device_register(struct dma_device *device) -{ - int rc; - struct dma_chan* chan; - - if (!device) - return -ENODEV; - - /* validate device routines */ - if (!device->dev) { - pr_err("DMAdevice must have dev\n"); - return -EIO; - } - - device->owner = device->dev->driver->owner; - - if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_MEMCPY"); - return -EIO; - } - - if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_XOR"); - return -EIO; - } - - if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_XOR_VAL"); - return -EIO; - } - - if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_PQ"); - return -EIO; - } - - if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_PQ_VAL"); - return -EIO; - } - - if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_MEMSET"); - return -EIO; - } - - if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_INTERRUPT"); - return -EIO; - } - - if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_CYCLIC"); - return -EIO; - } - - if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { - dev_err(device->dev, - "Device claims capability %s, but op is not defined\n", - "DMA_INTERLEAVE"); - return -EIO; - } - - - if (!device->device_tx_status) { - dev_err(device->dev, "Device tx_status is not defined\n"); - return -EIO; - } - - - if (!device->device_issue_pending) { - dev_err(device->dev, "Device issue_pending is not defined\n"); - return -EIO; - } - - if (!device->device_release) - dev_dbg(device->dev, - "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); - - kref_init(&device->ref); - - /* note: this only matters in the - * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case - */ - if (device_has_all_tx_types(device)) - dma_cap_set(DMA_ASYNC_TX, device->cap_mask); - - rc = get_dma_id(device); - if (rc != 0) - return rc; - - ida_init(&device->chan_ida); - - /* represent channels in sysfs. 
Probably want devs too */ - list_for_each_entry(chan, &device->channels, device_node) { - rc = __dma_async_device_channel_register(device, chan); - if (rc < 0) - goto err_out; - } - - mutex_lock(&dma_list_mutex); - /* take references on public channels */ - if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) - list_for_each_entry(chan, &device->channels, device_node) { - /* if clients are already waiting for channels we need - * to take references on their behalf - */ - if (dma_chan_get(chan) == -ENODEV) { - /* note we can only get here for the first - * channel as the remaining channels are - * guaranteed to get a reference - */ - rc = -ENODEV; - mutex_unlock(&dma_list_mutex); - goto err_out; - } - } - list_add_tail_rcu(&device->global_node, &dma_device_list); - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - device->privatecnt++; /* Always private */ - dma_channel_rebalance(); - mutex_unlock(&dma_list_mutex); - - dmaengine_debug_register(device); - - return 0; - -err_out: - /* if we never registered a channel just release the idr */ - if (!device->chancnt) { - ida_free(&dma_ida, device->dev_id); - return rc; - } - - list_for_each_entry(chan, &device->channels, device_node) { - if (chan->local == NULL) - continue; - mutex_lock(&dma_list_mutex); - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - device_unregister(&chan->dev->device); - free_percpu(chan->local); - } - return rc; -} -EXPORT_SYMBOL(dma_async_device_register); - -/** - * dma_async_device_unregister - unregister a DMA device - * @device: pointer to &struct dma_device - * - * This routine is called by dma driver exit routines, dmaengine holds module - * references to prevent it being called while channels are in use. - */ -void dma_async_device_unregister(struct dma_device *device) -{ - struct dma_chan *chan, *n; - - dmaengine_debug_unregister(device); - - list_for_each_entry_safe(chan, n, &device->channels, device_node) - __dma_async_device_channel_unregister(device, chan); - - mutex_lock(&dma_list_mutex); - /* - * setting DMA_PRIVATE ensures the device being torn down will not - * be used in the channel_table - */ - dma_cap_set(DMA_PRIVATE, device->cap_mask); - dma_channel_rebalance(); - ida_free(&dma_ida, device->dev_id); - dma_device_put(device); - mutex_unlock(&dma_list_mutex); -} -EXPORT_SYMBOL(dma_async_device_unregister); - -static void dmam_device_release(struct device *dev, void *res) -{ - struct dma_device *device; - - device = *(struct dma_device **)res; - dma_async_device_unregister(device); -} - -/** - * dmaenginem_async_device_register - registers DMA devices found - * @device: pointer to &struct dma_device - * - * The operation is managed and will be undone on driver detach. 
- */ -int dmaenginem_async_device_register(struct dma_device *device) -{ - void *p; - int ret; - - p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); - if (!p) - return -ENOMEM; - - ret = dma_async_device_register(device); - if (!ret) { - *(struct dma_device **)p = device; - devres_add(device->dev, p); - } else { - devres_free(p); - } - - return ret; -} -EXPORT_SYMBOL(dmaenginem_async_device_register); - -struct dmaengine_unmap_pool { - struct kmem_cache *cache; - const char *name; - mempool_t *pool; - size_t size; -}; - -#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } -static struct dmaengine_unmap_pool unmap_pool[] = { - __UNMAP_POOL(2), - #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) - __UNMAP_POOL(16), - __UNMAP_POOL(128), - __UNMAP_POOL(256), - #endif -}; - -static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) -{ - int order = get_count_order(nr); - - switch (order) { - case 0 ... 1: - return &unmap_pool[0]; -#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) - case 2 ... 4: - return &unmap_pool[1]; - case 5 ... 7: - return &unmap_pool[2]; - case 8: - return &unmap_pool[3]; -#endif - default: - BUG(); - return NULL; - } -} - -static void dmaengine_unmap(struct kref *kref) -{ - struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); - struct device *dev = unmap->dev; - int cnt, i; - - cnt = unmap->to_cnt; - for (i = 0; i < cnt; i++) - dma_unmap_page(dev, unmap->addr[i], unmap->len, - DMA_TO_DEVICE); - cnt += unmap->from_cnt; - for (; i < cnt; i++) - dma_unmap_page(dev, unmap->addr[i], unmap->len, - DMA_FROM_DEVICE); - cnt += unmap->bidi_cnt; - for (; i < cnt; i++) { - if (unmap->addr[i] == 0) - continue; - dma_unmap_page(dev, unmap->addr[i], unmap->len, - DMA_BIDIRECTIONAL); - } - cnt = unmap->map_cnt; - mempool_free(unmap, __get_unmap_pool(cnt)->pool); -} - -void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) -{ - if (unmap) - kref_put(&unmap->kref, dmaengine_unmap); -} -EXPORT_SYMBOL_GPL(dmaengine_unmap_put); - -static void dmaengine_destroy_unmap_pool(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { - struct dmaengine_unmap_pool *p = &unmap_pool[i]; - - mempool_destroy(p->pool); - p->pool = NULL; - kmem_cache_destroy(p->cache); - p->cache = NULL; - } -} - -static int __init dmaengine_init_unmap_pool(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { - struct dmaengine_unmap_pool *p = &unmap_pool[i]; - size_t size; - - size = sizeof(struct dmaengine_unmap_data) + - sizeof(dma_addr_t) * p->size; - - p->cache = kmem_cache_create(p->name, size, 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!p->cache) - break; - p->pool = mempool_create_slab_pool(1, p->cache); - if (!p->pool) - break; - } - - if (i == ARRAY_SIZE(unmap_pool)) - return 0; - - dmaengine_destroy_unmap_pool(); - return -ENOMEM; -} - -struct dmaengine_unmap_data * -dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) -{ - struct dmaengine_unmap_data *unmap; - - unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); - if (!unmap) - return NULL; - - memset(unmap, 0, sizeof(*unmap)); - kref_init(&unmap->kref); - unmap->dev = dev; - unmap->map_cnt = nr; - - return unmap; -} -EXPORT_SYMBOL(dmaengine_get_unmap_data); - -void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, - struct dma_chan *chan) -{ - tx->chan = chan; - #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH - spin_lock_init(&tx->lock); - #endif -} -EXPORT_SYMBOL(dma_async_tx_descriptor_init); - -static inline int 
desc_check_and_set_metadata_mode( - struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) -{ - /* Make sure that the metadata mode is not mixed */ - if (!desc->desc_metadata_mode) { - if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) - desc->desc_metadata_mode = mode; - else - return -ENOTSUPP; - } else if (desc->desc_metadata_mode != mode) { - return -EINVAL; - } - - return 0; -} - -int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, - void *data, size_t len) -{ - int ret; - - if (!desc) - return -EINVAL; - - ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); - if (ret) - return ret; - - if (!desc->metadata_ops || !desc->metadata_ops->attach) - return -ENOTSUPP; - - return desc->metadata_ops->attach(desc, data, len); -} -EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); - -void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, - size_t *payload_len, size_t *max_len) -{ - int ret; - - if (!desc) - return ERR_PTR(-EINVAL); - - ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); - if (ret) - return ERR_PTR(ret); - - if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) - return ERR_PTR(-ENOTSUPP); - - return desc->metadata_ops->get_ptr(desc, payload_len, max_len); -} -EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); - -int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, - size_t payload_len) -{ - int ret; - - if (!desc) - return -EINVAL; - - ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); - if (ret) - return ret; - - if (!desc->metadata_ops || !desc->metadata_ops->set_len) - return -ENOTSUPP; - - return desc->metadata_ops->set_len(desc, payload_len); -} -EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); - -/** - * dma_wait_for_async_tx - spin wait for a transaction to complete - * @tx: in-flight transaction to wait on - */ -enum dma_status -dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) -{ - unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); - - if (!tx) - return DMA_COMPLETE; - - while (tx->cookie == -EBUSY) { - if (time_after_eq(jiffies, dma_sync_wait_timeout)) { - dev_err(tx->chan->device->dev, - "%s timeout waiting for descriptor submission\n", - __func__); - return DMA_ERROR; - } - cpu_relax(); - } - return dma_sync_wait(tx->chan, tx->cookie); -} -EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); - -/** - * dma_run_dependencies - process dependent operations on the target channel - * @tx: transaction with dependencies - * - * Helper routine for DMA drivers to process (start) dependent operations - * on their target channel. 
- */ -void dma_run_dependencies(struct dma_async_tx_descriptor *tx) -{ - struct dma_async_tx_descriptor *dep = txd_next(tx); - struct dma_async_tx_descriptor *dep_next; - struct dma_chan *chan; - - if (!dep) - return; - - /* we'll submit tx->next now, so clear the link */ - txd_clear_next(tx); - chan = dep->chan; - - /* keep submitting up until a channel switch is detected - * in that case we will be called again as a result of - * processing the interrupt from async_tx_channel_switch - */ - for (; dep; dep = dep_next) { - txd_lock(dep); - txd_clear_parent(dep); - dep_next = txd_next(dep); - if (dep_next && dep_next->chan == chan) - txd_clear_next(dep); /* ->next will be submitted */ - else - dep_next = NULL; /* submit current dep and terminate */ - txd_unlock(dep); - - dep->tx_submit(dep); - } - - chan->device->device_issue_pending(chan); -} -EXPORT_SYMBOL_GPL(dma_run_dependencies); - -static int __init dma_bus_init(void) -{ - int err = dmaengine_init_unmap_pool(); - - if (err) - return err; - - err = class_register(&dma_devclass); - if (!err) - dmaengine_debugfs_init(); - - return err; -} -arch_initcall(dma_bus_init); diff --git a/snd-alpx/core/generic/6.2/dmaengine.h b/snd-alpx/core/generic/6.2/dmaengine.h deleted file mode 100644 index 53f16d3..0000000 --- a/snd-alpx/core/generic/6.2/dmaengine.h +++ /dev/null @@ -1,201 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * The contents of this file are private to DMA engine drivers, and is not - * part of the API to be used by DMA engine users. - */ -#ifndef DMAENGINE_H -#define DMAENGINE_H - -#include <linux/bug.h> -#include <linux/dmaengine.h> - -/** - * dma_cookie_init - initialize the cookies for a DMA channel - * @chan: dma channel to initialize - */ -static inline void dma_cookie_init(struct dma_chan *chan) -{ - chan->cookie = DMA_MIN_COOKIE; - chan->completed_cookie = DMA_MIN_COOKIE; -} - -/** - * dma_cookie_assign - assign a DMA engine cookie to the descriptor - * @tx: descriptor needing cookie - * - * Assign a unique non-zero per-channel cookie to the descriptor. - * Note: caller is expected to hold a lock to prevent concurrency. - */ -static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) -{ - struct dma_chan *chan = tx->chan; - dma_cookie_t cookie; - - cookie = chan->cookie + 1; - if (cookie < DMA_MIN_COOKIE) - cookie = DMA_MIN_COOKIE; - tx->cookie = chan->cookie = cookie; - - return cookie; -} - -/** - * dma_cookie_complete - complete a descriptor - * @tx: descriptor to complete - * - * Mark this descriptor complete by updating the channels completed - * cookie marker. Zero the descriptors cookie to prevent accidental - * repeated completions. - * - * Note: caller is expected to hold a lock to prevent concurrency. - */ -static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) -{ - BUG_ON(tx->cookie < DMA_MIN_COOKIE); - tx->chan->completed_cookie = tx->cookie; - tx->cookie = 0; -} - -/** - * dma_cookie_status - report cookie status - * @chan: dma channel - * @cookie: cookie we are interested in - * @state: dma_tx_state structure to return last/used cookies - * - * Report the status of the cookie, filling in the state structure if - * non-NULL. No locking is required. 
- */ -static inline enum dma_status dma_cookie_status(struct dma_chan *chan, - dma_cookie_t cookie, struct dma_tx_state *state) -{ - dma_cookie_t used, complete; - - used = chan->cookie; - complete = chan->completed_cookie; - barrier(); - if (state) { - state->last = complete; - state->used = used; - state->residue = 0; - state->in_flight_bytes = 0; - } - return dma_async_is_complete(cookie, complete, used); -} - -static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) -{ - if (state) - state->residue = residue; -} - -static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, - u32 in_flight_bytes) -{ - if (state) - state->in_flight_bytes = in_flight_bytes; -} - -struct dmaengine_desc_callback { - dma_async_tx_callback callback; - dma_async_tx_callback_result callback_result; - void *callback_param; -}; - -/** - * dmaengine_desc_get_callback - get the passed in callback function - * @tx: tx descriptor - * @cb: temp struct to hold the callback info - * - * Fill the passed in cb struct with what's available in the passed in - * tx descriptor struct - * No locking is required. - */ -static inline void -dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, - struct dmaengine_desc_callback *cb) -{ - cb->callback = tx->callback; - cb->callback_result = tx->callback_result; - cb->callback_param = tx->callback_param; -} - -/** - * dmaengine_desc_callback_invoke - call the callback function in cb struct - * @cb: temp struct that is holding the callback info - * @result: transaction result - * - * Call the callback function provided in the cb struct with the parameter - * in the cb struct. - * Locking is dependent on the driver. - */ -static inline void -dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, - const struct dmaengine_result *result) -{ - struct dmaengine_result dummy_result = { - .result = DMA_TRANS_NOERROR, - .residue = 0 - }; - - if (cb->callback_result) { - if (!result) - result = &dummy_result; - cb->callback_result(cb->callback_param, result); - } else if (cb->callback) { - cb->callback(cb->callback_param); - } -} - -/** - * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and - * then immediately call the callback. - * @tx: dma async tx descriptor - * @result: transaction result - * - * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() - * in a single function since no work is necessary in between for the driver. - * Locking is dependent on the driver. - */ -static inline void -dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, - const struct dmaengine_result *result) -{ - struct dmaengine_desc_callback cb; - - dmaengine_desc_get_callback(tx, &cb); - dmaengine_desc_callback_invoke(&cb, result); -} - -/** - * dmaengine_desc_callback_valid - verify the callback is valid in cb - * @cb: callback info struct - * - * Return a bool that verifies whether callback in cb is valid or not. - * No locking is required. 
- */ -static inline bool -dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) -{ - return cb->callback || cb->callback_result; -} - -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); - -#ifdef CONFIG_DEBUG_FS -#include <linux/debugfs.h> - -static inline struct dentry * -dmaengine_get_debugfs_root(struct dma_device *dma_dev) { - return dma_dev->dbg_dev_root; -} -#else -struct dentry; -static inline struct dentry * -dmaengine_get_debugfs_root(struct dma_device *dma_dev) -{ - return NULL; -} -#endif /* CONFIG_DEBUG_FS */ - -#endif diff --git a/snd-alpx/core/generic/6.2/virt-dma.c b/snd-alpx/core/generic/6.2/virt-dma.c deleted file mode 100644 index a6f4265..0000000 --- a/snd-alpx/core/generic/6.2/virt-dma.c +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Virtual DMA channel support for DMAengine - * - * Copyright (C) 2012 Russell King - */ -#include <linux/device.h> -#include <linux/dmaengine.h> -#include <linux/module.h> -#include <linux/spinlock.h> - -#include "virt-dma.h" - -static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) -{ - return container_of(tx, struct virt_dma_desc, tx); -} - -dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct virt_dma_chan *vc = to_virt_chan(tx->chan); - struct virt_dma_desc *vd = to_virt_desc(tx); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&vc->lock, flags); - cookie = dma_cookie_assign(tx); - - list_move_tail(&vd->node, &vc->desc_submitted); - spin_unlock_irqrestore(&vc->lock, flags); - - dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", - vc, vd, cookie); - - return cookie; -} -EXPORT_SYMBOL_GPL(vchan_tx_submit); - -/** - * vchan_tx_desc_free - free a reusable descriptor - * @tx: the transfer - * - * This function frees a previously allocated reusable descriptor. The only - * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the - * transfer. - * - * Returns 0 upon success - */ -int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) -{ - struct virt_dma_chan *vc = to_virt_chan(tx->chan); - struct virt_dma_desc *vd = to_virt_desc(tx); - unsigned long flags; - - spin_lock_irqsave(&vc->lock, flags); - list_del(&vd->node); - spin_unlock_irqrestore(&vc->lock, flags); - - dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", - vc, vd, vd->tx.cookie); - vc->desc_free(vd); - return 0; -} -EXPORT_SYMBOL_GPL(vchan_tx_desc_free); - -struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, - dma_cookie_t cookie) -{ - struct virt_dma_desc *vd; - - list_for_each_entry(vd, &vc->desc_issued, node) - if (vd->tx.cookie == cookie) - return vd; - - return NULL; -} -EXPORT_SYMBOL_GPL(vchan_find_desc); - -/* - * This tasklet handles the completion of a DMA descriptor by - * calling its callback and freeing it. 
- */ -static void vchan_complete(struct tasklet_struct *t) -{ - struct virt_dma_chan *vc = from_tasklet(vc, t, task); - struct virt_dma_desc *vd, *_vd; - struct dmaengine_desc_callback cb; - LIST_HEAD(head); - - spin_lock_irq(&vc->lock); - list_splice_tail_init(&vc->desc_completed, &head); - vd = vc->cyclic; - if (vd) { - vc->cyclic = NULL; - dmaengine_desc_get_callback(&vd->tx, &cb); - } else { - memset(&cb, 0, sizeof(cb)); - } - spin_unlock_irq(&vc->lock); - - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); - - list_for_each_entry_safe(vd, _vd, &head, node) { - dmaengine_desc_get_callback(&vd->tx, &cb); - - list_del(&vd->node); - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); - vchan_vdesc_fini(vd); - } -} - -void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) -{ - struct virt_dma_desc *vd, *_vd; - - list_for_each_entry_safe(vd, _vd, head, node) { - list_del(&vd->node); - vchan_vdesc_fini(vd); - } -} -EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); - -void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) -{ - dma_cookie_init(&vc->chan); - - spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); - INIT_LIST_HEAD(&vc->desc_submitted); - INIT_LIST_HEAD(&vc->desc_issued); - INIT_LIST_HEAD(&vc->desc_completed); - INIT_LIST_HEAD(&vc->desc_terminated); - - tasklet_setup(&vc->task, vchan_complete); - - vc->chan.device = dmadev; - list_add_tail(&vc->chan.device_node, &dmadev->channels); -} -EXPORT_SYMBOL_GPL(vchan_init); - -MODULE_AUTHOR("Russell King"); -MODULE_LICENSE("GPL"); diff --git a/snd-alpx/core/generic/6.2/virt-dma.h b/snd-alpx/core/generic/6.2/virt-dma.h deleted file mode 100644 index e9f5250..0000000 --- a/snd-alpx/core/generic/6.2/virt-dma.h +++ /dev/null @@ -1,227 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Virtual DMA channel support for DMAengine - * - * Copyright (C) 2012 Russell King - */ -#ifndef VIRT_DMA_H -#define VIRT_DMA_H - -#include <linux/dmaengine.h> -#include <linux/interrupt.h> - -#include "dmaengine.h" - -struct virt_dma_desc { - struct dma_async_tx_descriptor tx; - struct dmaengine_result tx_result; - /* protected by vc.lock */ - struct list_head node; -}; - -struct virt_dma_chan { - struct dma_chan chan; - struct tasklet_struct task; - void (*desc_free)(struct virt_dma_desc *); - - spinlock_t lock; - - /* protected by vc.lock */ - struct list_head desc_allocated; - struct list_head desc_submitted; - struct list_head desc_issued; - struct list_head desc_completed; - struct list_head desc_terminated; - - struct virt_dma_desc *cyclic; -}; - -static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) -{ - return container_of(chan, struct virt_dma_chan, chan); -} - -void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); -void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); -struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); -extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); -extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *); - -/** - * vchan_tx_prep - prepare a descriptor - * @vc: virtual channel allocating this descriptor - * @vd: virtual descriptor to prepare - * @tx_flags: flags argument passed in to prepare function - */ -static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, - struct virt_dma_desc *vd, unsigned long tx_flags) -{ - unsigned long flags; - - dma_async_tx_descriptor_init(&vd->tx, &vc->chan); - vd->tx.flags = tx_flags; - 
vd->tx.tx_submit = vchan_tx_submit; - vd->tx.desc_free = vchan_tx_desc_free; - - vd->tx_result.result = DMA_TRANS_NOERROR; - vd->tx_result.residue = 0; - - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - - return &vd->tx; -} - -/** - * vchan_issue_pending - move submitted descriptors to issued list - * @vc: virtual channel to update - * - * vc.lock must be held by caller - */ -static inline bool vchan_issue_pending(struct virt_dma_chan *vc) -{ - list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); - return !list_empty(&vc->desc_issued); -} - -/** - * vchan_cookie_complete - report completion of a descriptor - * @vd: virtual descriptor to update - * - * vc.lock must be held by caller - */ -static inline void vchan_cookie_complete(struct virt_dma_desc *vd) -{ - struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - dma_cookie_t cookie; - - cookie = vd->tx.cookie; - dma_cookie_complete(&vd->tx); - dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", - vd, cookie); - list_add_tail(&vd->node, &vc->desc_completed); - - tasklet_schedule(&vc->task); -} - -/** - * vchan_vdesc_fini - Free or reuse a descriptor - * @vd: virtual descriptor to free/reuse - */ -static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) -{ - struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - - if (dmaengine_desc_test_reuse(&vd->tx)) { - unsigned long flags; - - spin_lock_irqsave(&vc->lock, flags); - list_add(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - } else { - vc->desc_free(vd); - } -} - -/** - * vchan_cyclic_callback - report the completion of a period - * @vd: virtual descriptor - */ -static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) -{ - struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - - vc->cyclic = vd; - tasklet_schedule(&vc->task); -} - -/** - * vchan_terminate_vdesc - Disable pending cyclic callback - * @vd: virtual descriptor to be terminated - * - * vc.lock must be held by caller - */ -static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) -{ - struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - - list_add_tail(&vd->node, &vc->desc_terminated); - - if (vc->cyclic == vd) - vc->cyclic = NULL; -} - -/** - * vchan_next_desc - peek at the next descriptor to be processed - * @vc: virtual channel to obtain descriptor from - * - * vc.lock must be held by caller - */ -static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) -{ - return list_first_entry_or_null(&vc->desc_issued, - struct virt_dma_desc, node); -} - -/** - * vchan_get_all_descriptors - obtain all submitted and issued descriptors - * @vc: virtual channel to get descriptors from - * @head: list of descriptors found - * - * vc.lock must be held by caller - * - * Removes all submitted and issued descriptors from internal lists, and - * provides a list of all descriptors found - */ -static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, - struct list_head *head) -{ - list_splice_tail_init(&vc->desc_allocated, head); - list_splice_tail_init(&vc->desc_submitted, head); - list_splice_tail_init(&vc->desc_issued, head); - list_splice_tail_init(&vc->desc_completed, head); - list_splice_tail_init(&vc->desc_terminated, head); -} - -static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) -{ - struct virt_dma_desc *vd; - unsigned long flags; - LIST_HEAD(head); - - spin_lock_irqsave(&vc->lock, flags); - vchan_get_all_descriptors(vc, 
&head); - list_for_each_entry(vd, &head, node) - dmaengine_desc_clear_reuse(&vd->tx); - spin_unlock_irqrestore(&vc->lock, flags); - - vchan_dma_desc_free_list(vc, &head); -} - -/** - * vchan_synchronize() - synchronize callback execution to the current context - * @vc: virtual channel to synchronize - * - * Makes sure that all scheduled or active callbacks have finished running. For - * proper operation the caller has to ensure that no new callbacks are scheduled - * after the invocation of this function started. - * Also frees up the terminated cyclic descriptor to prevent memory leaks. - */ -static inline void vchan_synchronize(struct virt_dma_chan *vc) -{ - LIST_HEAD(head); - unsigned long flags; - - tasklet_kill(&vc->task); - - spin_lock_irqsave(&vc->lock, flags); - - list_splice_tail_init(&vc->desc_terminated, &head); - - spin_unlock_irqrestore(&vc->lock, flags); - - vchan_dma_desc_free_list(vc, &head); -} - -#endif diff --git a/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h b/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h deleted file mode 100644 index 4ee96de..0000000 --- a/snd-alpx/core/generic/6.2/xilinx/xdma-regs.h +++ /dev/null @@ -1,169 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. - * Copyright (C) 2022, Advanced Micro Devices, Inc. - */ - -#ifndef __DMA_XDMA_REGS_H -#define __DMA_XDMA_REGS_H - -/* The length of register space exposed to host */ -#define XDMA_REG_SPACE_LEN 65536 - -/* - * maximum number of DMA channels for each direction: - * Host to Card (H2C) or Card to Host (C2H) - */ -#define XDMA_MAX_CHANNELS 4 - -/* - * macros to define the number of descriptor blocks that can be used in one - * DMA transfer request. - * the DMA engine uses a linked list of descriptor blocks that specify the - * source, destination, and length of the DMA transfers. - */ -#define XDMA_DESC_BLOCK_NUM BIT(7) -#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1) - -/* descriptor definitions */ -#define XDMA_DESC_ADJACENT 32 -#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1) -#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8) -#define XDMA_DESC_MAGIC 0xad4bUL -#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16) -#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0) -#define XDMA_DESC_STOPPED BIT(0) -#define XDMA_DESC_COMPLETED BIT(1) -#define XDMA_DESC_BLEN_BITS 28 -#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE) - -/* macros to construct the descriptor control word */ -#define XDMA_DESC_CONTROL(adjacent, flag) \ (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \ FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \ FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag))) -#define XDMA_DESC_CONTROL_LAST \ XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED) -#define XDMA_DESC_CONTROL_CYCLIC \ XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED) - -/* - * Descriptor for a single contiguous memory block transfer. - * - * Multiple descriptors are linked by means of the next pointer. An additional - * adjacent count gives the number of extra contiguous descriptors. - * - * The descriptors are in root complex memory, and the bytes in the 32-bit - * words must be in little-endian byte ordering.
- */ -struct xdma_hw_desc { - __le32 control; - __le32 bytes; - __le64 src_addr; - __le64 dst_addr; - __le64 next_desc; -}; - -#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc) -#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT) -#define XDMA_DESC_BLOCK_ALIGN 32 -#define XDMA_DESC_BLOCK_BOUNDARY 4096 - -/* - * Channel registers - */ -#define XDMA_CHAN_IDENTIFIER 0x0 -#define XDMA_CHAN_CONTROL 0x4 -#define XDMA_CHAN_CONTROL_W1S 0x8 -#define XDMA_CHAN_CONTROL_W1C 0xc -#define XDMA_CHAN_STATUS 0x40 -#define XDMA_CHAN_STATUS_RC 0x44 -#define XDMA_CHAN_COMPLETED_DESC 0x48 -#define XDMA_CHAN_ALIGNMENTS 0x4c -#define XDMA_CHAN_INTR_ENABLE 0x90 -#define XDMA_CHAN_INTR_ENABLE_W1S 0x94 -#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c - -#define XDMA_CHAN_STRIDE 0x100 -#define XDMA_CHAN_H2C_OFFSET 0x0 -#define XDMA_CHAN_C2H_OFFSET 0x1000 -#define XDMA_CHAN_H2C_TARGET 0x0 -#define XDMA_CHAN_C2H_TARGET 0x1 - -/* macro to check if channel is available */ -#define XDMA_CHAN_MAGIC 0x1fc0 -#define XDMA_CHAN_CHECK_TARGET(id, target) \ - (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target)) - -/* bits of the channel control register */ -#define CHAN_CTRL_RUN_STOP BIT(0) -#define CHAN_CTRL_IE_DESC_STOPPED BIT(1) -#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2) -#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3) -#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4) -#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6) -#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9) -#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14) -#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19) -#define CHAN_CTRL_NON_INCR_ADDR BIT(25) -#define CHAN_CTRL_POLL_MODE_WB BIT(26) -#define CHAN_CTRL_TRANSFER_INFO_WB BIT(27) - -#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \ - CHAN_CTRL_IE_DESC_STOPPED | \ - CHAN_CTRL_IE_DESC_COMPLETED | \ - CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ - CHAN_CTRL_IE_MAGIC_STOPPED | \ - CHAN_CTRL_IE_READ_ERROR | \ - CHAN_CTRL_IE_WRITE_ERROR | \ - CHAN_CTRL_IE_DESC_ERROR) - -#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START - -#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \ - CHAN_CTRL_IE_MAGIC_STOPPED | \ - CHAN_CTRL_IE_READ_ERROR | \ - CHAN_CTRL_IE_WRITE_ERROR | \ - CHAN_CTRL_IE_DESC_ERROR) - -/* bits of the channel interrupt enable mask */ -#define CHAN_IM_DESC_ERROR BIT(19) -#define CHAN_IM_READ_ERROR BIT(9) -#define CHAN_IM_IDLE_STOPPED BIT(6) -#define CHAN_IM_MAGIC_STOPPED BIT(4) -#define CHAN_IM_DESC_COMPLETED BIT(2) -#define CHAN_IM_DESC_STOPPED BIT(1) - -#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \ - CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \ - CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED) - -/* - * Channel SGDMA registers - */ -#define XDMA_SGDMA_IDENTIFIER 0x4000 -#define XDMA_SGDMA_DESC_LO 0x4080 -#define XDMA_SGDMA_DESC_HI 0x4084 -#define XDMA_SGDMA_DESC_ADJ 0x4088 -#define XDMA_SGDMA_DESC_CREDIT 0x408c - -/* - * interrupt registers - */ -#define XDMA_IRQ_IDENTIFIER 0x2000 -#define XDMA_IRQ_USER_INT_EN 0x2004 -#define XDMA_IRQ_USER_INT_EN_W1S 0x2008 -#define XDMA_IRQ_USER_INT_EN_W1C 0x200c -#define XDMA_IRQ_CHAN_INT_EN 0x2010 -#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014 -#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018 -#define XDMA_IRQ_USER_INT_REQ 0x2040 -#define XDMA_IRQ_CHAN_INT_REQ 0x2044 -#define XDMA_IRQ_USER_INT_PEND 0x2048 -#define XDMA_IRQ_CHAN_INT_PEND 0x204c -#define XDMA_IRQ_USER_VEC_NUM 0x2080 -#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0 - -#define XDMA_IRQ_VEC_SHIFT 8 - -#endif /* __DMA_XDMA_REGS_H */ diff --git a/snd-alpx/core/generic/6.2/xilinx/xdma.c b/snd-alpx/core/generic/6.2/xilinx/xdma.c deleted 
file mode 100644 index 6c89145..0000000 --- a/snd-alpx/core/generic/6.2/xilinx/xdma.c +++ /dev/null @@ -1,1437 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DMA driver for Xilinx DMA/Bridge Subsystem - * - * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. - * Copyright (C) 2022, Advanced Micro Devices, Inc. - */ - -/* - * The DMA/Bridge Subsystem for PCI Express allows for the movement of data - * between Host memory and the DMA subsystem. It does this by operating on - * 'descriptors' that contain information about the source, destination and - * amount of data to transfer. These direct memory transfers can run in both - * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be - * configured to have a single AXI4 Master interface shared by all channels - * or one AXI4-Stream interface for each channel enabled. Memory transfers are - * specified on a per-channel basis in descriptor linked lists, which the DMA - * fetches from host memory and processes. Events such as descriptor completion - * and errors are signaled using interrupts. The core also provides up to 16 - * user interrupt wires that generate interrupts to the host. - */ - -#include <linux/module.h> -#include <linux/mod_devicetable.h> -#include <linux/bitfield.h> -#include <linux/dmapool.h> - -#include <linux/regmap.h> - -#if defined (CONFIG_KERNEL_REDHAT) - #warning REDHAT Kernel - #if KERNEL_VERSION(5, 19, 0) <= LINUX_VERSION_CODE - /* Use Generic include */ - #include "../../../../include/5.6/virt-dma.h" - #elif KERNEL_VERSION(4, 18, 0) <= LINUX_VERSION_CODE - /* Use generic include: the files are identical */ - #warning RedHat 4.18 at least - #include "../../../../include/5.6/virt-dma.h" - #else - #error RedHat kernel NOT supported - #endif - -#else - /* Generic Kernels */ - #warning "Generic Kernels" - #if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE - #include "../virt-dma.h" - #elif KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE - #include "../../../../include/5.6/virt-dma.h" - #elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE - #include "../../../../include/5.3/virt-dma.h" - #else - #include "../../../../include/4.16/virt-dma.h" - #endif - -#endif - - -#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE -#include <linux/dmaengine.h> -#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE -#include "../../../../include/5.3/dmaengine.h" -#else -#include "../../../../include/4.16/dmaengine.h" -#endif - -#include "../../../../include/6.2/amd_xdma.h" -#include <linux/platform_device.h> -#include "../amd_xdma.h" -#include <linux/dma-mapping.h> -#include <linux/pci.h> - -#include "xdma-regs.h" - -/* mmio regmap config for all XDMA registers */ -static const struct regmap_config xdma_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, - .max_register = XDMA_REG_SPACE_LEN, -}; - -/** - * struct xdma_desc_block - Descriptor block - * @virt_addr: Virtual address of block start - * @dma_addr: DMA address of block start - */ -struct xdma_desc_block { - void *virt_addr; - dma_addr_t dma_addr; -}; - -/** - * struct xdma_c2h_write_back - Write-back block, written by the XDMA. - * @magic_status_bit: magic (0x52B4) once written - * @length: effective transfer length (in bytes) - * @padding_1: padding to align the structure on a 32-byte boundary - * @dma_addr: associated DMA address, kept by the driver - */ -struct xdma_c2h_write_back { - __le32 magic_status_bit; - __le32 length; - u32 padding_1[6]; - dma_addr_t dma_addr; -};
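For illustration, a minimal sketch of how the write-back block above can be consumed on the C2H side once a transfer completes. The helper name is invented, and it assumes the 0x52B4 magic lands in the low half-word of @magic_status_bit, as the field description suggests:

    /* Sketch only: return the effective C2H transfer length, or 0 while
     * the XDMA core has not yet stamped the block with its magic value. */
    static u32 c2h_write_back_len(const struct xdma_c2h_write_back *wb)
    {
            if ((le32_to_cpu(wb->magic_status_bit) & 0xffff) != 0x52b4)
                    return 0;
            return le32_to_cpu(wb->length);
    }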
- -/** - * struct xdma_chan - Driver specific DMA channel structure - * @vchan: Virtual channel - * @xdev_hdl: Pointer to DMA device structure - * @base: Offset of channel registers - * @desc_pool: Descriptor pool - * @busy: Busy flag of the channel - * @dir: Transferring direction of the channel - * @cfg: Transferring config of the channel - * @irq: IRQ assigned to the channel - * @write_back: C2H metadata write-back block - */ -struct xdma_chan { - struct virt_dma_chan vchan; - void *xdev_hdl; - u32 base; - struct dma_pool *desc_pool; - bool busy; - enum dma_transfer_direction dir; - struct dma_slave_config cfg; - u32 irq; - struct xdma_c2h_write_back *write_back; -}; - -/** - * struct xdma_desc - DMA desc structure - * @vdesc: Virtual DMA descriptor - * @chan: DMA channel pointer - * @dir: Transferring direction of the request - * @desc_blocks: Hardware descriptor blocks - * @dblk_num: Number of hardware descriptor blocks - * @desc_num: Number of hardware descriptors - * @completed_desc_num: Completed hardware descriptors - * @cyclic: Cyclic transfer vs. scatter-gather - * @interleaved_dma: Interleaved DMA transfer - * @periods: Number of periods in the cyclic transfer - * @period_size: Size of a period in bytes in cyclic transfers - * @frames_left: Number of frames left in interleaved DMA transfer - * @error: tx error flag - */ -struct xdma_desc { - struct virt_dma_desc vdesc; - struct xdma_chan *chan; - enum dma_transfer_direction dir; - struct xdma_desc_block *desc_blocks; - u32 dblk_num; - u32 desc_num; - u32 completed_desc_num; - bool cyclic; - bool interleaved_dma; - u32 periods; - u32 period_size; - u32 frames_left; - bool error; -}; - -#define XDMA_DEV_STATUS_REG_DMA BIT(0) -#define XDMA_DEV_STATUS_INIT_MSIX BIT(1) - -/** - * struct xdma_device - DMA device structure - * @pdev: Platform device pointer - * @dma_dev: DMA device structure - * @rmap: MMIO regmap for DMA registers - * @h2c_chans: Host to Card channels - * @c2h_chans: Card to Host channels - * @h2c_chan_num: Number of H2C channels - * @c2h_chan_num: Number of C2H channels - * @irq_start: Start IRQ assigned to device - * @irq_num: Number of IRQs assigned to device - * @status: Initialization status - */ -struct xdma_device { - struct platform_device *pdev; - struct dma_device dma_dev; - struct regmap *rmap; - struct xdma_chan *h2c_chans; - struct xdma_chan *c2h_chans; - u32 h2c_chan_num; - u32 c2h_chan_num; - u32 irq_start; - u32 irq_num; - u32 status; -}; - -#define xdma_err(xdev, fmt, args...)
\ - dev_err(&(xdev)->pdev->dev, fmt, ##args) -#define XDMA_CHAN_NUM(_xd) ({ \ - typeof(_xd) (xd) = (_xd); \ - ((xd)->h2c_chan_num + (xd)->c2h_chan_num); }) - -/* Get the last desc in a desc block */ -static inline void *xdma_blk_last_desc(struct xdma_desc_block *block) -{ - return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE; -} - -/** - * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer - * @sw_desc: Tx descriptor pointer - */ -static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc) -{ - struct xdma_desc_block *block; - u32 last_blk_desc, desc_control; - struct xdma_hw_desc *desc; - int i; - - desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0); - for (i = 1; i < sw_desc->dblk_num; i++) { - block = &sw_desc->desc_blocks[i - 1]; - desc = xdma_blk_last_desc(block); - - if (!(i & XDMA_DESC_BLOCK_MASK)) { - desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); - continue; - } - desc->control = cpu_to_le32(desc_control); - desc->next_desc = cpu_to_le64(block[1].dma_addr); - } - - /* update the last block */ - last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; - if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) { - block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2]; - desc = xdma_blk_last_desc(block); - desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0); - desc->control = cpu_to_le32(desc_control); - } - - block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; - desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; - desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); -} - -/** - * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer - * @sw_desc: Tx descriptor pointer - */ -static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc) -{ - struct xdma_desc_block *block; - struct xdma_hw_desc *desc; - int i; - - block = sw_desc->desc_blocks; - for (i = 0; i < sw_desc->desc_num - 1; i++) { - desc = block->virt_addr + i * XDMA_DESC_SIZE; - desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE)); - } - desc = block->virt_addr + i * XDMA_DESC_SIZE; - desc->next_desc = cpu_to_le64(block->dma_addr); -} - -static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan) -{ - return container_of(chan, struct xdma_chan, vchan.chan); -} - -static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc) -{ - return container_of(vdesc, struct xdma_desc, vdesc); -} - -/** - * xdma_channel_init - Initialize DMA channel registers - * @chan: DMA channel pointer - */ -static int xdma_channel_init(struct xdma_chan *chan) -{ - struct xdma_device *xdev = chan->xdev_hdl; - int ret; - unsigned int reg_ctrl = 0; - - regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); - dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); - - ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C, - CHAN_CTRL_NON_INCR_ADDR | CHAN_CTRL_TRANSFER_INFO_WB); - if (ret) - return ret; - - regmap_read(xdev->rmap, chan->base + XDMA_CHAN_CONTROL, ®_ctrl); - dev_dbg(&xdev->pdev->dev, "CONTROL Init: 0x%08x\n", reg_ctrl); - - ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE, - CHAN_IM_ALL); - if (ret) - return ret; - - return 0; -} - -/** - * xdma_free_desc - Free descriptor - * @vdesc: Virtual DMA descriptor - */ -static void xdma_free_desc(struct virt_dma_desc *vdesc) -{ - struct xdma_desc *sw_desc; - int i; - - sw_desc = to_xdma_desc(vdesc); - for (i = 0; i < sw_desc->dblk_num; i++) { - if (!sw_desc->desc_blocks[i].virt_addr) - break; - 
dma_pool_free(sw_desc->chan->desc_pool, - sw_desc->desc_blocks[i].virt_addr, - sw_desc->desc_blocks[i].dma_addr); - } - kfree(sw_desc->desc_blocks); - kfree(sw_desc); -} - -/** - * xdma_alloc_desc - Allocate descriptor - * @chan: DMA channel pointer - * @desc_num: Number of hardware descriptors - * @cyclic: Whether this is a cyclic transfer - */ -static struct xdma_desc * -xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic) -{ - struct xdma_desc *sw_desc; - struct xdma_hw_desc *desc; - dma_addr_t dma_addr; - u32 dblk_num; - u32 control; - void *addr; - int i, j; - - sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT); - if (!sw_desc) - return NULL; - - sw_desc->chan = chan; - sw_desc->desc_num = desc_num; - sw_desc->cyclic = cyclic; - sw_desc->error = false; - dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); - sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), - GFP_NOWAIT); - if (!sw_desc->desc_blocks) - goto failed; - - if (cyclic) - control = XDMA_DESC_CONTROL_CYCLIC; - else - control = XDMA_DESC_CONTROL(1, 0); - - sw_desc->dblk_num = dblk_num; - for (i = 0; i < sw_desc->dblk_num; i++) { - addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr); - if (!addr) - goto failed; - - sw_desc->desc_blocks[i].virt_addr = addr; - sw_desc->desc_blocks[i].dma_addr = dma_addr; - for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) - desc[j].control = cpu_to_le32(control); - } - - if (cyclic) - xdma_link_cyclic_desc_blocks(sw_desc); - else - xdma_link_sg_desc_blocks(sw_desc); - - return sw_desc; - -failed: - xdma_free_desc(&sw_desc->vdesc); - return NULL; -}
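To make the allocation math in xdma_alloc_desc() concrete, a worked example with illustrative numbers: XDMA_DESC_ADJACENT is 32 descriptors per pool block, so a request for 70 hardware descriptors takes three blocks, with only the first six slots of the last block used.

    u32 desc_num = 70;                                            /* example request */
    u32 dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);    /* == 3 blocks */
    u32 last_blk_desc = (desc_num - 1) & XDMA_DESC_ADJACENT_MASK; /* == 5, i.e. descriptors 0..5 of the last block */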
- -/** - * xdma_xfer_start - Start DMA transfer - * @xchan: DMA channel pointer - */ -static int xdma_xfer_start(struct xdma_chan *xchan) -{ - struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan); - struct xdma_device *xdev = xchan->xdev_hdl; - struct xdma_desc_block *block; - u32 val, completed_blocks; - struct xdma_desc *desc; - int ret; - - /* - * Check that a descriptor has been submitted and that the channel - * is not busy; the vchan lock must be held by the caller. - */ - if (!vd || xchan->busy) - return -EINVAL; - - /* clear run stop bit to get ready for transfer */ - ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, - CHAN_CTRL_RUN_STOP); - if (ret) - return ret; - - desc = to_xdma_desc(vd); - if (desc->dir != xchan->dir) { - xdma_err(xdev, "incorrect request direction"); - return -EINVAL; - } - - /* set DMA engine to the first descriptor block */ - completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT; - block = &desc->desc_blocks[completed_blocks]; - val = lower_32_bits(block->dma_addr); - ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val); - if (ret) - return ret; - - val = upper_32_bits(block->dma_addr); - ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val); - if (ret) - return ret; - - if (completed_blocks + 1 == desc->dblk_num) - val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; - else - val = XDMA_DESC_ADJACENT - 1; - ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val); - if (ret) - return ret; - - /* kick off the DMA transfer: use the W1C/W1S (bit clear/set) registers to force a 0 -> 1 transition of the start bits */ - - regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, - CHAN_CTRL_START); - - ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1S, - CHAN_CTRL_START); - if (ret) - return ret; - - xchan->busy = true; - - return 0; -} - -/** - * xdma_xfer_stop - Stop DMA transfer - * @xchan: DMA channel pointer - */ -static int xdma_xfer_stop(struct xdma_chan *xchan) -{ - int ret; - u32 val; - struct xdma_device *xdev = xchan->xdev_hdl; - - /* clear run stop bit to prevent any further auto-triggering */ - ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, - CHAN_CTRL_RUN_STOP); - if (ret) - return ret; - - /* Clear the channel status register */ - ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val); - if (ret) - return ret; - - return 0; -}
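The channel discovery below keys off the identifier register layout from xdma-regs.h: the upper half-word must read XDMA_CHAN_MAGIC (0x1fc0) plus the direction target. A sketch with hypothetical register values (the low half-word is ignored by the check):

    bool h2c = XDMA_CHAN_CHECK_TARGET(0x1fc00000, XDMA_CHAN_H2C_TARGET); /* true:  0x1fc0 == 0x1fc0 + 0 */
    bool c2h = XDMA_CHAN_CHECK_TARGET(0x1fc00000, XDMA_CHAN_C2H_TARGET); /* false: 0x1fc0 != 0x1fc0 + 1 */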
channel number"); - return -EIO; - } - - /* init channel structure and hardware */ - xchan = &(*chans)[j]; - xchan->xdev_hdl = xdev; - xchan->base = base + i * XDMA_CHAN_STRIDE; - xchan->dir = dir; - - ret = xdma_channel_init(xchan); - if (ret) - return ret; - xchan->vchan.desc_free = xdma_free_desc; - vchan_init(&xchan->vchan, &xdev->dma_dev); - - j++; - } - - dev_info(&xdev->pdev->dev, "configured %d %s channels", j, - (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H"); - - return 0; -} - -/** - * xdma_issue_pending - Issue pending transactions - * @chan: DMA channel pointer - */ -static void xdma_issue_pending(struct dma_chan *chan) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - unsigned long flags; - - spin_lock_irqsave(&xdma_chan->vchan.lock, flags); - if (vchan_issue_pending(&xdma_chan->vchan)) - xdma_xfer_start(xdma_chan); - spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); -} - -/** - * xdma_terminate_all - Terminate all transactions - * @chan: DMA channel pointer - */ -static int xdma_terminate_all(struct dma_chan *chan) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - struct virt_dma_desc *vd; - unsigned long flags; - LIST_HEAD(head); - - xdma_xfer_stop(xdma_chan); - - spin_lock_irqsave(&xdma_chan->vchan.lock, flags); - - xdma_chan->busy = false; - vd = vchan_next_desc(&xdma_chan->vchan); - if (vd) { - list_del(&vd->node); - dma_cookie_complete(&vd->tx); - vchan_terminate_vdesc(vd); - } - vchan_get_all_descriptors(&xdma_chan->vchan, &head); - list_splice_tail(&head, &xdma_chan->vchan.desc_terminated); - - spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); - - return 0; -} - -/** - * xdma_synchronize - Synchronize terminated transactions - * @chan: DMA channel pointer - */ -static void xdma_synchronize(struct dma_chan *chan) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - - vchan_synchronize(&xdma_chan->vchan); -} - -/** - * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk. - * More than one descriptor will be used if the size is bigger - * than XDMA_DESC_BLEN_MAX. 
- -/** - * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk. - * More than one descriptor will be used if the size is bigger - * than XDMA_DESC_BLEN_MAX. - * @sw_desc: Descriptor container - * @src_addr: First value for the ->src_addr field - * @dst_addr: First value for the ->dst_addr field - * @size: Size of the contiguous memory block - * @filled_descs_num: Index of the first descriptor to fill in @sw_desc - */ -static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr, - u64 dst_addr, u32 size, u32 filled_descs_num) -{ - u32 left = size, len, desc_num = filled_descs_num; - struct xdma_desc_block *dblk; - struct xdma_hw_desc *desc; - - dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT); - desc = dblk->virt_addr; - desc += desc_num & XDMA_DESC_ADJACENT_MASK; - do { - len = min_t(u32, left, XDMA_DESC_BLEN_MAX); - /* set hardware descriptor */ - desc->bytes = cpu_to_le32(len); - desc->src_addr = cpu_to_le64(src_addr); - desc->dst_addr = cpu_to_le64(dst_addr); - - dev_dbg(NULL, "desc[%u]:%p {src:0x%llx, dst: 0x%llx, length: %u}", - desc_num, - desc, - src_addr, - dst_addr, - len); - - if (!(++desc_num & XDMA_DESC_ADJACENT_MASK)) - desc = (++dblk)->virt_addr; - else - desc++; - - src_addr += len; - dst_addr += len; - left -= len; - } while (left); - - return desc_num - filled_descs_num; -}
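A worked example of the splitting done by xdma_fill_descs(), assuming 4 KiB pages so that XDMA_DESC_BLEN_MAX is BIT(28) - 4096, i.e. just under 256 MiB: a contiguous 600 MiB chunk needs three hardware descriptors, two full-sized and one carrying the remainder.

    u32 size = 600U << 20;                              /* 600 MiB example chunk */
    u32 descs = DIV_ROUND_UP(size, XDMA_DESC_BLEN_MAX); /* == 3 descriptors */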
"C2H" : "H2C", - sw_desc, - src, - dst, - sg_dma_len(sg)); - - - desc_num = 0; - for_each_sg(sgl, sg, sg_len, i) { - addr = sg_dma_address(sg); - desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num); - dev_addr += sg_dma_len(sg); - } - - tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); - if (!tx_desc) - goto failed; - - return tx_desc; - -failed: - xdma_free_desc(&sw_desc->vdesc); - - return NULL; -} - -/** - * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions - * @chan: DMA channel pointer - * @address: Device DMA address to access - * @size: Total length to transfer - * @period_size: Period size to use for each transfer - * @dir: Transfer direction - * @flags: Transfer ack flags - */ -static struct dma_async_tx_descriptor * -xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, - size_t size, size_t period_size, - enum dma_transfer_direction dir, - unsigned long flags) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - struct xdma_device *xdev = xdma_chan->xdev_hdl; - unsigned int periods = size / period_size; - struct dma_async_tx_descriptor *tx_desc; - struct xdma_desc *sw_desc; - u64 addr, dev_addr, *src, *dst; - u32 desc_num = 0; - unsigned int i; - - /* - * Simplify the whole logic by preventing an abnormally high number of - * periods and periods size. - */ - if (period_size > XDMA_DESC_BLEN_MAX) { - xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX); - return NULL; - } - - if (periods > XDMA_DESC_ADJACENT) { - xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT); - return NULL; - } - - sw_desc = xdma_alloc_desc(xdma_chan, periods, true); - if (!sw_desc) - return NULL; - - sw_desc->periods = periods; - sw_desc->period_size = period_size; - sw_desc->dir = dir; - sw_desc->interleaved_dma = false; - - addr = address; - if (dir == DMA_MEM_TO_DEV) { - dev_addr = xdma_chan->cfg.dst_addr; - src = &addr; - dst = &dev_addr; - } else { - dev_addr = xdma_chan->cfg.src_addr ? xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr; - src = &dev_addr; - dst = &addr; - } - - dev_dbg(&xdev->pdev->dev, "desc[%s]:%p {src: %p, dst: %p, length: %lu}", - dir == DMA_MEM_TO_DEV ? 
"C2H" : "H2C", - sw_desc, - src, - dst, - period_size); - - for (i = 0; i < periods; i++) { - xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num++); - addr += period_size; - } - - tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); - if (!tx_desc) - goto failed; - - return tx_desc; - -failed: - xdma_free_desc(&sw_desc->vdesc); - - return NULL; -} - -/** - * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers - * @chan: DMA channel - * @xt: DMA transfer template - * @flags: tx flags - */ -static struct dma_async_tx_descriptor * -xdma_prep_interleaved_dma(struct dma_chan *chan, - struct dma_interleaved_template *xt, - unsigned long flags) -{ - int i; - u32 desc_num = 0, period_size = 0; - struct dma_async_tx_descriptor *tx_desc; - struct xdma_chan *xchan = to_xdma_chan(chan); - struct xdma_desc *sw_desc; - u64 src_addr, dst_addr; - - for (i = 0; i < xt->frame_size; ++i) - desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX); - - sw_desc = xdma_alloc_desc(xchan, desc_num, false); - if (!sw_desc) - return NULL; - sw_desc->dir = xt->dir; - sw_desc->interleaved_dma = true; - sw_desc->cyclic = flags & DMA_PREP_REPEAT; - sw_desc->frames_left = xt->numf; - sw_desc->periods = xt->numf; - - desc_num = 0; - src_addr = xt->src_start; - dst_addr = xt->dst_start; - for (i = 0; i < xt->frame_size; ++i) { - desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num); - src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ? - xt->sgl[i].size : 0); - dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ? - xt->sgl[i].size : 0); - period_size += xt->sgl[i].size; - } - sw_desc->period_size = period_size; - - tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags); - if (tx_desc) - return tx_desc; - - xdma_free_desc(&sw_desc->vdesc); - return NULL; -} - -/** - * xdma_device_config - Configure the DMA channel - * @chan: DMA channel - * @cfg: channel configuration - */ -static int xdma_device_config(struct dma_chan *chan, - struct dma_slave_config *cfg) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - - memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg)); - - return 0; -} - -/** - * xdma_free_chan_resources - Free channel resources - * @chan: DMA channel - */ -static void xdma_free_chan_resources(struct dma_chan *chan) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - - vchan_free_chan_resources(&xdma_chan->vchan); - dma_pool_free(xdma_chan->desc_pool, - xdma_chan->write_back, - xdma_chan->write_back->dma_addr); - dma_pool_destroy(xdma_chan->desc_pool); - xdma_chan->desc_pool = NULL; -} - -/** - * xdma_alloc_chan_resources - Allocate channel resources - * @chan: DMA channel - */ -static int xdma_alloc_chan_resources(struct dma_chan *chan) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - struct xdma_device *xdev = xdma_chan->xdev_hdl; - struct device *dev = xdev->dma_dev.dev; - dma_addr_t write_back_addr; - - while (dev && !dev_is_pci(dev)) - dev = dev->parent; - if (!dev) { - xdma_err(xdev, "unable to find pci device"); - return -EINVAL; - } - - //Allocate the pool WITH the H2C write back - xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE + - sizeof(struct xdma_c2h_write_back), - XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY); - if (!xdma_chan->desc_pool) { - xdma_err(xdev, "unable to allocate descriptor pool"); - return -ENOMEM; - } - - /* Allocate the C2H write back out of the pool*/ - - xdma_chan->write_back = 
dma_pool_alloc(xdma_chan->desc_pool, GFP_NOWAIT, &write_back_addr); - - dev_dbg(dev, "C2H write_back : %p, dma_addr: %lld", xdma_chan->write_back, write_back_addr); - - if (!xdma_chan->write_back) { - xdma_err(xdev, "unable to allocate C2H write back block"); - return -ENOMEM; - } - xdma_chan->write_back->dma_addr = write_back_addr; - - - return 0; -} - -static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - struct dma_tx_state *state) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - struct xdma_desc *desc = NULL; - struct virt_dma_desc *vd; - enum dma_status ret; - unsigned long flags; - unsigned int period_idx; - u32 residue = 0; - - ret = dma_cookie_status(chan, cookie, state); - if (ret == DMA_COMPLETE) - return ret; - - spin_lock_irqsave(&xdma_chan->vchan.lock, flags); - - vd = vchan_find_desc(&xdma_chan->vchan, cookie); - if (!vd) - goto out; - - desc = to_xdma_desc(vd); - if (desc->error) { - ret = DMA_ERROR; - } else if (desc->cyclic) { - period_idx = desc->completed_desc_num % desc->periods; - residue = (desc->periods - period_idx) * desc->period_size; - dma_set_residue(state, residue); - } -out: - spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); - - return ret; -} - -/** - * xdma_channel_isr - XDMA channel interrupt handler - * @irq: IRQ number - * @dev_id: Pointer to the DMA channel structure - */ -static irqreturn_t xdma_channel_isr(int irq, void *dev_id) -{ - struct xdma_chan *xchan = dev_id; - u32 complete_desc_num = 0; - struct xdma_device *xdev = xchan->xdev_hdl; - struct virt_dma_desc *vd, *next_vd; - struct xdma_desc *desc; - int ret; - u32 st; - bool repeat_tx; - - spin_lock(&xchan->vchan.lock); - - /* get submitted request */ - vd = vchan_next_desc(&xchan->vchan); - if (!vd) - goto out; - - /* Clear-on-read the status register */ - ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st); - if (ret) - goto out; - - desc = to_xdma_desc(vd); - - st &= XDMA_CHAN_STATUS_MASK; - if ((st & XDMA_CHAN_ERROR_MASK) || - !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) { - desc->error = true; - xdma_err(xdev, "channel error, status register value: 0x%x", st); - goto out; - } - - ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC, - &complete_desc_num); - if (ret) - goto out; - - if (desc->interleaved_dma) { - xchan->busy = false; - desc->completed_desc_num += complete_desc_num; - if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) { - xdma_xfer_start(xchan); - goto out; - } - - /* last desc of any frame */ - desc->frames_left--; - if (desc->frames_left) - goto out; - - /* last desc of the last frame */ - repeat_tx = vd->tx.flags & DMA_PREP_REPEAT; - next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node); - if (next_vd) - repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT); - if (repeat_tx) { - desc->frames_left = desc->periods; - desc->completed_desc_num = 0; - vchan_cyclic_callback(vd); - } else { - list_del(&vd->node); - vchan_cookie_complete(vd); - } - /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */ - xdma_xfer_start(xchan); - } else if (!desc->cyclic) { - xchan->busy = false; - desc->completed_desc_num += complete_desc_num; - - /* if all data blocks are transferred, remove and complete the request */ - if (desc->completed_desc_num == desc->desc_num) { - list_del(&vd->node); - vchan_cookie_complete(vd); - goto out; - } - - if (desc->completed_desc_num > desc->desc_num || - complete_desc_num != 
XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) - goto out; - - /* transfer the rest of data */ - xdma_xfer_start(xchan); - } else { - desc->completed_desc_num = complete_desc_num; - vchan_cyclic_callback(vd); - } - -out: - spin_unlock(&xchan->vchan.lock); - return IRQ_HANDLED; -} - -/** - * xdma_irq_fini - Uninitialize IRQ - * @xdev: DMA device pointer - */ -static void xdma_irq_fini(struct xdma_device *xdev) -{ - int i; - - /* disable interrupt */ - regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0); - - /* free irq handler */ - for (i = 0; i < xdev->h2c_chan_num; i++) - free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); - - for (i = 0; i < xdev->c2h_chan_num; i++) - free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]); -} - -/** - * xdma_set_vector_reg - configure hardware IRQ registers - * @xdev: DMA device pointer - * @vec_tbl_start: Start of IRQ registers - * @irq_start: Start of IRQ - * @irq_num: Number of IRQ - */ -static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, - u32 irq_start, u32 irq_num) -{ - u32 shift, i, val = 0; - int ret; - - /* Each IRQ register is 32 bit and contains 4 IRQs */ - while (irq_num > 0) { - for (i = 0; i < 4; i++) { - shift = XDMA_IRQ_VEC_SHIFT * i; - val |= irq_start << shift; - irq_start++; - irq_num--; - if (!irq_num) - break; - } - - /* write IRQ register */ - ret = regmap_write(xdev->rmap, vec_tbl_start, val); - if (ret) - return ret; - vec_tbl_start += sizeof(u32); - val = 0; - } - - return 0; -} - -/** - * xdma_irq_init - initialize IRQs - * @xdev: DMA device pointer - */ -static int xdma_irq_init(struct xdma_device *xdev) -{ - u32 irq = xdev->irq_start; - u32 user_irq_start; - int i, j, ret; - - /* return failure if there are not enough IRQs */ - if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) { - xdma_err(xdev, "not enough irq"); - return -EINVAL; - } - - /* setup H2C interrupt handler */ - for (i = 0; i < xdev->h2c_chan_num; i++) { - ret = request_irq(irq, xdma_channel_isr, 0, - "xdma-h2c-channel", &xdev->h2c_chans[i]); - if (ret) { - xdma_err(xdev, "H2C channel%d request irq%d failed: %d", - i, irq, ret); - goto failed_init_h2c; - } - xdev->h2c_chans[i].irq = irq; - irq++; - } - - /* setup C2H interrupt handler */ - for (j = 0; j < xdev->c2h_chan_num; j++) { - ret = request_irq(irq, xdma_channel_isr, 0, - "xdma-c2h-channel", &xdev->c2h_chans[j]); - if (ret) { - xdma_err(xdev, "C2H channel%d request irq%d failed: %d", - j, irq, ret); - goto failed_init_c2h; - } - xdev->c2h_chans[j].irq = irq; - irq++; - } - - /* config hardware IRQ registers */ - ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, - XDMA_CHAN_NUM(xdev)); - if (ret) { - xdma_err(xdev, "failed to set channel vectors: %d", ret); - goto failed_init_c2h; - } - - /* config user IRQ registers if needed */ - user_irq_start = XDMA_CHAN_NUM(xdev); - if (xdev->irq_num > user_irq_start) { - ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM, - user_irq_start, - xdev->irq_num - user_irq_start); - if (ret) { - xdma_err(xdev, "failed to set user vectors: %d", ret); - goto failed_init_c2h; - } - } - - /* enable interrupt */ - ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); - if (ret) - goto failed_init_c2h; - - return 0; - -failed_init_c2h: - while (j--) - free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]); -failed_init_h2c: - while (i--) - free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); - - return ret; -} - -static bool xdma_filter_fn(struct dma_chan *chan, void *param) -{ - struct xdma_chan *xdma_chan = to_xdma_chan(chan); - struct 
xdma_chan_info *chan_info = param; - - return chan_info->dir == xdma_chan->dir; -} - -/** - * xdma_disable_user_irq - Disable user interrupt - * @pdev: Pointer to the platform_device structure - * @irq_num: System IRQ number - */ -void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num) -{ - struct xdma_device *xdev = platform_get_drvdata(pdev); - u32 index; - - index = irq_num - xdev->irq_start; - if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { - xdma_err(xdev, "invalid user irq number"); - return; - } - index -= XDMA_CHAN_NUM(xdev); - - regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index); -} -EXPORT_SYMBOL(xdma_disable_user_irq); - -/** - * xdma_enable_user_irq - Enable user logic interrupt - * @pdev: Pointer to the platform_device structure - * @irq_num: System IRQ number - */ -int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num) -{ - struct xdma_device *xdev = platform_get_drvdata(pdev); - u32 index; - int ret; - - index = irq_num - xdev->irq_start; - if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { - xdma_err(xdev, "invalid user irq number"); - return -EINVAL; - } - index -= XDMA_CHAN_NUM(xdev); - - ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index); - if (ret) - return ret; - - return 0; -} -EXPORT_SYMBOL(xdma_enable_user_irq); - -/** - * xdma_get_user_irq - Get system IRQ number - * @pdev: Pointer to the platform_device structure - * @user_irq_index: User logic IRQ wire index - * - * Return: The system IRQ number allocated for the given wire index. - */ -int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index) -{ - struct xdma_device *xdev = platform_get_drvdata(pdev); - - if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) { - xdma_err(xdev, "invalid user irq index"); - return -EINVAL; - } - - return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index; -} -EXPORT_SYMBOL(xdma_get_user_irq); - -/** - * xdma_remove - Driver remove function - * @pdev: Pointer to the platform_device structure - */ -static int xdma_remove(struct platform_device *pdev) -{ - struct xdma_device *xdev = platform_get_drvdata(pdev); - - if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX) - xdma_irq_fini(xdev); - - if (xdev->status & XDMA_DEV_STATUS_REG_DMA) - dma_async_device_unregister(&xdev->dma_dev); - - return 0; -} - -/** - * xdma_probe - Driver probe function - * @pdev: Pointer to the platform_device structure - */ -static int xdma_probe(struct platform_device *pdev) -{ - struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev); - struct xdma_device *xdev; - void __iomem *reg_base; - struct resource *res; - int ret = -ENODEV; - - if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) { - dev_err(&pdev->dev, "invalid max dma channels %d", - pdata->max_dma_channels); - return -EINVAL; - } - - xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); - if (!xdev) - return -ENOMEM; - - platform_set_drvdata(pdev, xdev); - xdev->pdev = pdev; - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - xdma_err(xdev, "failed to get irq resource"); - goto failed; - } - xdev->irq_start = res->start; - xdev->irq_num = resource_size(res); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - xdma_err(xdev, "failed to get io resource"); - goto failed; - } - - reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(reg_base)) { - xdma_err(xdev, "ioremap failed"); - goto failed; - } - - dev_dbg(&pdev->dev, " %s - config: %p (%lu bytes), reg_bits:%d, reg_stride:%d, pad_bits:%d, 
val_bits:%d, &val_bits:%p", - __func__, - &xdma_regmap_config, - sizeof(struct regmap_config), - xdma_regmap_config.reg_bits, - xdma_regmap_config.reg_stride, - xdma_regmap_config.pad_bits, - xdma_regmap_config.val_bits, - &xdma_regmap_config.val_bits); - - xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base, - &xdma_regmap_config); - if (IS_ERR(xdev->rmap)) { - ret = PTR_ERR(xdev->rmap); - xdma_err(xdev, "config regmap failed: %d", ret); - goto failed; - } - - INIT_LIST_HEAD(&xdev->dma_dev.channels); - - ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV); - if (ret) { - xdma_err(xdev, "config H2C channels failed: %d", ret); - goto failed; - } - - ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM); - if (ret) { - xdma_err(xdev, "config C2H channels failed: %d", ret); - goto failed; - } - - dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); - dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); - dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask); - dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask); - dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask); - dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask); - - xdev->dma_dev.dev = &pdev->dev; - xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; - xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources; - xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources; - xdev->dma_dev.device_tx_status = xdma_tx_status; - xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; - xdev->dma_dev.device_config = xdma_device_config; - xdev->dma_dev.device_issue_pending = xdma_issue_pending; - xdev->dma_dev.device_terminate_all = xdma_terminate_all; - xdev->dma_dev.device_synchronize = xdma_synchronize; - xdev->dma_dev.filter.map = pdata->device_map; - xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; - xdev->dma_dev.filter.fn = xdma_filter_fn; - xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic; - xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma; - - ret = dma_async_device_register(&xdev->dma_dev); - if (ret) { - xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret); - goto failed; - } - xdev->status |= XDMA_DEV_STATUS_REG_DMA; - - ret = xdma_irq_init(xdev); - if (ret) { - xdma_err(xdev, "failed to init MSI-X: %d", ret); - goto failed; - } - xdev->status |= XDMA_DEV_STATUS_INIT_MSIX; - - return 0; - -failed: - xdma_remove(pdev); - - return ret; -} - -static const struct platform_device_id xdma_id_table[] = { - { "xdma", 0}, - { }, -}; - -static struct platform_driver xdma_driver = { - .driver = { - .name = "xdma", - }, - .id_table = xdma_id_table, - .probe = xdma_probe, - .remove = xdma_remove, -}; - -module_platform_driver(xdma_driver); - -MODULE_DESCRIPTION("AMD XDMA driver") -MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>"); -MODULE_LICENSE("GPL");