// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Digigram AlpX PCI-e boards
 *
 * Copyright (c) 2024 Digigram Digital (info@digigram.com)
 */

#include <linux/version.h>

#include "alpx_reg.h"
#include "alpx.h"
#include "alpx_streams.h"
#include "alpx_led.h"

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <sound/pcm.h>

extern unsigned int log_transfers; /* defined in alpx_core.c */

#if !IS_ENABLED(CONFIG_SND_DMAENGINE_PCM)
#if defined (CONFIG_KERNEL_GENERIC)
#if KERNEL_VERSION(5, 17, 0) <= LINUX_VERSION_CODE
#include "core/generic/5.17/dmaengine_pcm.h"
#include "core/generic/5.17/pcm_dmaengine.c"
#elif KERNEL_VERSION(5, 12, 0) <= LINUX_VERSION_CODE
#include "core/generic/5.12/dmaengine_pcm.h"
#include "core/generic/5.12/pcm_dmaengine.c"
#elif KERNEL_VERSION(5, 7, 0) <= LINUX_VERSION_CODE
#include "core/generic/5.7/dmaengine_pcm.h"
#include "core/generic/5.7/pcm_dmaengine.c"
#elif KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
#include "core/generic/5.5/dmaengine_pcm.h"
#include "core/generic/5.5/pcm_dmaengine.c"
#elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE
#include "core/generic/5.2/dmaengine_pcm.h"
#include "core/generic/5.2/pcm_dmaengine.c"
#elif KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE
#include "core/generic/4.19/dmaengine_pcm.h"
#include "core/generic/4.19/pcm_dmaengine.c"
#endif
#elif defined (CONFIG_KERNEL_REDHAT)
#if KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE
#include "core/RedHat/5.14/dmaengine_pcm.h"
#include "core/RedHat/5.14/pcm_dmaengine.c"
#elif KERNEL_VERSION(4, 18, 0) == LINUX_VERSION_CODE
#include "core/RedHat/4.18/dmaengine_pcm.h"
#include "core/RedHat/4.18/pcm_dmaengine.c"
#else
#error RedHat kernel not supported yet.
#endif
#endif
#elif defined (CONFIG_KERNEL_GENERIC)
#include <sound/dmaengine_pcm.h>
#else
#error "No valid DMA Engine support!"
#endif

/* Size (in bytes) of a sample's container */
const unsigned int ALPX_SAMPLE_CONTAINER_SIZE = 4;

/* Configure */

static int alpmadi_configure(struct alpx_device *alpx_dev, unsigned int rate)
{
	u32 config_fs, value;

	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);

	switch (rate) {
	case 44100:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_44_1K;
		break;
	case 48000:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_48K;
		break;
	case 88200:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_88_2K;
		break;
	case 96000:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_96K;
		break;
	case 176400:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_176_4K;
		break;
	case 192000:
		config_fs = ALPMADI_CLK_MANAGER_CONFIG_FS_192K;
		break;
	default:
		return -EINVAL;
	}

	value = readl(ALPX_REG(alpx_dev, ALPMADI, CLK_MANAGER, CONFIG));
	dev_dbg(alpx_dev->dev, "%s(): alpMadi clk manager config %x\n",
		__func__, value);

	value &= ~ALPMADI_CLK_MANAGER_CONFIG_FS_MASK;
	value |= config_fs;

	writel(value, ALPX_REG(alpx_dev, ALPMADI, CLK_MANAGER, CONFIG));
	dev_dbg(alpx_dev->dev, "%s(): alpMadi clk manager set to %x\n",
		__func__, value);

	return 0;
}

static int alpdante_configure(struct alpx_device *alpx_dev, unsigned int rate)
{
	dev_dbg(alpx_dev->dev, "%s(): requesting rate %dHz\n", __func__, rate);

	if (alpx_dev->variant->capture_hw->rate_min == rate) {
		dev_dbg(alpx_dev->dev, "%s(): requested rate %dHz is supported\n",
			__func__, rate);
		return 0;
	} else {
		dev_dbg(alpx_dev->dev,
			"Requested %dHz not supported, currently supported rate is %dHz\n",
			rate, alpx_dev->variant->capture_hw->rate_min);
		return -EINVAL;
	}
}
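/*
 * Alp222 (stereo) rate setup: the FS field of the clock manager register is
 * rewritten and read back with a short settling delay, retrying a few times,
 * because the card needs time to relock its PLL before the new value becomes
 * visible. If every retry fails, -EIO is returned.
 */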
static int alpstereo_configure(struct alpx_device *alpx_dev, unsigned int rate)
{
	u32 config_fs, read_value, target_value;
	u32 try_qty = 10; /* retries */

	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);

	switch (rate) {
	case 8000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_8K;
		break;
	case 11025:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K;
		break;
	case 16000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_16K;
		break;
	case 22050:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K;
		break;
	case 24000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_24K;
		break;
	case 32000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_32K;
		break;
	case 44100:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K;
		break;
	case 48000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_48K;
		break;
	case 64000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_64K;
		break;
	case 88200:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K;
		break;
	case 96000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_96K;
		break;
	case 128000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_128K;
		break;
	case 176400:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K;
		break;
	case 192000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_192K;
		break;
	default:
		return -EINVAL;
	}

	read_value = readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));

	/* Only if needed */
	if ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) == config_fs)
		return 0;

	read_value &= ~ALP222_CLK_MANAGER_CONFIG_FS_MASK;
	target_value = read_value | config_fs;

	do {
		writel(target_value, ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));
		dev_dbg(alpx_dev->dev, "%s(): alp222 new clk config set to %x, try: %d\n",
			__func__, target_value, try_qty);

		/*
		 * Add a delay to give the card time to initialize its
		 * internals (PLL, buses, ...) and avoid internal stalls.
		 */
		mdelay(1);

		read_value = readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG));
		dev_dbg(alpx_dev->dev, "%s(): alp222 check clk config: %x\n",
			__func__, read_value);
	} while (--try_qty &&
		 ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) !=
		  (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK)));

	if ((read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) !=
	    (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK))
		dev_err(alpx_dev->dev,
			"%s(): alp222 ERROR clk config, expected: 0x%x, actual: 0x%x\n",
			__func__, target_value, read_value);

	dev_dbg(alpx_dev->dev, "%s(): alp222 new current clk config: 0x%x\n",
		__func__, readl(ALPX_REG(alpx_dev, ALP222, CLK_MANAGER, CONFIG)));

	return (read_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) ==
	       (target_value & ALP222_CLK_MANAGER_CONFIG_FS_MASK) ? 0 : -EIO;
}
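/*
 * Multichannel (ALPMC) rate setup: when the clock source is not internal, the
 * requested rate is only checked against the effective sample rate reported
 * by the clock manager; the FS field is written (followed by a settling
 * delay) only when the card is clocked internally.
 */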
static int alpmultichannel_configure(struct alpx_device *alpx_dev, unsigned int rate)
{
	u32 config_fs, value;
	u32 effective_fs, source;

	/* XXX: check clock source; if it is not internal, error out when the rate is not the external one */

	dev_dbg(alpx_dev->dev, "%s(): requesting rate %d\n", __func__, rate);

	value = readl(ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG));
	source = ALPMC_CLK_MANAGER_SOURCE(value);
	effective_fs = ALPMC_CLK_MANAGER_EFFECTIVE_FS(value);

	switch (rate) {
	case 8000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_8K;
		break;
	case 11025:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K;
		break;
	case 16000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_16K;
		break;
	case 22050:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K;
		break;
	case 24000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_24K;
		break;
	case 32000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_32K;
		break;
	case 44100:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K;
		break;
	case 48000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_48K;
		break;
	case 64000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_64K;
		break;
	case 88200:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K;
		break;
	case 96000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_96K;
		break;
	case 128000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_128K;
		break;
	case 176400:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K;
		break;
	case 192000:
		config_fs = ALPxxx_CLK_MANAGER_CLK_VALUE_192K;
		break;
	default:
		return -EINVAL;
	}

	if (source != ALPMC_CLK_MANAGER_SOURCE_INTERNAL) {
		unsigned int effective_rate;

		switch (effective_fs) {
		case ALPxxx_CLK_MANAGER_CLK_VALUE_8K:
			effective_rate = 8000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_11_025K:
			effective_rate = 11025;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_16K:
			effective_rate = 16000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_22_05K:
			effective_rate = 22050;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_24K:
			effective_rate = 24000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_32K:
			effective_rate = 32000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_44_1K:
			effective_rate = 44100;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_48K:
			effective_rate = 48000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_64K:
			effective_rate = 64000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_88_2K:
			effective_rate = 88200;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_96K:
			effective_rate = 96000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_128K:
			effective_rate = 128000;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_176_4K:
			effective_rate = 176400;
			break;
		case ALPxxx_CLK_MANAGER_CLK_VALUE_192K:
			effective_rate = 192000;
			break;
		default:
			dev_err(alpx_dev->dev, "Invalid effective sample rate\n");
			return -EINVAL;
		}

		if (effective_rate != rate) {
			dev_err(alpx_dev->dev,
				"Requested sample rate (%u) does not match external clock sample rate (%u)\n",
				rate, effective_rate);
			return -EINVAL;
		}
	} else {
		if (ALPMC_CLK_MANAGER_CONFIG_FS(value) != config_fs) {
			value &= ~ALPMC_CLK_MANAGER_CONFIG_FS_MASK;
			value |= config_fs;
			writel(value, ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG));

			/*
			 * Give the card time to settle (PLL, internal buses, ...);
			 * without this delay the new setting does not take effect.
			 */
			mdelay(ALPMC_FS_SWITCH_DELAY);
		}
	}

	dev_dbg(alpx_dev->dev, "%s(): %s new current clk config: 0x%x\n",
		__func__, alpx_dev->variant->shortname,
		readl(ALPX_REG(alpx_dev, ALPMC, CLK_MANAGER, CONFIG)));

	return 0;
}
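/*
 * The sample-rate configuration is shared by the playback and capture pipes:
 * the first user programs the card through the variant-specific helper above
 * and takes a reference; further users must request the same rate (otherwise
 * -EINVAL), and every user drops its reference through alpx_configure_close().
 */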
static int alpx_configure(struct alpx_device *alpx_dev, unsigned int rate)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&alpx_dev->config.lock, flags);

	if (alpx_dev->config.users > 0 && rate != alpx_dev->config.rate) {
		ret = -EINVAL;
		goto complete;
	}

	alpx_dev->config.rate = rate;

	if (alpx_is_882(alpx_dev))
		ret = alpmultichannel_configure(alpx_dev, rate);
	else if (alpx_is_222(alpx_dev))
		ret = alpstereo_configure(alpx_dev, rate);
	else if (alpx_is_madi(alpx_dev))
		ret = alpmadi_configure(alpx_dev, rate);
	else if (alpx_is_dante(alpx_dev))
		ret = alpdante_configure(alpx_dev, rate);
	else
		snd_BUG();

	if (!ret)
		alpx_dev->config.users++;

complete:
	spin_unlock_irqrestore(&alpx_dev->config.lock, flags);

	return ret;
}

static void alpx_configure_close(struct alpx_device *alpx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&alpx_dev->config.lock, flags);
	alpx_dev->config.users--;
	spin_unlock_irqrestore(&alpx_dev->config.lock, flags);
}

/* Pipe */

static int alpx_pipe_start(struct alpx_pipe *pipe,
			   struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);

	dev_dbg(alpx_dev->dev, "%s starting\n",
		pipe->xdma_write ? "Playback" : "Capture");

	if (pipe->status != ALPX_PIPE_STATUS_IDLE &&
	    pipe->status != ALPX_PIPE_STATUS_STOPPED) {
		dev_warn(alpx_dev->dev, "%s pipe is busy!\n",
			 pipe->xdma_write ? "playback" : "capture");
		return -EBUSY;
	}

	if (alpx_is_madi(alpx_dev))
		alpmadi_set_led_state(alpx_dev, true);

	snd_dmaengine_pcm_trigger(substream, SNDRV_PCM_TRIGGER_START);

	pipe->period_qty_on_start = alpx_get_samples_counter(alpx_dev);
	dev_dbg(alpx_dev->dev, "%s Period:%u\n",
		pipe->xdma_write ? "Playback" : "Capture",
		pipe->period_qty_on_start);

	return 0;
}

static int alpx_pipe_open(struct alpx_pipe *pipe,
			  struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = substream->pcm->private_data;
	struct dma_chan *chan;

	/* Check */
	if (!alpx_dev) {
		printk(KERN_ERR "%s(): alpx_dev is NULL\n", __func__);
		return -EINVAL;
	}
	if (!alpx_dev->dev) {
		printk(KERN_ERR "%s(): alpx_dev->dev is NULL\n", __func__);
		return -EINVAL;
	}

	dev_dbg(alpx_dev->dev, "%s(): CALLED\n", __func__);
	dev_dbg(alpx_dev->dev, "%s: matching with dev [%s]\n",
		__func__, dev_name(alpx_dev->dev));

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		chan = dma_request_chan(alpx_dev->dev, "h2c-0");
	else
		chan = dma_request_chan(alpx_dev->dev, "c2h-0");

	if (IS_ERR(chan)) {
		dev_err(alpx_dev->dev,
			"%s(): Error %ld when requesting DMA channel\n",
			__func__, PTR_ERR(chan));
		return PTR_ERR(chan);
	}

	pipe->substream = substream;

	return snd_dmaengine_pcm_open(substream, chan);
}
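/*
 * Stopping note: pipe->status is only moved to STOPPING here when the pipe is
 * still RUNNING; the transfer-done path (handled outside this file) is
 * presumably what completes the transition, since the stream lock held here
 * prevents waiting synchronously for the DMA completion.
 */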
static int alpx_pipe_stop(struct alpx_pipe *pipe,
			  struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int samples_counter, card_samples_qty;

	/* Retrieve the samples counter as soon as possible */
	samples_counter = alpx_get_samples_counter(alpx_dev);

	snd_dmaengine_pcm_trigger(substream, SNDRV_PCM_TRIGGER_STOP);

	if (alpx_is_madi(alpx_dev))
		alpmadi_set_led_state(alpx_dev, false);

	/*
	 * Since we are under the stream lock here and the transfer-done
	 * callback also needs to grab the stream lock, we cannot wait
	 * synchronously for the completion. As a result, we request a stop
	 * with the STOPPING status.
	 */
	card_samples_qty = samples_counter - pipe->period_qty_on_start;
	dev_dbg(alpx_dev->dev, "%s stopped after %u samples (%u bytes)\n",
		pipe->xdma_write ? "Playback" : "Capture",
		card_samples_qty,
		runtime->hw.channels_max * ALPX_SAMPLE_CONTAINER_SIZE *
			card_samples_qty);

	if (pipe->status == ALPX_PIPE_STATUS_RUNNING)
		pipe->status = ALPX_PIPE_STATUS_STOPPING;
	else
		pipe->status = ALPX_PIPE_STATUS_STOPPED;

	return 0;
}

static int alpx_pipe_configure(struct alpx_pipe *pipe,
			       struct snd_pcm_hw_params *hw_params)
{
	struct alpx_device *alpx_dev = pipe->substream->pcm->private_data;
	unsigned int rate = params_rate(hw_params);
	int ret;

	dev_dbg(alpx_dev->dev, "%s hardware parameters:\n",
		pipe->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		"Playback" : "Capture");
	dev_dbg(alpx_dev->dev, "* channels: %d\n", params_channels(hw_params));
	dev_dbg(alpx_dev->dev, "* rate: %d\n", params_rate(hw_params));
	dev_dbg(alpx_dev->dev, "* periods: %d\n", params_periods(hw_params));
	dev_dbg(alpx_dev->dev, "* period size: %d frames\n",
		params_period_size(hw_params));
	dev_dbg(alpx_dev->dev, "* period bytes: %d bytes\n",
		params_period_size(hw_params) * params_channels(hw_params) * 4);
	dev_dbg(alpx_dev->dev, "* buffer size: %d frames\n",
		params_buffer_size(hw_params));
	dev_dbg(alpx_dev->dev, "* buffer bytes: %d bytes\n",
		params_buffer_bytes(hw_params));

	ret = alpx_configure(alpx_dev, rate);
	if (ret)
		return ret;

	pipe->configured = true;

	return 0;
}

static void alpx_pipe_close(struct alpx_pipe *pipe)
{
	struct alpx_device *alpx_dev = pipe->substream->pcm->private_data;

	dev_dbg(alpx_dev->dev, "%s(): CALLED\n", __func__);

	snd_dmaengine_pcm_close_release_chan(pipe->substream);

	if (pipe->configured) {
		alpx_configure_close(alpx_dev);
		pipe->configured = false;
	}
}

int alpx_pipe_init(struct alpx_pipe *pipe, bool xdma_write)
{
	pipe->xdma_write = xdma_write;
	pipe->status = ALPX_PIPE_STATUS_IDLE;

	return 0;
}
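/*
 * Rough sketch of how the ALSA core is expected to exercise the callbacks
 * below for one playback or capture session (standard ALSA PCM callback
 * order, not a sequence specific to this driver):
 *
 *   .open       -> alpx_*_open()       (request the XDMA channel)
 *   .hw_params  -> alpx_*_hw_params()  (configure the card sample rate)
 *   .prepare    -> alpx_*_prepare()
 *   .trigger    -> alpx_*_trigger(START/STOP)
 *   .pointer    -> snd_dmaengine_pcm_pointer()
 *   .hw_free    -> alpx_*_hw_free()
 *   .close      -> alpx_*_close()      (release the channel and the rate)
 */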
/* Playback */

int alpx_playback_open(struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	/*
	 * Pause briefly to avoid a playback lock-up when chaining playbacks.
	 * This is required for AlpDANTE cards; the XDMA engine seems to need
	 * some time here.
	 */
	msleep(200);

	dev_dbg(alpx_dev->dev, "CALLED\n");

	ret = alpx_pipe_open(&alpx_dev->playback, substream);
	if (!ret)
		runtime->hw = *alpx_dev->variant->playback_hw;

	dev_dbg(alpx_dev->dev, "=> %d\n", ret);

	return ret;
}

int alpx_playback_close(struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);

	alpx_pipe_close(&alpx_dev->playback);

	return 0;
}

int alpx_playback_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *hw_params)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
	int ret;

	ret = alpx_pipe_configure(&alpx_dev->playback, hw_params);
	if (ret)
		return ret;

#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
	return 0;
#else
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
#endif
}

int alpx_playback_hw_free(struct snd_pcm_substream *substream)
{
#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
	return 0;
#else
	return snd_pcm_lib_free_pages(substream);
#endif
}

int alpx_playback_prepare(struct snd_pcm_substream *substream)
{
	return 0;
}

int alpx_playback_trigger(struct snd_pcm_substream *substream, int command)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);

	dev_dbg(alpx_dev->dev, "CALLED\n");

	switch (command) {
	case SNDRV_PCM_TRIGGER_START:
		alpx_pipe_start(&alpx_dev->playback, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		alpx_pipe_stop(&alpx_dev->playback, substream);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

const struct snd_pcm_ops alpx_playback_ops = {
	.open = alpx_playback_open,
	.close = alpx_playback_close,
#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
	.ioctl = snd_pcm_lib_ioctl,
#endif
	.hw_params = alpx_playback_hw_params,
	.hw_free = alpx_playback_hw_free,
	.prepare = alpx_playback_prepare,
	.trigger = alpx_playback_trigger,
	.pointer = snd_dmaengine_pcm_pointer,
};

/* Capture */

int alpx_capture_open(struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	ret = alpx_pipe_open(&alpx_dev->capture, substream);
	if (ret)
		return ret;

	runtime->hw = *alpx_dev->variant->capture_hw;

	return 0;
}

int alpx_capture_close(struct snd_pcm_substream *substream)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);

	alpx_pipe_close(&alpx_dev->capture);

	return 0;
}

int alpx_capture_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *hw_params)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);
	int ret;

	ret = alpx_pipe_configure(&alpx_dev->capture, hw_params);
	if (ret)
		return ret;

#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
	return 0;
#else
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
#endif
}

int alpx_capture_hw_free(struct snd_pcm_substream *substream)
{
#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE
	return 0;
#else
	return snd_pcm_lib_free_pages(substream);
#endif
}

int alpx_capture_prepare(struct snd_pcm_substream *substream)
{
	return 0;
}

int alpx_capture_trigger(struct snd_pcm_substream *substream, int command)
{
	struct alpx_device *alpx_dev = snd_pcm_substream_chip(substream);

	switch (command) {
	case SNDRV_PCM_TRIGGER_START:
		alpx_pipe_start(&alpx_dev->capture, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		alpx_pipe_stop(&alpx_dev->capture, substream);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
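/*
 * The capture ops mirror the playback ops above. The .ioctl field is only set
 * on kernels older than 5.6, where the ALSA core still expected an explicit
 * snd_pcm_lib_ioctl handler; on newer kernels the core provides that default
 * behaviour itself when the field is left unset.
 */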
const struct snd_pcm_ops alpx_capture_ops = {
	.open = alpx_capture_open,
	.close = alpx_capture_close,
#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
	.ioctl = snd_pcm_lib_ioctl,
#endif
	.hw_params = alpx_capture_hw_params,
	.hw_free = alpx_capture_hw_free,
	.prepare = alpx_capture_prepare,
	.trigger = alpx_capture_trigger,
	.pointer = snd_dmaengine_pcm_pointer,
};