diff --git a/arch/arm/boot/dts/xilinx/zynq-zed-adv7511-adaq4216.dts b/arch/arm/boot/dts/xilinx/zynq-zed-adv7511-adaq4216.dts
index 734c61d899d2f7..4c829574934ccf 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zed-adv7511-adaq4216.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zed-adv7511-adaq4216.dts
@@ -39,6 +39,12 @@
 		regulator-always-on;
 	};
 
+	trigger_pwm: adc-pwm-trigger {
+		compatible = "pwm-trigger";
+		#trigger-source-cells = <0>;
+		pwms = <&adc_trigger 0 1000000 0>;
+	};
+
 	clocks {
 		cnv_ext_clk: ext-clk {
 			#clock-cells = <0x0>;
@@ -66,6 +72,15 @@
 		};
 	};
 
+	adc_trigger: pwm@44b00000 {
+		compatible = "adi,axi-pwmgen-2.00.a";
+		reg = <0x44b00000 0x1000>;
+		label = "ad463x_cnv";
+		#pwm-cells = <3>;
+		clocks = <&clkc 15>, <&cnv_ext_clk>;
+		clock-names = "axi", "ext";
+	};
+
 	rx_dma: dma-controller@44a30000 {
 		compatible = "adi,axi-dmac-1.00.a";
 		reg = <0x44a30000 0x1000>;
@@ -83,34 +98,27 @@
 		clock-output-names = "spi_clk";
 	};
 
-	axi_pwm_gen: pwm@44b00000 {
-		compatible = "adi,axi-pwmgen-2.00.a";
-		reg = <0x44b00000 0x1000>;
-		label = "ad463x_cnv";
-		#pwm-cells = <2>;
-		clocks = <&clkc 15>, <&cnv_ext_clk>;
-		clock-names = "axi", "ext";
-	};
-
 	axi_spi_engine: spi@44a00000 {
-		compatible = "adi-ex,axi-spi-engine-1.00.a";
+		compatible = "adi,axi-spi-engine-1.00.a";
 		reg = <0x44a00000 0x1FF>;
 		interrupt-parent = <&intc>;
 		interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clkc 15>, <&spi_clk>;
 		clock-names = "s_axi_aclk", "spi_clk";
-		num-cs = <1>;
+		dmas = <&rx_dma 0>;
+		dma-names = "offload0-rx";
+		trigger-sources = <&trigger_pwm>;
 
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 
 		adaq4216: adaq4216@0 {
 			compatible = "adi,adaq4216";
 			reg = <0>;
+			spi-max-frequency = <80000000>;
 			vdd-supply = <&vref>;
 			vdd_1_8-supply = <&vdd_1_8>;
 			vio-supply = <&vio>;
 			vref-supply = <&vref>;
-			spi-max-frequency = <80000000>;
 			reset-gpios = <&gpio0 86 GPIO_ACTIVE_LOW>;
 			adi,pga-gpios = <&gpio0 87 GPIO_ACTIVE_HIGH>,
 					<&gpio0 88 GPIO_ACTIVE_HIGH>;
@@ -118,12 +126,8 @@
 			adi,clock-mode = <0>;
 			adi,out-data-mode = <0>;
 			adi,spi-trigger;
-			clocks = <&cnv_ext_clk>;
-			clock-names = "trigger_clock";
-			dmas = <&rx_dma 0>;
-			dma-names = "rx";
-			pwm-names = "spi_trigger", "cnv";
-			pwms = <&axi_pwm_gen 0 0>, <&axi_pwm_gen 1 0>;
+			pwm-names = "cnv";
+			pwms = <&adc_trigger 1 1000000 0>;
 		};
 	};
 };
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 3b2a85bf9d942c..741ccf2ee4c654 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -71,6 +71,7 @@ config AD4630
 	select IIO_BUFFER
 	select IIO_BUFFER_DMA
 	select IIO_BUFFER_DMAENGINE
+	select IIO_BUFFER_DMAENGINE_FILTERED
 	select SPI_OFFLOAD
 	help
 	  Say yes here to build support for Analog Devices AD4630 high speed
diff --git a/drivers/iio/adc/ad4630.c b/drivers/iio/adc/ad4630.c
index e6365e78cda450..e9d3f327a368ab 100644
--- a/drivers/iio/adc/ad4630.c
+++ b/drivers/iio/adc/ad4630.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <linux/iio/buffer-dmaengine-filtered.h>
 #include
 #include
 #include
@@ -337,7 +338,7 @@ static int ad4630_get_avg_frame_len(struct iio_dev *dev, unsigned int *avg_len)
 out:
 	iio_device_release_direct_mode(dev);
 
-	return 0;
+	return ret;
 }
 
 static int ad4630_read_raw(struct iio_dev *indio_dev,
@@ -527,7 +528,6 @@ static int ad4630_set_chan_offset(struct iio_dev *indio_dev, int ch, int offset)
 static void ad4630_fill_scale_tbl(struct ad4630_state *st)
 {
 	int val, val2, tmp0, tmp1, i;
-	u64 tmp2;
 
 	val2 = st->chip->modes[st->out_data].channels->scan_type.realbits;
 	for (i = 0; i < ARRAY_SIZE(ad4630_gains); i++) {
@@ -536,8 +536,7 @@ static void ad4630_fill_scale_tbl(struct ad4630_state *st)
 		val = mult_frac(val, ad4630_gains_frac[i][1] * MILLI,
 				ad4630_gains_frac[i][0]);
 		/* Would multiply by NANO here but we already multiplied by MILLI */
-		tmp2 = shift_right((u64)val * MICRO, val2);
-		tmp0 = (int)div_s64_rem(tmp2, NANO, &tmp1);
+		tmp0 = (int)div_u64_rem(((u64)val * MICRO) >> val2, NANO, &tmp1);
 		st->scale_tbl[i][0] = tmp0; /* Integer part */
 		st->scale_tbl[i][1] = abs(tmp1); /* Fractional part */
 	}
@@ -551,7 +550,7 @@ static int ad4630_calc_pga_gain(int gain_int, int gain_fract, int vref,
 
 	gain_nano = gain_int * NANO + gain_fract;
 
-	if (gain_nano < 0 || gain_nano > ADAQ4224_GAIN_MAX_NANO)
+	if (gain_nano > ADAQ4224_GAIN_MAX_NANO)
 		return -EINVAL;
 
 	tmp = DIV_ROUND_CLOSEST_ULL(gain_nano << precision, NANO);
@@ -605,7 +604,7 @@ static int ad4630_set_chan_gain(struct iio_dev *indio_dev, int ch,
 
 	gain = gain_int * MICRO + gain_frac;
 
-	if (gain < 0 || gain > AD4630_GAIN_MAX)
+	if (gain > AD4630_GAIN_MAX)
 		return -EINVAL;
 
 	gain = DIV_ROUND_CLOSEST_ULL(gain * 0x8000, 1000000);
@@ -642,7 +641,7 @@ static int ad4630_set_avg_frame_len(struct iio_dev *dev,
 	unsigned int last_avg_idx = ARRAY_SIZE(ad4630_average_modes) - 1;
 	int ret, freq;
 
-	if (avg_val < 0 || avg_val > ad4630_average_modes[last_avg_idx])
+	if (avg_val > ad4630_average_modes[last_avg_idx])
 		return -EINVAL;
 
 	ret = iio_device_claim_direct_mode(dev);
@@ -767,8 +766,6 @@ static int ad4630_buffer_predisable(struct iio_dev *indio_dev)
 			BIT(IIO_CHAN_INFO_CALIBBIAS),		\
 	.info_mask_separate_available = _msk_avail,		\
 	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
-	.info_mask_shared_by_all_available =			\
-				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
 	.info_mask_shared_by_type = _msk_type |			\
 			BIT(IIO_CHAN_INFO_SCALE),		\
 	.info_mask_shared_by_type_available = _msk_type,	\
@@ -882,20 +879,20 @@ static const struct ad4630_out_mode ad4630_24_modes[] = {
 static const struct ad4630_out_mode adaq4216_modes[] = {
 	[AD4630_16_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 16, 0, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 16, 0, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 16,
 	},
 	[AD4630_16_DIFF_8_COM] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 16, 8, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 16, 8, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 24,
 	},
 	[AD4630_30_AVERAGED_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 30, 2,
-				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 30, 2,
+				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
 		},
 		.data_width = 32,
 	}
@@ -904,20 +901,20 @@ static const struct ad4630_out_mode adaq4216_modes[] = {
 static const struct ad4630_out_mode adaq4220_modes[] = {
 	[AD4630_16_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 20, 0, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 20, 0, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 20,
 	},
 	[AD4630_16_DIFF_8_COM] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 16, 8, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 16, 8, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 24,
 	},
 	[AD4630_30_AVERAGED_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 30, 2,
-				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 30, 2,
+				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
 		},
 		.data_width = 32,
 	}
@@ -926,26 +923,26 @@ static const struct ad4630_out_mode adaq4220_modes[] = {
 static const struct ad4630_out_mode adaq4224_modes[] = {
 	[AD4630_24_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 24, 0, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 24, 0, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 24,
 	},
 	[AD4630_16_DIFF_8_COM] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 16, 8, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 16, 8, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 24,
 	},
 	[AD4630_24_DIFF_8_COM] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 24, 8, AD4630_CHAN_INFO_NONE),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 24, 8, AD4630_CHAN_INFO_NONE),
 		},
 		.data_width = 32,
 	},
 	[AD4630_30_AVERAGED_DIFF] = {
 		.channels = {
-			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 64, 30, 2,
-				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
+			AD4630_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 32, 30, 2,
+				    BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO)),
 		},
 		.data_width = 32,
 	}
@@ -1032,7 +1029,7 @@ static const struct ad4630_chip_info ad4630_chip_info[] = {
 	[ID_ADAQ4216] = {
 		.available_masks = ad4030_channel_masks,
 		.modes = adaq4216_modes,
-		.out_modes_mask = GENMASK(3, 0),
+		.out_modes_mask = BIT(3) | GENMASK(1, 0),
 		.name = "adaq4216",
 		.grade = 0x1E,
 		.min_offset = (int)BIT(15) * -1,
@@ -1537,8 +1534,24 @@ static int ad4630_probe(struct spi_device *spi)
 		return dev_err_probe(dev, PTR_ERR(rx_dma),
 				     "failed to get offload RX DMA\n");
 
-	ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
-							  rx_dma, IIO_BUFFER_DIRECTION_IN);
+	/*
+	 * The ad4630_fmc HDL project was designed for ADCs with two channels
+	 * and always streams two data channels to DMA (even when the ADC has
+	 * only one physical channel). However, if the ADC has only one
+	 * physical channel, the data that would come from the second ADC
+	 * channel comes in as noise and has to be discarded. Because of that,
+	 * when using single-channel ADCs, the ADC driver needs to use a
+	 * special DMA buffer that filters out half of the data that reaches
+	 * DMA memory. With that, the ADC sample data can be delivered to user
+	 * space without any noise being added to the IIO buffer.
+	 */
+	if (indio_dev->num_channels == 1)
+		ret = devm_iio_dmaengine_filtered_buffer_setup_with_handle(dev,
+						indio_dev, rx_dma,
+						IIO_BUFFER_DIRECTION_IN);
+	else
+		ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+						rx_dma, IIO_BUFFER_DIRECTION_IN);
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to get DMA buffer\n");
 
@@ -1634,3 +1647,4 @@ MODULE_AUTHOR("Liviu Adace ");
 MODULE_DESCRIPTION("Analog Devices AD4630 and ADAQ4224 ADC family driver");
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
+MODULE_IMPORT_NS(IIO_DMAENGINE_FILTERED_BUFFER);
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index a372056f42c5b1..2f08e49009ac69 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -30,6 +30,19 @@ config IIO_BUFFER_DMAENGINE
 
 	  Should be selected by drivers that want to use this functionality.
 
+config IIO_BUFFER_DMAENGINE_FILTERED
+	tristate "Industrial I/O DMA buffer integration with DMAEngine that filters data"
+	select IIO_BUFFER_DMA
+	select IIO_BUFFER_DMAENGINE
+	help
+	  Provides a bonding of the generic IIO DMA buffer infrastructure with the
+	  DMAEngine framework. This can be used by converter drivers with a DMA port
+	  connected to an external DMA controller which is supported by the
+	  DMAEngine framework.
+
+	  Should NOT be selected by anything except drivers for the ADAQ4216 and
+	  similar single-channel ADCs supported by the ad4630.c driver.
+
 config IIO_DMA_BUF_MMAP_LEGACY
 	bool "Enables I/O DMA buffer legacy MMAP support"
 	depends on IIO_BUFFER_DMAENGINE
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 1403eb2f9409fa..4a8d3a5adf6895 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
 obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
 obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_DMAENGINE_FILTERED) += industrialio-buffer-dmaengine-filtered.o
 obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
 obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
 obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine-filtered.c b/drivers/iio/buffer/industrialio-buffer-dmaengine-filtered.c
new file mode 100644
index 00000000000000..d3cb1fab6b4328
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine-filtered.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Marcelo Schmitt
+ *
+ * A nasty hack to filter out part of the sample data that comes from FPGA IPs.
+ *
+ * The reasonable parts of this are based on (copied from) Lars-Peter's
+ * industrialio-buffer-dmaengine.c.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
+ * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
+ * used to manage the buffer memory and implement the IIO buffer operations
+ * while the DMAengine framework is used to perform the DMA transfers. Combined
+ * this results in a device independent fully functional DMA buffer
+ * implementation that can be used by device drivers for peripherals which are
+ * connected to a DMA controller which has a DMAengine driver implementation.
+ */
+
+struct dmaengine_buffer {
+	struct iio_dma_buffer_queue queue;
+
+	struct dma_chan *chan;
+	struct list_head active;
+
+	size_t align;
+	size_t max_size;
+};
+
+static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(struct iio_buffer *buffer)
+{
+	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
+}
+
+static void iio_dmaengine_filtered_buffer_block_done(void *data,
+						     const struct dmaengine_result *result)
+{
+	struct iio_dma_buffer_block *block = data;
+	unsigned long flags;
+	/*
+	 * Hack to discard ADAQ4216 second channel data.
+	 * The ad4630_fmc HDL project was designed for ADCs with two channels
+	 * and always streams two data channels to DMA (even when the ADC has
+	 * only one physical channel). However, if the ADC has only one
+	 * physical channel, the data that would come from the second ADC
+	 * channel comes in as noise and has to be discarded. Because of that,
+	 * only half of the data coming from DMA is used for single-channel
+	 * ADCs such as the ADAQ4216.
+	 */
+	struct iio_dma_buffer_queue *queue = block->queue;
+	unsigned int i;
+	void *addr;
+	size_t n;
+#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	n = block->block.bytes_used - result->residue;
+#else
+	n = block->bytes_used - result->residue;
+#endif
+
+	/*
+	 * Each ADC sample is a 32-bit data element.
+	 * Modify the DMA block memory in place, keeping one 4-byte value out of every two.
+	 * In memory, the ADC data is arranged the following way:
+	 * CH1_SAMPLE1 | CH2_SAMPLE1 | CH1_SAMPLE2 | CH2_SAMPLE2 |
+	 * CH1_SAMPLE3 | CH2_SAMPLE3 | CH1_SAMPLE4 | CH2_SAMPLE4 |
+	 * ...
+	 * CH1_SAMPLEN | CH2_SAMPLEN |
+	 */
+	addr = block->vaddr + queue->fileio.pos;
+
+	for (i = 0; i < (n / 2); i = i + queue->buffer.bytes_per_datum)
+		memcpy((addr + i), (addr + (i * 2)), queue->buffer.bytes_per_datum);
+
+	spin_lock_irqsave(&block->queue->list_lock, flags);
+	list_del(&block->head);
+	spin_unlock_irqrestore(&block->queue->list_lock, flags);
+#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	block->block.bytes_used -= result->residue;
+	block->block.bytes_used /= 2;
+#else
+	block->bytes_used -= result->residue;
+	block->bytes_used /= 2;
+#endif
+	iio_dma_buffer_block_done(block);
+}
+
+int iio_dmaengine_filtered_buffer_submit_block(struct iio_dma_buffer_queue *queue,
+					       struct iio_dma_buffer_block *block)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(&block->queue->buffer);
+	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction dma_dir;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	struct scatterlist *sgl;
+	struct dma_vec *vecs;
+#endif
+	size_t max_size;
+	dma_cookie_t cookie;
+#ifndef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	size_t len_total;
+	unsigned int i;
+	int nents;
+#endif
+
+#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+		dma_dir = DMA_DEV_TO_MEM;
+		block->block.bytes_used = block->block.size;
+	} else {
+		dma_dir = DMA_MEM_TO_DEV;
+	}
+
+	max_size = min_t(size_t, block->block.bytes_used, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
+
+	if (!block->block.bytes_used || block->block.bytes_used > max_size) {
+		iio_dma_buffer_block_done(block);
+		return 0;
+	}
+
+	if (block->block.flags & IIO_BUFFER_BLOCK_FLAG_CYCLIC) {
+		desc = dmaengine_prep_dma_cyclic(dmaengine_buffer->chan,
+						 block->phys_addr,
+						 block->block.bytes_used,
+						 block->block.bytes_used,
+						 dma_dir, 0);
+		if (!desc)
+			return -ENOMEM;
+	} else {
+		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+						   block->phys_addr,
+						   block->block.bytes_used,
+						   dma_dir, DMA_PREP_INTERRUPT);
+		if (!desc)
+			return -ENOMEM;
+
+		desc->callback_result = iio_dmaengine_filtered_buffer_block_done;
+		desc->callback_param = block;
+	}
+#else
+	max_size = min(block->size, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
+
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+		dma_dir = DMA_DEV_TO_MEM;
+	else
+		dma_dir = DMA_MEM_TO_DEV;
+
+	if (block->sg_table) {
+		sgl = block->sg_table->sgl;
+		nents = sg_nents_for_len(sgl, block->bytes_used);
+		if (nents < 0)
+			return nents;
+
+		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
+		if (!vecs)
+			return -ENOMEM;
+
+		len_total = block->bytes_used;
+
+		for (i = 0; i < nents; i++) {
+			vecs[i].addr = sg_dma_address(sgl);
+			/*
+			 * Out-of-tree change: we don't yet have the newer
+			 * min() macro changes that allow comparing two
+			 * unsigned values of different types, so use min_t()
+			 * here.
+			 */
+			vecs[i].len = min_t(size_t, sg_dma_len(sgl), len_total);
+			len_total -= vecs[i].len;
+
+			sgl = sg_next(sgl);
+		}
+
+		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
+							 vecs, nents, dma_dir,
+							 DMA_PREP_INTERRUPT);
+		kfree(vecs);
+	} else {
+		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+			block->bytes_used = max_size;
+
+		if (!block->bytes_used || block->bytes_used > max_size)
+			return -EINVAL;
+
+		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
+						   block->phys_addr,
+						   block->bytes_used,
+						   dma_dir,
+						   DMA_PREP_INTERRUPT);
+	}
+	if (!desc)
+		return -ENOMEM;
+
+	desc->callback_result = iio_dmaengine_filtered_buffer_block_done;
+	desc->callback_param = block;
+#endif
+	cookie = dmaengine_submit(desc);
+	if (dma_submit_error(cookie))
+		return dma_submit_error(cookie);
+
+	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
+	list_add_tail(&block->head, &dmaengine_buffer->active);
+	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);
+
+	dma_async_issue_pending(dmaengine_buffer->chan);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_filtered_buffer_submit_block, IIO_DMAENGINE_FILTERED_BUFFER);
+
+static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buf);
+
+	iio_dma_buffer_release(&dmaengine_buffer->queue);
+	kfree(dmaengine_buffer);
+}
+
+#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+static int iio_dma_filtered_buffer_alloc_blocks(struct iio_buffer *buffer,
+						struct iio_buffer_block_alloc_req *req)
+{
+	/*
+	 * Allocate blocks with two times the usual size because half of the
+	 * data will be discarded, so the blocks have to be double-sized to
+	 * fill the IIO buffer.
+	 */
+	req->size = req->size * 2;
+	return iio_dma_buffer_alloc_blocks(buffer, req);
+}
+#endif
+
+static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
+	.read = iio_dma_buffer_read,
+	.write = iio_dma_buffer_write,
+	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
+	.set_length = iio_dma_buffer_set_length,
+	.request_update = iio_dma_buffer_request_update,
+	.enable = iio_dma_buffer_enable,
+	.disable = iio_dma_buffer_disable,
+	.data_available = iio_dma_buffer_usage,
+	.space_available = iio_dma_buffer_usage,
+	.release = iio_dmaengine_buffer_release,
+#ifdef CONFIG_IIO_DMA_BUF_MMAP_LEGACY
+	.alloc_blocks = iio_dma_filtered_buffer_alloc_blocks,
+	.free_blocks = iio_dma_buffer_free_blocks,
+	.query_block = iio_dma_buffer_query_block,
+	.enqueue_block = iio_dma_buffer_enqueue_block,
+	.dequeue_block = iio_dma_buffer_dequeue_block,
+	.mmap = iio_dma_buffer_mmap,
+#else
+	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
+	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
+	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,
+
+	.lock_queue = iio_dma_buffer_lock_queue,
+	.unlock_queue = iio_dma_buffer_unlock_queue,
+#endif
+	.modes = INDIO_BUFFER_HARDWARE,
+	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
+};
+
+static const struct iio_dma_buffer_ops iio_dmaengine_filtered_default_ops = {
+	.submit = iio_dmaengine_filtered_buffer_submit_block,
+	.abort = iio_dmaengine_buffer_abort,
+};
+
+static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
+						     struct device_attribute *attr,
+						     char *buf)
+{
+	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+
+	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
+}
+
+static IIO_DEVICE_ATTR(length_align_bytes, 0444,
+		       iio_dmaengine_buffer_get_length_align, NULL, 0);
+
+static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
+	&iio_dev_attr_length_align_bytes,
+	NULL,
+};
+
+/**
+ * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
+ * @chan: DMA channel.
+ * @ops: DMA buffer queue callbacks; when NULL, the default filtered ops are used.
+ * @data: Driver-private data passed to iio_dma_buffer_init().
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers.
+ *
+ * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan,
+						     const struct iio_dma_buffer_ops *ops,
+						     void *data)
+{
+	struct dmaengine_buffer *dmaengine_buffer;
+	unsigned int width, src_width, dest_width;
+	struct dma_slave_caps caps;
+	int ret;
+
+	ret = dma_get_slave_caps(chan, &caps);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
+	if (!dmaengine_buffer)
+		return ERR_PTR(-ENOMEM);
+
+	/* Needs to be aligned to the maximum of the minimums */
+	if (caps.src_addr_widths)
+		src_width = __ffs(caps.src_addr_widths);
+	else
+		src_width = 1;
+	if (caps.dst_addr_widths)
+		dest_width = __ffs(caps.dst_addr_widths);
+	else
+		dest_width = 1;
+	width = max(src_width, dest_width);
+
+	if (!width) { /* FIXME */
+		pr_warn("%s:%d width %d (DMA width >= 256-bits ?)\n",
+			__func__, __LINE__, width);
+		width = 32;
+	}
+
+	INIT_LIST_HEAD(&dmaengine_buffer->active);
+	dmaengine_buffer->chan = chan;
+	dmaengine_buffer->align = width;
+	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);
+
+	if (!ops)
+		ops = &iio_dmaengine_filtered_default_ops;
+
+	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev, ops, data);
+
+	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
+	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
+
+	return &dmaengine_buffer->queue.buffer;
+}
+
+/**
+ * iio_dmaengine_buffer_free() - Free dmaengine buffer
+ * @buffer: Buffer to free
+ *
+ * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
+ */
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+{
+	struct dmaengine_buffer *dmaengine_buffer =
+		iio_buffer_to_dmaengine_buffer(buffer);
+
+	iio_dma_buffer_exit(&dmaengine_buffer->queue);
+	iio_buffer_put(buffer);
+}
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+				  struct dma_chan *chan,
+				  enum iio_buffer_direction dir,
+				  const struct iio_dma_buffer_ops *ops,
+				  void *data)
+{
+	struct iio_buffer *buffer;
+	int ret;
+
+	buffer = iio_dmaengine_buffer_alloc(chan, ops, data);
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	buffer->direction = dir;
+
+	ret = iio_device_attach_buffer(indio_dev, buffer);
+	if (ret) {
+		iio_dmaengine_buffer_free(buffer);
+		return ERR_PTR(ret);
+	}
+
+	return buffer;
+}
+
+/**
+ * iio_dmaengine_filtered_buffer_setup_ext() - Setup a DMA buffer for an IIO device
+ * @dev: DMA channel consumer device
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
+ * release it.
+ */
+struct iio_buffer *iio_dmaengine_filtered_buffer_setup_ext(struct device *dev,
+							   struct iio_dev *indio_dev,
+							   const char *channel,
+							   enum iio_buffer_direction dir)
+{
+	struct dma_chan *chan;
+	struct iio_buffer *buffer;
+
+	chan = dma_request_chan(dev, channel);
+	if (IS_ERR(chan))
+		return ERR_CAST(chan);
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, NULL, NULL);
+	if (IS_ERR(buffer))
+		dma_release_channel(chan);
+
+	return buffer;
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_filtered_buffer_setup_ext, IIO_DMAENGINE_FILTERED_BUFFER);
+
+static void devm_iio_dmaengine_buffer_free(void *buffer)
+{
+	iio_dmaengine_buffer_free(buffer);
+}
+
+/**
+ * devm_iio_dmaengine_filtered_buffer_setup_with_handle() - Setup a DMA buffer
+ * for an IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as iio_dmaengine_filtered_buffer_setup_ext() except that
+ * the caller manages requesting and releasing the DMA channel handle.
+ */
+int devm_iio_dmaengine_filtered_buffer_setup_with_handle(struct device *dev,
+							 struct iio_dev *indio_dev,
+							 struct dma_chan *chan,
+							 enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir, NULL, NULL);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
+					buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_filtered_buffer_setup_with_handle,
+		     IIO_DMAENGINE_FILTERED_BUFFER);
+
+MODULE_AUTHOR("Marcelo Schmitt ");
+MODULE_DESCRIPTION("IIO framework DMA buffer that filters data");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
diff --git a/include/linux/iio/buffer-dmaengine-filtered.h b/include/linux/iio/buffer-dmaengine-filtered.h
new file mode 100644
index 00000000000000..abc503411d352b
--- /dev/null
+++ b/include/linux/iio/buffer-dmaengine-filtered.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Marcelo Schmitt
+ * Based on buffer-dmaengine.h.
+ */
+
+#ifndef __IIO_DMAENGINE_FILTERED_H__
+#define __IIO_DMAENGINE_FILTERED_H__
+
+#include <linux/iio/buffer.h>
+
+struct iio_dev;
+struct device;
+struct dma_chan;
+struct iio_buffer;
+struct iio_dma_buffer_ops;
+struct iio_dma_buffer_block;
+struct iio_dma_buffer_queue;
+
+int iio_dmaengine_filtered_buffer_submit_block(struct iio_dma_buffer_queue *queue,
+					       struct iio_dma_buffer_block *block);
+
+struct iio_buffer *iio_dmaengine_filtered_buffer_setup_ext(struct device *dev,
+							   struct iio_dev *indio_dev,
+							   const char *channel,
+							   enum iio_buffer_direction dir);
+
+int devm_iio_dmaengine_filtered_buffer_setup_with_handle(struct device *dev,
+							 struct iio_dev *indio_dev,
+							 struct dma_chan *chan,
+							 enum iio_buffer_direction dir);
+
+#endif
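
For reference, the compaction that iio_dmaengine_filtered_buffer_block_done() applies to each DMA block can be reproduced in plain userspace C. The sketch below is illustrative only and is not part of the patch: filter_half(), the sample values, and the main() harness are invented for the example. It assumes 32-bit samples (bytes_per_datum == 4) and the CH1/CH2 interleaved layout described in the comment above, and it keeps the CH1 word of every pair in place, mirroring the driver's memcpy() loop before bytes_used is halved:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Mirror of the driver's compaction loop: for n bytes actually written
     * by DMA (bytes_used - residue), keep one bytes_per_datum-sized value
     * out of every two, packing the kept values at the start of the block.
     * The first iteration is a self-copy (src == dst), as in the driver.
     */
    static void filter_half(uint8_t *addr, size_t n, size_t bytes_per_datum)
    {
            size_t i;

            for (i = 0; i < n / 2; i += bytes_per_datum)
                    memcpy(addr + i, addr + i * 2, bytes_per_datum);
    }

    int main(void)
    {
            /* CH1_SAMPLE1 | CH2_SAMPLE1 | CH1_SAMPLE2 | CH2_SAMPLE2 | ... */
            uint32_t block[] = { 0x1111, 0xdead, 0x2222, 0xbeef, 0x3333, 0xf00d };
            size_t i;

            filter_half((uint8_t *)block, sizeof(block), sizeof(uint32_t));

            /* First half now holds CH1 samples only: 0x1111 0x2222 0x3333 */
            for (i = 0; i < 3; i++)
                    printf("0x%04" PRIx32 "\n", block[i]);

            return 0;
    }

After the loop, the driver halves bytes_used so the IIO core only exposes the compacted first half of the block to user space; the legacy MMAP path compensates by allocating double-sized blocks in iio_dma_filtered_buffer_alloc_blocks().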