diff --git a/sdk/linux_kernel_drivers/xdma/10-xdma.rules b/sdk/linux_kernel_drivers/xdma/10-xdma.rules
old mode 100755
new mode 100644
diff --git a/sdk/linux_kernel_drivers/xdma/Makefile b/sdk/linux_kernel_drivers/xdma/Makefile
old mode 100755
new mode 100644
index 182051b70..2427268a7
--- a/sdk/linux_kernel_drivers/xdma/Makefile
+++ b/sdk/linux_kernel_drivers/xdma/Makefile
@@ -42,6 +42,7 @@ all :
 
 clean:
 	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean
+	@/bin/rm -f *.ko modules.order *.mod.c *.o *.o.ur-safe .*.o.cmd
 
 install: all
 	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules_install
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_bypass.c b/sdk/linux_kernel_drivers/xdma/cdev_bypass.c
index 9ab445ea7..5e40526be 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_bypass.c
+++ b/sdk/linux_kernel_drivers/xdma/cdev_bypass.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -21,11 +21,12 @@
  * Karen Xie
  *
  ******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
 #include "libxdma_api.h"
 #include "xdma_cdev.h"
 
-#define write_register(v,mem,off) iowrite32(v, mem)
+#define write_register(v, mem, off) iowrite32(v, mem)
 
 static int copy_desc_data(struct xdma_transfer *transfer, char __user *buf,
 		size_t *buf_offset, size_t buf_size)
@@ -34,8 +35,15 @@ static int copy_desc_data(struct xdma_transfer *transfer, char __user *buf,
 	int copy_err;
 	int rc = 0;
 
-	BUG_ON(!buf);
-	BUG_ON(!buf_offset);
+	if (!buf) {
+		pr_err("Invalid user buffer\n");
+		return -EINVAL;
+	}
+
+	if (!buf_offset) {
+		pr_err("Invalid user buffer offset\n");
+		return -EINVAL;
+	}
 
 	/* Fill user buffer with descriptor data */
 	for (i = 0; i < transfer->desc_num; i++) {
@@ -76,7 +84,7 @@ static ssize_t char_bypass_read(struct file *file, char __user *buf,
 	xdev = xcdev->xdev;
 	engine = xcdev->engine;
 
-	dbg_sg("In char_bypass_read()\n");
+	dbg_sg("In %s()\n", __func__);
 
 	if (count & 3) {
 		dbg_sg("Buffer size must be a multiple of 4 bytes\n");
@@ -119,7 +127,7 @@ static ssize_t char_bypass_write(struct file *file, const char __user *buf,
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	u32 desc_data;
-	u32 *bypass_addr;
+	void __iomem *bypass_addr;
 	size_t buf_offset = 0;
 	int rc = 0;
 	int copy_err;
@@ -145,18 +153,21 @@ static ssize_t char_bypass_write(struct file *file, const char __user *buf,
 		return -ENODEV;
 	}
 
-	dbg_sg("In char_bypass_write()\n");
+	dbg_sg("In %s()\n", __func__);
 
 	spin_lock(&engine->lock);
 
 	/* Write descriptor data to the bypass BAR */
-	bypass_addr = (u32 *)xdev->bar[xdev->bypass_bar_idx];
-	bypass_addr += engine->bypass_offset;
+	bypass_addr = xdev->bar[xdev->bypass_bar_idx];
+	bypass_addr = (void __iomem *)(
+			(u32 __iomem *)bypass_addr + engine->bypass_offset
+			);
 	while (buf_offset < count) {
 		copy_err = copy_from_user(&desc_data, &buf[buf_offset],
 					sizeof(u32));
 		if (!copy_err) {
-			write_register(desc_data, bypass_addr, bypass_addr - engine->bypass_offset);
+			write_register(desc_data, bypass_addr,
+					bypass_addr - engine->bypass_offset);
 			buf_offset += sizeof(u32);
 			rc = buf_offset;
 		} else {
@@ -188,5 +199,5 @@ static const struct file_operations bypass_fops = {
 
 void cdev_bypass_init(struct xdma_cdev *xcdev)
 {
-	cdev_init(&xcdev->cdev, &bypass_fops);
+	cdev_init(&xcdev->cdev, &bypass_fops);
 }
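The cdev_bypass.c hunks above replace raw u32 pointer math on the bypass BAR with explicitly __iomem-annotated arithmetic, which is what sparse checks for. A minimal sketch of the same idiom; write_words() and its parameters are invented for illustration, not driver code:

#include <linux/io.h>

/* Step through a BAR in 32-bit words; casting to u32 __iomem * makes
 * "+ 1" advance four bytes, the same arithmetic the reworked
 * char_bypass_write() performs with engine->bypass_offset.
 */
static void write_words(void __iomem *bar, unsigned long word_off,
			const u32 *vals, unsigned int n)
{
	u32 __iomem *p = (u32 __iomem *)bar + word_off;
	unsigned int i;

	for (i = 0; i < n; i++)
		iowrite32(vals[i], p + i);
}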
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_ctrl.c b/sdk/linux_kernel_drivers/xdma/cdev_ctrl.c
index 404bbd7fa..9fa7a3522 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_ctrl.c
+++ b/sdk/linux_kernel_drivers/xdma/cdev_ctrl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,7 @@
  * Karen Xie
  *
  ******************************************************************************/
+
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
 #include <linux/version.h>
@@ -28,6 +29,12 @@
 #include "xdma_cdev.h"
 #include "cdev_ctrl.h"
 
+#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
+#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)
+#else
+#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
+#endif
+
 /*
  * character device file operations for control bus (through control bridge)
  */
@@ -36,13 +43,13 @@ static ssize_t char_ctrl_read(struct file *fp, char __user *buf, size_t count,
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)fp->private_data;
 	struct xdma_dev *xdev;
-	void *reg;
+	void __iomem *reg;
 	u32 w;
 	int rv;
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
+		return rv;
 	xdev = xcdev->xdev;
 
 	/* only 32-bit aligned and 32-bit multiples */
@@ -52,8 +59,8 @@ static ssize_t char_ctrl_read(struct file *fp, char __user *buf, size_t count,
 	reg = xdev->bar[xcdev->bar] + *pos;
 	//w = read_register(reg);
 	w = ioread32(reg);
-	dbg_sg("char_ctrl_read(@%p, count=%ld, pos=%d) value = 0x%08x\n", reg,
-		(long)count, (int)*pos, w);
+	dbg_sg("%s(@%p, count=%ld, pos=%d) value = 0x%08x\n",
+		__func__, reg, (long)count, (int)*pos, w);
 	rv = copy_to_user(buf, &w, 4);
 	if (rv)
 		dbg_sg("Copy to userspace failed but continuing\n");
@@ -67,13 +74,13 @@ static ssize_t char_ctrl_write(struct file *file, const char __user *buf,
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	struct xdma_dev *xdev;
-	void *reg;
+	void __iomem *reg;
 	u32 w;
 	int rv;
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
+		return rv;
 	xdev = xcdev->xdev;
 
 	/* only 32-bit aligned and 32-bit multiples */
@@ -83,12 +90,11 @@ static ssize_t char_ctrl_write(struct file *file, const char __user *buf,
 	/* first address is BAR base plus file position offset */
 	reg = xdev->bar[xcdev->bar] + *pos;
 	rv = copy_from_user(&w, buf, 4);
-	if (rv) {
+	if (rv)
 		pr_info("copy from user failed %d/4, but continuing.\n", rv);
-	}
 
-	dbg_sg("char_ctrl_write(0x%08x @%p, count=%ld, pos=%d)\n", w, reg,
-		(long)count, (int)*pos);
+	dbg_sg("%s(0x%08x @%p, count=%ld, pos=%d)\n",
+		__func__, w, reg, (long)count, (int)*pos);
 	//write_register(w, reg);
 	iowrite32(w, reg);
 	*pos += 4;
@@ -133,9 +139,13 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
-	xdev = xcdev->xdev;
+		return rv;
 
+	xdev = xcdev->xdev;
+	if (!xdev) {
+		pr_info("cmd %u, xdev NULL.\n", cmd);
+		return -EINVAL;
+	}
 	pr_info("cmd 0x%x, xdev 0x%p, pdev 0x%p.\n", cmd, xdev, xdev->pdev);
 
 	if (_IOC_TYPE(cmd) != XDMA_IOC_MAGIC) {
@@ -145,10 +155,10 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	}
 
 	if (_IOC_DIR(cmd) & _IOC_READ)
-		result = !access_ok(VERIFY_WRITE, (void __user *)arg,
+		result = !xlx_access_ok(VERIFY_WRITE, (void __user *)arg,
 				_IOC_SIZE(cmd));
 	else if (_IOC_DIR(cmd) & _IOC_WRITE)
-		result = !access_ok(VERIFY_READ, (void __user *)arg,
+		result = !xlx_access_ok(VERIFY_READ, (void __user *)arg,
 				_IOC_SIZE(cmd));
 
 	if (result) {
@@ -158,7 +168,7 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	switch (cmd) {
 	case XDMA_IOCINFO:
-		if (copy_from_user((void *)&ioctl_obj, (void *) arg,
+		if (copy_from_user((void *)&ioctl_obj, (void __user *) arg,
 			sizeof(struct xdma_ioc_base))) {
 			pr_err("copy_from_user failed.\n");
 			return -EFAULT;
@@ -169,20 +179,11 @@ long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 				ioctl_obj.magic, XDMA_XCL_MAGIC);
 			return -ENOTTY;
 		}
-
 		return version_ioctl(xcdev, (void __user *)arg);
 	case XDMA_IOCOFFLINE:
-		if (!xdev) {
-			pr_info("cmd %u, xdev NULL.\n", cmd);
-			return -EINVAL;
-		}
 		xdma_device_offline(xdev->pdev, xdev);
 		break;
 	case XDMA_IOCONLINE:
-		if (!xdev) {
-			pr_info("cmd %u, xdev NULL.\n", cmd);
-			return -EINVAL;
-		}
 		xdma_device_online(xdev->pdev, xdev);
 		break;
 	default:
@@ -205,7 +206,7 @@ int bridge_mmap(struct file *file, struct vm_area_struct *vma)
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
+		return rv;
 	xdev = xcdev->xdev;
 
 	off = vma->vm_pgoff << PAGE_SHIFT;
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_ctrl.h b/sdk/linux_kernel_drivers/xdma/cdev_ctrl.h
index 47e697cd6..e0a9047b6 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_ctrl.h
+++ b/sdk/linux_kernel_drivers/xdma/cdev_ctrl.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,7 @@
 * Karen Xie
 *
 ******************************************************************************/
+
 #ifndef _XDMA_IOCALLS_POSIX_H_
 #define _XDMA_IOCALLS_POSIX_H_
 
@@ -64,14 +65,14 @@ struct xdma_ioc_base {
 };
 
 struct xdma_ioc_info {
-	struct xdma_ioc_base base;
-	unsigned short vendor;
-	unsigned short device;
-	unsigned short subsystem_vendor;
-	unsigned short subsystem_device;
-	unsigned int dma_engine_version;
-	unsigned int driver_version;
-	unsigned long long feature_id;
+	struct xdma_ioc_base	base;
+	unsigned short		vendor;
+	unsigned short		device;
+	unsigned short		subsystem_vendor;
+	unsigned short		subsystem_device;
+	unsigned int		dma_engine_version;
+	unsigned int		driver_version;
+	unsigned long long	feature_id;
 	unsigned short domain;
 	unsigned char bus;
 	unsigned char dev;
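Kernel 5.0 removed the first parameter of access_ok() (the old VERIFY_READ/VERIFY_WRITE type argument), which is why cdev_ctrl.c now routes every check through the xlx_access_ok() wrapper. The same idiom in isolation; ioctl_arg_ok() is a hypothetical caller:

#include <linux/version.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE
#define xlx_access_ok(X, Y, Z) access_ok(Y, Z)	/* type arg dropped in 5.0 */
#else
#define xlx_access_ok(X, Y, Z) access_ok(X, Y, Z)
#endif

static int ioctl_arg_ok(unsigned int cmd, unsigned long arg)
{
	/* VERIFY_WRITE no longer exists on >= 5.0, but the macro discards
	 * the token before the compiler ever sees it.
	 */
	if (!xlx_access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;
	return 0;
}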
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_events.c b/sdk/linux_kernel_drivers/xdma/cdev_events.c
index 514aaf43b..2b468ed78 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_events.c
+++ b/sdk/linux_kernel_drivers/xdma/cdev_events.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -39,7 +39,7 @@ static ssize_t char_events_read(struct file *file, char __user *buf,
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
+		return rv;
 	user_irq = xcdev->user_irq;
 	if (!user_irq) {
 		pr_info("xcdev 0x%p, user_irq NULL.\n", xcdev);
@@ -88,7 +88,7 @@ static unsigned int char_events_poll(struct file *file, poll_table *wait)
 
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
-		return rv;
+		return rv;
 	user_irq = xcdev->user_irq;
 	if (!user_irq) {
 		pr_info("xcdev 0x%p, user_irq NULL.\n", xcdev);
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_sgdma.c b/sdk/linux_kernel_drivers/xdma/cdev_sgdma.c
index 31854f92a..2b615bb15 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_sgdma.c
+++ b/sdk/linux_kernel_drivers/xdma/cdev_sgdma.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -23,6 +23,7 @@
 ******************************************************************************/
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
+#include <linux/version.h>
 #include <linux/types.h>
 #include "libxdma_api.h"
 #include "xdma_cdev.h"
@@ -56,12 +57,9 @@ static loff_t char_sgdma_llseek(struct file *file, loff_t off, int whence)
 	if (newpos < 0)
 		return -EINVAL;
 	file->f_pos = newpos;
-	dbg_fops("char_sgdma_llseek: pos=%lld\n", (signed long long)newpos);
 
-#if 0
-	pr_err("0x%p, off 0x%lld, whence %d -> pos %lld.\n",
+	dbg_fops("0x%p, off %lld, whence %d -> pos %lld.\n",
 		file, (signed long long)off, whence, (signed long long)off);
-#endif
 
 	return newpos;
 }
@@ -84,7 +82,10 @@ static loff_t char_sgdma_llseek(struct file *file, loff_t off, int whence)
 static int check_transfer_align(struct xdma_engine *engine,
 	const char __user *buf, size_t count, loff_t pos, int sync)
 {
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("Invalid DMA engine\n");
+		return -EINVAL;
+	}
 
 	/* AXI ST or AXI MM non-incremental addressing mode? */
 	if (engine->non_incr_addr) {
@@ -175,17 +176,16 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 {
 	struct sg_table *sgt = &cb->sgt;
 	unsigned long len = cb->len;
-	char *buf = cb->buf;
+	char __user *buf = cb->buf;
 	struct scatterlist *sg;
-	unsigned int pages_nr = (((unsigned long)buf + len + PAGE_SIZE -1) -
+	unsigned int pages_nr = (((unsigned long)buf + len + PAGE_SIZE - 1) -
 				((unsigned long)buf & PAGE_MASK)) >> PAGE_SHIFT;
 	int i;
 	int rv;
 
-	if (pages_nr == 0) {
+	if (pages_nr == 0)
 		return -EINVAL;
-	}
 
 	if (sg_alloc_table(sgt, pages_nr, GFP_KERNEL)) {
 		pr_err("sgl OOM.\n");
@@ -211,8 +211,8 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 	if (rv != pages_nr) {
 		pr_err("unable to pin down all %u user pages, %d.\n",
 			pages_nr, rv);
-		rv = -EFAULT;
 		cb->pages_nr = rv;
+		rv = -EFAULT;
 		goto err_out;
 	}
@@ -228,9 +228,9 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 
 	sg = sgt->sgl;
 	for (i = 0; i < pages_nr; i++, sg = sg_next(sg)) {
-		//unsigned int offset = (uintptr_t)buf & ~PAGE_MASK;
 		unsigned int offset = offset_in_page(buf);
-		unsigned int nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+		unsigned int nbytes = min_t(unsigned int,
+					PAGE_SIZE - offset, len);
 
 		flush_dcache_page(cb->pages[i]);
 		sg_set_page(sg, cb->pages[i], nbytes, offset);
@@ -239,7 +239,11 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 		len -= nbytes;
 	}
 
-	BUG_ON(len);
+	if (len) {
+		pr_err("Invalid user buffer length. Cannot map to sgl\n");
+		return -EINVAL;
+	}
+
 	cb->pages_nr = pages_nr;
 	return 0;
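char_sgdma_map_user_buf_to_sgl() pins the user pages and wraps them in a scatterlist before the transfer is submitted. A trimmed sketch of that pin-then-map pattern under stated assumptions — pin_user_buf() and its simplified error handling are illustrative only, and newer kernels would use pin_user_pages_fast()/unpin_user_page() instead:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int pin_user_buf(char __user *buf, size_t len, struct page **pages,
			struct sg_table *sgt)
{
	unsigned long first = (unsigned long)buf >> PAGE_SHIFT;
	unsigned long last = ((unsigned long)buf + len - 1) >> PAGE_SHIFT;
	unsigned int npages = last - first + 1;
	int rv;

	if (sg_alloc_table(sgt, npages, GFP_KERNEL))
		return -ENOMEM;

	/* FOLL_WRITE because the device may write into the buffer */
	rv = get_user_pages_fast((unsigned long)buf, npages, FOLL_WRITE,
				pages);
	if (rv != npages)
		return -EFAULT;	/* caller unwinds the pages it did get */

	return 0;
}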
@@ -249,7 +253,7 @@ static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
 	return rv;
 }
 
-static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
+static ssize_t char_sgdma_read_write(struct file *file, const char __user *buf,
 		size_t count, loff_t *pos, bool write)
 {
 	int rv;
@@ -283,7 +287,7 @@ static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
 	}
 
 	memset(&cb, 0, sizeof(struct xdma_io_cb));
-	cb.buf = buf;
+	cb.buf = (char __user *)buf;
 	cb.len = count;
 	rv = char_sgdma_map_user_buf_to_sgl(&cb, write);
 	if (rv < 0)
@@ -291,10 +295,6 @@ static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
 
 	res = xdma_xfer_submit(xdev, engine->channel, write, *pos, &cb.sgt,
 				0, sgdma_timeout * 1000);
-	//pr_err("xfer_submit return=%lld.\n", (s64)res);
-
-	//interrupt_status(xdev);
-
 	char_sgdma_unmap_user_buf(&cb, write);
 
 	return res;
@@ -302,138 +302,134 @@ static ssize_t char_sgdma_read_write(struct file *file, char __user *buf,
 
 static ssize_t char_sgdma_write(struct file *file, const char __user *buf,
-        size_t count, loff_t *pos)
+			size_t count, loff_t *pos)
 {
-    return char_sgdma_read_write(file, (char *)buf, count, pos, 1);
+	return char_sgdma_read_write(file, (char *)buf, count, pos, 1);
 }
 
 static ssize_t char_sgdma_read(struct file *file, char __user *buf,
-        size_t count, loff_t *pos)
+			size_t count, loff_t *pos)
 {
-    struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
-    struct xdma_engine *engine;
-    int rv;
-
-    rv = xcdev_check(__func__, xcdev, 1);
-    if (rv < 0)
-        return rv;
-
-    engine = xcdev->engine;
-
-    if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
-        rv = xdma_cyclic_transfer_setup(engine);
-        if (rv < 0 && rv != -EBUSY)
-            return rv;
-        /* 600 sec. timeout */
-        return xdma_engine_read_cyclic(engine, buf, count, 600000);
-    }
-
-    return char_sgdma_read_write(file, (char *)buf, count, pos, 0);
+	return char_sgdma_read_write(file, (char *)buf, count, pos, 0);
 }
 
 static int ioctl_do_perf_start(struct xdma_engine *engine, unsigned long arg)
 {
-    int rv;
-    struct xdma_dev *xdev;
-
-    BUG_ON(!engine);
-    xdev = engine->xdev;
-    BUG_ON(!xdev);
+	int rv;
+	struct xdma_dev *xdev;
 
-    /* performance measurement already running on this engine? */
-    if (engine->xdma_perf) {
-        dbg_perf("IOCTL_XDMA_PERF_START failed!\n");
-        dbg_perf("Perf measurement already seems to be running!\n");
-        return -EBUSY;
-    }
-    engine->xdma_perf = kzalloc(sizeof(struct xdma_performance_ioctl),
-        GFP_KERNEL);
+	if (!engine || !engine->xdev) {
+		pr_err("Invalid DMA engine 0x%p, 0x%p.\n",
+			engine, engine ? engine->xdev : NULL);
+		return -EINVAL;
+	}
 
-    if (!engine->xdma_perf)
-        return -ENOMEM;
+	xdev = engine->xdev;
 
-    rv = copy_from_user(engine->xdma_perf,
-        (struct xdma_performance_ioctl *)arg,
-        sizeof(struct xdma_performance_ioctl));
+	/* if performance measurement already running on this engine */
+	if (engine->xdma_perf) {
+		dbg_perf("Perf measurement already seems to be running!\n");
+		return -EBUSY;
+	}
+
+	engine->xdma_perf = kzalloc(sizeof(struct xdma_performance_ioctl),
+				GFP_KERNEL);
+	if (!engine->xdma_perf)
+		return -ENOMEM;
+
+	rv = copy_from_user(engine->xdma_perf,
+			(struct xdma_performance_ioctl *)arg,
+			sizeof(struct xdma_performance_ioctl));
+	if (rv < 0) {
+		dbg_perf("Failed to copy from user space 0x%lx\n", arg);
+		return -EINVAL;
+	}
+	if (engine->xdma_perf->version != IOCTL_XDMA_PERF_V1) {
+		dbg_perf("Unsupported IOCTL version %d\n",
+			engine->xdma_perf->version);
+		return -EINVAL;
+	}
 
-    if (rv < 0) {
-        dbg_perf("Failed to copy from user space 0x%lx\n", arg);
-        return -EINVAL;
-    }
-    if (engine->xdma_perf->version != IOCTL_XDMA_PERF_V1) {
-        dbg_perf("Unsupported IOCTL version %d\n",
-            engine->xdma_perf->version);
-        return -EINVAL;
-    }
 	enable_perf(engine);
-    dbg_perf("transfer_size = %d\n", engine->xdma_perf->transfer_size);
-    /* initialize wait queue */
-    init_waitqueue_head(&engine->xdma_perf_wq);
-    xdma_performance_submit(xdev, engine);
+	dbg_perf("transfer_size = %d\n", engine->xdma_perf->transfer_size);
+
+	/* initialize wait queue */
+	init_waitqueue_head(&engine->xdma_perf_wq);
+
+	rv = xdma_performance_submit(xdev, engine);
+	if (rv < 0)
+		pr_err("Failed to submit dma performance\n");
 
-    return 0;
+	return 0;
 }
 
 static int ioctl_do_perf_stop(struct xdma_engine *engine, unsigned long arg)
 {
-    struct xdma_transfer *transfer = NULL;
-    int rv;
-
-    dbg_perf("IOCTL_XDMA_PERF_STOP\n");
+	struct xdma_transfer *transfer = NULL;
+	int rv;
 
-    /* no performance measurement running on this engine? */
-    if (!engine->xdma_perf) {
-        dbg_perf("No measurement in progress\n");
-        return -EINVAL;
-    }
+	if (!engine) {
+		pr_err("DMA engine NULL.\n");
+		return -EINVAL;
+	}
 
-    /* stop measurement */
-    transfer = engine_cyclic_stop(engine);
-    dbg_perf("Waiting for measurement to stop\n");
+	dbg_perf("IOCTL_XDMA_PERF_STOP\n");
 
-    if (engine->xdma_perf) {
-        get_perf_stats(engine);
+	/* if no performance measurement running on this engine */
+	if (!engine->xdma_perf) {
+		dbg_perf("No measurement in progress\n");
+		return -EINVAL;
+	}
 
-        rv = copy_to_user((void __user *)arg, engine->xdma_perf,
-            sizeof(struct xdma_performance_ioctl));
-        if (rv) {
-            dbg_perf("Error copying result to user\n");
-            return -EINVAL;
-        }
-    } else {
-        dbg_perf("engine->xdma_perf == NULL?\n");
+	/* stop measurement */
+	dbg_perf("Waiting for measurement to stop\n");
+	transfer = engine_cyclic_stop(engine);
+	if (!transfer) {
+		pr_err("Failed to stop cyclic transfer\n");
+		return -EINVAL;
 	}
 
-    kfree(engine->xdma_perf);
-    engine->xdma_perf = NULL;
+	get_perf_stats(engine);
 
+	rv = copy_to_user((void __user *)arg, engine->xdma_perf,
+			sizeof(struct xdma_performance_ioctl));
+	if (rv) {
+		dbg_perf("Error copying result to user\n");
+		return -EINVAL;
+	}
+
+	kfree(transfer);
+	kfree(engine->xdma_perf);
+	engine->xdma_perf = NULL;
 
-    return 0;
+	return 0;
 }
 
 static int ioctl_do_perf_get(struct xdma_engine *engine, unsigned long arg)
 {
-    int rc;
+	int rc;
 
-    BUG_ON(!engine);
+	if (!engine) {
+		pr_err("DMA engine NULL.\n");
+		return -EINVAL;
+	}
 
-    dbg_perf("IOCTL_XDMA_PERF_GET\n");
+	dbg_perf("IOCTL_XDMA_PERF_GET\n");
 
-    if (engine->xdma_perf) {
+	if (engine->xdma_perf) {
 		get_perf_stats(engine);
 
-        rc = copy_to_user((void __user *)arg, engine->xdma_perf,
-            sizeof(struct xdma_performance_ioctl));
-        if (rc) {
-            dbg_perf("Error copying result to user\n");
-            return -EINVAL;
-        }
-    } else {
-        dbg_perf("engine->xdma_perf == NULL?\n");
-        return -EPROTO;
-    }
+		rc = copy_to_user((void __user *)arg, engine->xdma_perf,
+				sizeof(struct xdma_performance_ioctl));
+		if (rc) {
+			dbg_perf("Error copying result to user\n");
+			return -EINVAL;
+		}
+	} else {
+		dbg_perf("engine->xdma_perf == NULL?\n");
+		return -EPROTO;
+	}
 
-    return 0;
+	return 0;
 }
 
 static int ioctl_do_addrmode_set(struct xdma_engine *engine, unsigned long arg)
@@ -446,7 +442,10 @@ static int ioctl_do_addrmode_get(struct xdma_engine *engine, unsigned long arg)
 	int rv;
 	unsigned long src;
 
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("DMA engine NULL.\n");
+		return -EINVAL;
+	}
 	src = !!engine->non_incr_addr;
 
 	dbg_perf("IOCTL_XDMA_ADDRMODE_GET\n");
@@ -455,22 +454,24 @@ static int ioctl_do_addrmode_get(struct xdma_engine *engine, unsigned long arg)
 	return rv;
 }
 
-static int ioctl_do_align_get(struct xdma_engine *engine, unsigned long arg)
+static int ioctl_do_align_get(struct xdma_engine *engine, unsigned long arg)
 {
-	BUG_ON(!engine);
+	if (!engine) {
+		pr_err("DMA engine NULL.\n");
+		return -EINVAL;
+	}
 
 	dbg_perf("IOCTL_XDMA_ALIGN_GET\n");
 	return put_user(engine->addr_align, (int __user *)arg);
 }
 
 static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
-		unsigned long arg)
+			unsigned long arg)
 {
 	struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
 	struct xdma_dev *xdev;
 	struct xdma_engine *engine;
-
-	int rv = 0;
+	int rv = 0;
 
 	rv = xcdev_check(__func__, xcdev, 1);
 	if (rv < 0)
@@ -480,15 +481,15 @@ static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
 	engine = xcdev->engine;
 
 	switch (cmd) {
-    case IOCTL_XDMA_PERF_START:
-        rv = ioctl_do_perf_start(engine, arg);
-        break;
-    case IOCTL_XDMA_PERF_STOP:
-        rv = ioctl_do_perf_stop(engine, arg);
-        break;
-    case IOCTL_XDMA_PERF_GET:
-        rv = ioctl_do_perf_get(engine, arg);
-        break;
+	case IOCTL_XDMA_PERF_START:
+		rv = ioctl_do_perf_start(engine, arg);
+		break;
+	case IOCTL_XDMA_PERF_STOP:
+		rv = ioctl_do_perf_stop(engine, arg);
+		break;
+	case IOCTL_XDMA_PERF_GET:
+		rv = ioctl_do_perf_get(engine, arg);
+		break;
 	case IOCTL_XDMA_ADDRMODE_SET:
 		rv = ioctl_do_addrmode_set(engine, arg);
 		break;
@@ -498,13 +499,13 @@ static long char_sgdma_ioctl(struct file *file, unsigned int cmd,
 	case IOCTL_XDMA_ALIGN_GET:
 		rv = ioctl_do_align_get(engine, arg);
 		break;
-    default:
-        dbg_perf("Unsupported operation\n");
-        rv = -EINVAL;
-        break;
-    }
+	default:
+		dbg_perf("Unsupported operation 0x%x.\n", cmd);
+		rv = -EINVAL;
+		break;
+	}
 
-    return rv;
+	return rv;
 }
 
 static int char_sgdma_open(struct inode *inode, struct file *file)
@@ -520,8 +521,7 @@ static int char_sgdma_open(struct inode *inode, struct file *file)
 	if (engine->streaming && engine->dir == DMA_FROM_DEVICE) {
 		if (engine->device_open == 1)
 			return -EBUSY;
-		else
-			engine->device_open = 1;
+		engine->device_open = 1;
 	}
 
 	return 0;
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_sgdma.h b/sdk/linux_kernel_drivers/xdma/cdev_sgdma.h
index c67bf99f5..4f0a38cce 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_sgdma.h
+++ b/sdk/linux_kernel_drivers/xdma/cdev_sgdma.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,7 @@
 * Karen Xie
 *
 ******************************************************************************/
+
 #ifndef _XDMA_IOCALLS_POSIX_H_
 #define _XDMA_IOCALLS_POSIX_H_
 
@@ -50,17 +51,16 @@
 * _IOC_SIZE(nr) returns size
 */
 
-struct xdma_performance_ioctl
-{
-    /* IOCTL_XDMA_IOCTL_Vx */
-    uint32_t version;
-    uint32_t transfer_size;
-    /* measurement */
-    uint32_t stopped;
-    uint32_t iterations;
-    uint64_t clock_cycle_count;
-    uint64_t data_cycle_count;
-    uint64_t pending_count;
+struct xdma_performance_ioctl {
+	/* IOCTL_XDMA_IOCTL_Vx */
+	uint32_t version;
+	uint32_t transfer_size;
+	/* measurement */
+	uint32_t stopped;
+	uint32_t iterations;
+	uint64_t clock_cycle_count;
+	uint64_t data_cycle_count;
+	uint64_t pending_count;
 };
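A pattern worth noting across cdev_sgdma.c: every BUG_ON() on a bad argument becomes a logged -EINVAL return, so a malformed ioctl degrades into an error instead of a kernel panic. The shape of that conversion, with xyz_check() as a stand-in name:

static int xyz_check(struct xdma_engine *engine)
{
	/* was: BUG_ON(!engine) -- which halts the whole machine */
	if (unlikely(!engine)) {
		pr_err("DMA engine NULL.\n");
		return -EINVAL;	/* the ioctl path can now fail cleanly */
	}

	return 0;
}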
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_xvc.c b/sdk/linux_kernel_drivers/xdma/cdev_xvc.c
index adafa7fc8..e346bc79f 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_xvc.c
+++ b/sdk/linux_kernel_drivers/xdma/cdev_xvc.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,7 @@
 * Karen Xie
 *
 ******************************************************************************/
+
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
 #include "xdma_cdev.h"
@@ -36,30 +37,30 @@
 #ifdef __REG_DEBUG__
 /* SECTION: Function definitions */
 
-inline void __write_register(const char *fn, u32 value, void *base,
+inline void __write_register(const char *fn, u32 value, void __iomem *base,
 		unsigned int off)
 {
-    pr_info("%s: 0x%p, W reg 0x%lx, 0x%x.\n", fn, base, off, value);
-    iowrite32(value, base + off);
+	pr_info("%s: 0x%p, W reg 0x%lx, 0x%x.\n", fn, base, off, value);
+	iowrite32(value, base + off);
 }
 
-inline u32 __read_register(const char *fn, void *base, unsigned int off)
+inline u32 __read_register(const char *fn, void __iomem *base, unsigned int off)
 {
 	u32 v = ioread32(base + off);
 
-    pr_info("%s: 0x%p, R reg 0x%lx, 0x%x.\n", fn, base, off, v);
-    return v;
+	pr_info("%s: 0x%p, R reg 0x%lx, 0x%x.\n", fn, base, off, v);
+	return v;
 }
 
-#define write_register(v,base,off) __write_register(__func__, v, base, off)
-#define read_register(base,off) __read_register(__func__, base, off)
+#define write_register(v, base, off) __write_register(__func__, v, base, off)
+#define read_register(base, off) __read_register(__func__, base, off)
 
 #else
-#define write_register(v,base,off) iowrite32(v, (base) + (off))
-#define read_register(base,off) ioread32((base) + (off))
+#define write_register(v, base, off) iowrite32(v, (base) + (off))
+#define read_register(base, off) ioread32((base) + (off))
 #endif /* #ifdef __REG_DEBUG__ */
 
-static int xvc_shift_bits(void *base, u32 tms_bits, u32 tdi_bits,
+static int xvc_shift_bits(void __iomem *base, u32 tms_bits, u32 tdi_bits,
 		u32 *tdo_bits)
 {
 	u32 control;
@@ -96,7 +97,7 @@ static int xvc_shift_bits(void *base, u32 tms_bits, u32 tdi_bits,
 
 static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-    struct xdma_cdev *xcdev = (struct xdma_cdev *)filp->private_data;
+	struct xdma_cdev *xcdev = (struct xdma_cdev *)filp->private_data;
 	struct xdma_dev *xdev;
 	struct xvc_ioc xvc_obj;
 	unsigned int opcode;
@@ -113,6 +114,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	rv = xcdev_check(__func__, xcdev, 0);
 	if (rv < 0)
 		return rv;
+
 	xdev = xcdev->xdev;
 
 	if (cmd != XDMA_IOCXVC) {
@@ -139,7 +141,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	total_bits = xvc_obj.length;
 	total_bytes = (total_bits + 7) >> 3;
 
-	buffer = (char *)kmalloc(total_bytes * 3, GFP_KERNEL);
+	buffer = kmalloc(total_bytes * 3, GFP_KERNEL);
 	if (!buffer) {
 		pr_info("OOM %u, op 0x%x, len %u bits, %u bytes.\n",
 			3 * total_bytes, opcode, total_bits, total_bytes);
@@ -150,12 +152,16 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	tdi_buf = tms_buf + total_bytes;
 	tdo_buf = tdi_buf + total_bytes;
 
-	rv = copy_from_user((void *)tms_buf, xvc_obj.tms_buf, total_bytes);
+	rv = copy_from_user((void *)tms_buf,
+			(const char __user *)xvc_obj.tms_buf,
+			total_bytes);
 	if (rv) {
 		pr_info("copy tmfs_buf failed: %d/%u.\n", rv, total_bytes);
 		goto cleanup;
 	}
-	rv = copy_from_user((void *)tdi_buf, xvc_obj.tdi_buf, total_bytes);
+	rv = copy_from_user((void *)tdi_buf,
+			(const char __user *)xvc_obj.tdi_buf,
+			total_bytes);
 	if (rv) {
 		pr_info("copy tdi_buf failed: %d/%u.\n", rv, total_bytes);
 		goto cleanup;
@@ -166,7 +172,8 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	iobase = xdev->bar[xcdev->bar] + xcdev->base;
 
 	/* set length register to 32 initially if more than one
-	 * word-transaction is to be done */
+	 * word-transaction is to be done
+	 */
 	if (total_bits >= 32)
 		write_register(0x20, iobase, XVC_BAR_LENGTH_REG);
 
@@ -177,7 +184,7 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		u32 tms_store = 0;
 		u32 tdi_store = 0;
 		u32 tdo_store = 0;
-
+
 		if (bits_left < 32) {
 			/* set number of bits to shift out */
 			write_register(bits_left, iobase, XVC_BAR_LENGTH_REG);
@@ -190,33 +197,35 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		/* Shift data out and copy to output buffer */
 		rv = xvc_shift_bits(iobase, tms_store, tdi_store, &tdo_store);
 		if (rv < 0)
-			goto cleanup;
+			break;
 
 		memcpy(tdo_buf + bytes, &tdo_store, shift_bytes);
 	}
 
+	if (rv < 0)
+		goto unlock;
+
 	/* if testing bar access swap tdi and tdo bufferes to "loopback" */
 	if (opcode == 0x2) {
-		char *tmp = tdo_buf;
+		unsigned char *tmp = tdo_buf;
 
 		tdo_buf = tdi_buf;
 		tdi_buf = tmp;
 	}
 
-	rv = copy_to_user((void *)xvc_obj.tdo_buf, tdo_buf, total_bytes);
-	if (rv) {
+	rv = copy_to_user(xvc_obj.tdo_buf, (const void *)tdo_buf, total_bytes);
+	if (rv)
 		pr_info("copy back tdo_buf failed: %d/%u.\n", rv, total_bytes);
-		rv = -EFAULT;
-		goto cleanup;
-	}
-
-cleanup:
-	if (buffer)
-		kfree(buffer);
-
-	mmiowb();
+unlock:
+#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
+	wmb();
+#endif
 	spin_unlock(&xcdev->lock);
 
+cleanup:
+	kfree(buffer);
+
 	return rv;
 }
 
@@ -224,10 +233,10 @@ static long xvc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 * character device file operations for the XVC
 */
 static const struct file_operations xvc_fops = {
-    .owner = THIS_MODULE,
-    .open = char_open,
-    .release = char_close,
-    .unlocked_ioctl = xvc_ioctl,
+	.owner = THIS_MODULE,
+	.open = char_open,
+	.release = char_close,
+	.unlocked_ioctl = xvc_ioctl,
 };
 
 void cdev_xvc_init(struct xdma_cdev *xcdev)
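mmiowb() was removed in kernel 5.2, when its ordering guarantee was folded into spin_unlock() itself; the xvc_ioctl() cleanup above therefore substitutes wmb() on kernels up to 5.1 and otherwise relies on the unlock (libxdma.c below keeps mmiowb() behind the same version gate). The gate in a self-contained sketch; finish_mmio_section() is invented for illustration:

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/io.h>

static void finish_mmio_section(spinlock_t *lock)
{
#if KERNEL_VERSION(5, 1, 0) >= LINUX_VERSION_CODE
	wmb();	/* flush posted MMIO writes before dropping the lock */
#endif
	spin_unlock(lock);	/* >= 5.2: the unlock orders MMIO by itself */
}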
diff --git a/sdk/linux_kernel_drivers/xdma/cdev_xvc.h b/sdk/linux_kernel_drivers/xdma/cdev_xvc.h
index de9473a37..9a2b8689f 100644
--- a/sdk/linux_kernel_drivers/xdma/cdev_xvc.h
+++ b/sdk/linux_kernel_drivers/xdma/cdev_xvc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -21,14 +21,14 @@
 * Karen Xie
 *
 ******************************************************************************/
+
 #ifndef __XVC_IOCTL_H__
 #define __XVC_IOCTL_H__
 
 #include <linux/ioctl.h>
 
 /*
- * !!! TODO !!!
- * need a better way set the bar offset dynamicly
+ * the bar offset can be changed at compile time via xvc_bar_offset
 */
 #define XVC_BAR_OFFSET_DFLT	0x40000	/* DSA 4.0 */
 
@@ -37,9 +37,9 @@
 struct xvc_ioc {
 	unsigned int opcode;
 	unsigned int length;
-	unsigned char *tms_buf;
-	unsigned char *tdi_buf;
-	unsigned char *tdo_buf;
+	const char __user *tms_buf;
+	const char __user *tdi_buf;
+	void __user *tdo_buf;
 };
 
 #define XDMA_IOCXVC	_IOWR(XVC_MAGIC, 1, struct xvc_ioc)
diff --git a/sdk/linux_kernel_drivers/xdma/libxdma.c b/sdk/linux_kernel_drivers/xdma/libxdma.c
old mode 100755
new mode 100644
index 32523e500..9dc519b69
--- a/sdk/linux_kernel_drivers/xdma/libxdma.c
+++ b/sdk/linux_kernel_drivers/xdma/libxdma.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
- * Copyright(c) 2015 - 2017 Xilinx, Inc.
+ * Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -36,23 +36,6 @@
 #include "libxdma_api.h"
 #include "cdev_sgdma.h"
 
-/* SECTION: Module licensing */
-
-#ifdef __LIBXDMA_MOD__
-#include "version.h"
-#define DRV_MODULE_NAME "libxdma"
-#define DRV_MODULE_DESC "Xilinx XDMA Base Driver"
-#define DRV_MODULE_RELDATE "Feb. 2017"
-
-static char version[] =
-	DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
-
-MODULE_AUTHOR("Xilinx, Inc.");
-MODULE_DESCRIPTION(DRV_MODULE_DESC);
-MODULE_VERSION(DRV_MODULE_VERSION);
-MODULE_LICENSE("GPL v2");
-#endif
-
 /* Module Parameters */
 static unsigned int poll_mode;
 module_param(poll_mode, uint, 0644);
@@ -123,19 +106,18 @@ static inline void xdev_list_remove(struct xdma_dev *xdev)
 
 struct xdma_dev *xdev_find_by_pdev(struct pci_dev *pdev)
 {
-    struct xdma_dev *xdev, *tmp;
-
-    mutex_lock(&xdev_mutex);
-    list_for_each_entry_safe(xdev, tmp, &xdev_list, list_head) {
-        if (xdev->pdev == pdev) {
-            mutex_unlock(&xdev_mutex);
-            return xdev;
-        }
-    }
-    mutex_unlock(&xdev_mutex);
-    return NULL;
+	struct xdma_dev *xdev, *tmp;
+
+	mutex_lock(&xdev_mutex);
+	list_for_each_entry_safe(xdev, tmp, &xdev_list, list_head) {
+		if (xdev->pdev == pdev) {
+			mutex_unlock(&xdev_mutex);
+			return xdev;
+		}
+	}
+	mutex_unlock(&xdev_mutex);
+	return NULL;
 }
-EXPORT_SYMBOL_GPL(xdev_find_by_pdev);
 
 static inline int debug_check_dev_hndl(const char *fname, struct pci_dev *pdev,
 				void *hndl)
@@ -167,9 +149,9 @@ inline void __write_register(const char *fn, u32 value, void *iomem, unsigned long off)
 	pr_err("%s: w reg 0x%lx(0x%p), 0x%x.\n", fn, off, iomem, value);
 	iowrite32(value, iomem);
 }
-#define write_register(v,mem,off) __write_register(__func__, v, mem, off)
+#define write_register(v, mem, off) __write_register(__func__, v, mem, off)
 #else
-#define write_register(v,mem,off) iowrite32(v, mem)
+#define write_register(v, mem, off) iowrite32(v, mem)
 #endif
 
 inline u32 read_register(void *iomem)
@@ -200,7 +182,7 @@ static void check_nonzero_interrupt_status(struct xdma_dev *xdev)
 
 	w = read_register(&reg->channel_int_enable);
 	if (w)
-		pr_info("%s xdma%d channel_int_enable = 0x%08x\n",
+		pr_info("%s xdma%d channel_int_enable = 0x%08x\n",
 			dev_name(&xdev->pdev->dev), xdev->idx, w);
 
 	w = read_register(&reg->user_int_request);
@@ -296,14 +278,16 @@ void enable_perf(struct xdma_engine *engine)
 
 	dbg_perf("IOCTL_XDMA_PERF_START\n");
 }
-EXPORT_SYMBOL_GPL(enable_perf);
 
 void get_perf_stats(struct xdma_engine *engine)
 {
 	u32 hi;
 	u32 lo;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	if (!engine->xdma_perf) {
 		pr_info("%s perf struct not set up.\n", engine->name);
@@ -327,13 +311,15 @@ void get_perf_stats(struct xdma_engine *engine)
 	lo = read_register(&engine->regs->perf_pnd_lo);
 	engine->xdma_perf->pending_count = build_u64(hi, lo);
 }
-EXPORT_SYMBOL_GPL(get_perf_stats);
 
 static void engine_reg_dump(struct xdma_engine *engine)
 {
 	u32 w;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	w = read_register(&engine->regs->identifier);
 	pr_info("%s: ioread32(0x%p) = 0x%08x (id).\n",
@@ -391,8 +377,8 @@ static void engine_status_dump(struct xdma_engine *engine)
 	if ((v & XDMA_STAT_DESC_COMPLETED))
 		len += sprintf(buf + len, "DESC_COMPL,");
 
-	/* common H2C & C2H */
-	if ((v & XDMA_STAT_COMMON_ERR_MASK)) {
+	/* common H2C & C2H */
+	if ((v & XDMA_STAT_COMMON_ERR_MASK)) {
 		if ((v & XDMA_STAT_ALIGN_MISMATCH))
 			len += sprintf(buf + len, "ALIGN_MISMATCH ");
 		if ((v & XDMA_STAT_MAGIC_STOPPED))
@@ -404,7 +390,7 @@ static void engine_status_dump(struct xdma_engine *engine)
 		buf[len - 1] = ',';
 	}
 
-	if ((engine->dir == DMA_TO_DEVICE)) {
+	if (engine->dir == DMA_TO_DEVICE) {
 		/* H2C only */
 		if ((v & XDMA_STAT_H2C_R_ERR_MASK)) {
 			len += sprintf(buf + len, "R:");
@@ -442,8 +428,8 @@ static void engine_status_dump(struct xdma_engine *engine)
 		}
 	}
 
-	/* common H2C & C2H */
-	if ((v & XDMA_STAT_DESC_ERR_MASK)) {
+	/* common H2C & C2H */
+	if ((v & XDMA_STAT_DESC_ERR_MASK)) {
 		len += sprintf(buf + len, "DESC_ERR:");
 		if ((v & XDMA_STAT_DESC_UNSUPP_REQ))
 			len += sprintf(buf + len, "UNSUPP_REQ ");
@@ -462,26 +448,24 @@ static void engine_status_dump(struct xdma_engine *engine)
 	pr_info("%s\n", buffer);
 }
 
-static u32 engine_status_read(struct xdma_engine *engine, bool clear, bool dump)
+static void engine_status_read(struct xdma_engine *engine, bool clr, bool dump)
 {
-	u32 value;
-
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	if (dump)
 		engine_reg_dump(engine);
 
 	/* read status register */
-	if (clear)
-		value = engine->status =
-			read_register(&engine->regs->status_rc);
+	if (clr)
+		engine->status = read_register(&engine->regs->status_rc);
 	else
-		value = engine->status = read_register(&engine->regs->status);
+		engine->status = read_register(&engine->regs->status);
 
 	if (dump)
 		engine_status_dump(engine);
-
-	return value;
 }
 
 /**
@@ -492,7 +476,10 @@ static void xdma_engine_stop(struct xdma_engine *engine)
 {
 	u32 w;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 	dbg_tfr("xdma_engine_stop(engine=%p)\n", engine);
 
 	w = 0;
@@ -526,7 +513,10 @@ static void engine_start_mode_config(struct xdma_engine *engine)
 {
 	u32 w;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	/* If a perf test is running, enable the engine interrupts */
 	if (engine->xdma_perf) {
@@ -559,11 +549,10 @@ static void engine_start_mode_config(struct xdma_engine *engine)
 		if ((engine->streaming && (engine->dir == DMA_FROM_DEVICE)) ||
 			(engine->xdma_perf))
 			w |= (u32)XDMA_CTRL_IE_IDLE_STOPPED;
-
-		/* set non-incremental addressing mode */
-		if (engine->non_incr_addr)
-			w |= (u32)XDMA_CTRL_NON_INCR_ADDR;
 	}
+	/* set non-incremental addressing mode */
+	if (engine->non_incr_addr)
+		w |= (u32)XDMA_CTRL_NON_INCR_ADDR;
 
 	dbg_tfr("iowrite32(0x%08x to 0x%p) (control)\n", w,
 		(void *)&engine->regs->control);
@@ -601,13 +590,22 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 	int extra_adj = 0;
 
 	/* engine must be idle */
-	BUG_ON(engine->running);
+	if (unlikely(!engine || engine->running)) {
+		pr_err("engine 0x%p running.\n", engine);
+		return NULL;
+	}
 
 	/* engine transfer queue must not be empty */
-	BUG_ON(list_empty(&engine->transfer_list));
+	if (unlikely(list_empty(&engine->transfer_list))) {
+		pr_err("engine %s queue empty.\n", engine->name);
+		return NULL;
+	}
+
 	/* inspect first transfer queued on the engine */
 	transfer = list_entry(engine->transfer_list.next, struct xdma_transfer,
 				entry);
-	BUG_ON(!transfer);
+	if (unlikely(!transfer)) {
+		pr_err("engine %s no xfer queued.\n", engine->name);
+		return NULL;
+	}
 
 	/* engine is no longer shutdown */
 	engine->shutdown = ENGINE_SHUTDOWN_NONE;
@@ -645,8 +643,9 @@ static struct xdma_transfer *engine_start(struct xdma_engine *engine)
 	dbg_tfr("ioread32(0x%p) (dummy read flushes writes).\n",
 		&engine->regs->status);
 
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 0)
 	mmiowb();
-
+#endif
 	engine_start_mode_config(engine);
 
 	engine_status_read(engine, 0, 0);
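engine_start() now reports failure by returning NULL instead of BUG_ON()-ing, so its call sites must check the result, as engine_service_resume() does further down. The caller-side shape, with restart_engine() as an invented name (pr_debug stands in for the driver's dbg_tfr macro):

static void restart_engine(struct xdma_engine *engine)
{
	struct xdma_transfer *xfer;

	if (list_empty(&engine->transfer_list))
		return;

	xfer = engine_start(engine);	/* NULL on failure, no more BUG_ON */
	if (!xfer) {
		pr_err("%s failed to start dma engine\n", engine->name);
		return;
	}
	pr_debug("re-started %s with xfer 0x%p\n", engine->name, xfer);
}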
@@ -679,7 +678,10 @@ static void engine_service_shutdown(struct xdma_engine *engine)
 struct xdma_transfer *engine_transfer_completion(struct xdma_engine *engine,
 		struct xdma_transfer *transfer)
 {
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return NULL;
+	}
 
 	if (unlikely(!transfer)) {
 		pr_info("%s: xfer empty.\n", engine->name);
@@ -696,12 +698,9 @@ struct xdma_transfer *engine_transfer_completion(struct xdma_engine *engine,
 struct xdma_transfer *engine_service_transfer_list(struct xdma_engine *engine,
 		struct xdma_transfer *transfer, u32 *pdesc_completed)
 {
-	BUG_ON(!engine);
-	BUG_ON(!pdesc_completed);
-
-	if (unlikely(!transfer)) {
-		pr_info("%s xfer empty, pdesc completed %u.\n",
-			engine->name, *pdesc_completed);
+	if (unlikely(!engine || !pdesc_completed || !transfer)) {
+		pr_err("engine 0x%p, pdesc_completed 0x%p, xfer 0x%p.\n",
+			engine, pdesc_completed, transfer);
 		return NULL;
 	}
 
@@ -752,16 +751,16 @@ static void engine_err_handle(struct xdma_engine *engine,
	 */
 	if (engine->status & XDMA_STAT_BUSY) {
 		value = read_register(&engine->regs->status);
-		if ((value & XDMA_STAT_BUSY) && printk_ratelimit())
-			pr_info("%s has errors but is still BUSY\n",
-				engine->name);
+		if ((value & XDMA_STAT_BUSY))
+			printk_ratelimited(KERN_INFO
+				"%s has errors but is still BUSY\n",
+				engine->name);
 	}
 
-	if (printk_ratelimit()) {
-		pr_info("%s, s 0x%x, aborted xfer 0x%p, cmpl %d/%d\n",
+	printk_ratelimited(KERN_INFO
+			"%s, s 0x%x, aborted xfer 0x%p, cmpl %d/%d\n",
 			engine->name, engine->status, transfer,
 			desc_completed, transfer->desc_num);
-	}
 
 	/* mark transfer as failed */
 	transfer->state = TRANSFER_STATE_FAILED;
@@ -771,72 +770,71 @@ static void engine_err_handle(struct xdma_engine *engine,
 struct xdma_transfer *engine_service_final_transfer(struct xdma_engine *engine,
 		struct xdma_transfer *transfer, u32 *pdesc_completed)
 {
-	BUG_ON(!engine);
-	BUG_ON(!pdesc_completed);
-
-	/* inspect the current transfer */
-	if (unlikely(!transfer)) {
-		pr_info("%s xfer empty, pdesc completed %u.\n",
-			engine->name, *pdesc_completed);
+	if (unlikely(!engine || !pdesc_completed || !transfer)) {
+		pr_err("engine 0x%p, pdesc_completed 0x%p, xfer 0x%p.\n",
+			engine, pdesc_completed, transfer);
 		return NULL;
-	} else {
-		if (((engine->dir == DMA_FROM_DEVICE) &&
-		     (engine->status & XDMA_STAT_C2H_ERR_MASK)) ||
-		    ((engine->dir == DMA_TO_DEVICE) &&
-		     (engine->status & XDMA_STAT_H2C_ERR_MASK))) {
-			pr_info("engine %s, status error 0x%x.\n",
-				engine->name, engine->status);
-			engine_status_dump(engine);
-			engine_err_handle(engine, transfer, *pdesc_completed);
-			goto transfer_del;
-		}
+	}
+	/* inspect the current transfer */
+	if (((engine->dir == DMA_FROM_DEVICE) &&
+	     (engine->status & XDMA_STAT_C2H_ERR_MASK)) ||
+	    ((engine->dir == DMA_TO_DEVICE) &&
+	     (engine->status & XDMA_STAT_H2C_ERR_MASK))) {
+		pr_info("engine %s, status error 0x%x.\n",
+			engine->name, engine->status);
+		engine_status_dump(engine);
+		engine_err_handle(engine, transfer, *pdesc_completed);
+		goto transfer_del;
+	}
 
-		if (engine->status & XDMA_STAT_BUSY)
-			pr_debug("engine %s is unexpectedly busy - ignoring\n",
-				engine->name);
+	if (engine->status & XDMA_STAT_BUSY)
+		pr_debug("engine %s is unexpectedly busy - ignoring\n",
+			engine->name);
 
-		/* the engine stopped on current transfer? */
-		if (*pdesc_completed < transfer->desc_num) {
-			transfer->state = TRANSFER_STATE_FAILED;
-			pr_info("%s, xfer 0x%p, stopped half-way, %d/%d.\n",
-				engine->name, transfer, *pdesc_completed,
-				transfer->desc_num);
-		} else {
-			dbg_tfr("engine %s completed transfer\n", engine->name);
-			dbg_tfr("Completed transfer ID = 0x%p\n", transfer);
-			dbg_tfr("*pdesc_completed=%d, transfer->desc_num=%d",
-				*pdesc_completed, transfer->desc_num);
-
-			if (!transfer->cyclic) {
-				/*
-				 * if the engine stopped on this transfer,
-				 * it should be the last
-				 */
-				WARN_ON(*pdesc_completed > transfer->desc_num);
-			}
-			/* mark transfer as succesfully completed */
-			transfer->state = TRANSFER_STATE_COMPLETED;
+	/* the engine stopped on current transfer? */
+	if (*pdesc_completed < transfer->desc_num) {
+		transfer->state = TRANSFER_STATE_FAILED;
+		pr_info("%s, xfer 0x%p, stopped half-way, %d/%d.\n",
+			engine->name, transfer, *pdesc_completed,
+			transfer->desc_num);
+	} else {
+		dbg_tfr("engine %s completed transfer\n", engine->name);
+		dbg_tfr("Completed transfer ID = 0x%p\n", transfer);
+		dbg_tfr("*pdesc_completed=%d, transfer->desc_num=%d",
+			*pdesc_completed, transfer->desc_num);
+
+		if (!transfer->cyclic) {
+			/*
+			 * if the engine stopped on this transfer,
+			 * it should be the last
+			 */
+			WARN_ON(*pdesc_completed > transfer->desc_num);
 		}
+		/* mark transfer as succesfully completed */
+		transfer->state = TRANSFER_STATE_COMPLETED;
+	}
 
 transfer_del:
-	/* remove completed transfer from list */
-	list_del(engine->transfer_list.next);
-	/* add to dequeued number of descriptors during this run */
-	engine->desc_dequeued += transfer->desc_num;
+	/* remove completed transfer from list */
+	list_del(engine->transfer_list.next);
+	/* add to dequeued number of descriptors during this run */
+	engine->desc_dequeued += transfer->desc_num;
 
-	/*
-	 * Complete transfer - sets transfer to NULL if an asynchronous
-	 * transfer has completed
-	 */
-	transfer = engine_transfer_completion(engine, transfer);
-	}
+	/*
+	 * Complete transfer - sets transfer to NULL if an asynchronous
+	 * transfer has completed
	 */
+	transfer = engine_transfer_completion(engine, transfer);
 
 	return transfer;
 }
 
 static void engine_service_perf(struct xdma_engine *engine, u32 desc_completed)
 {
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	/* performance measurement is running? */
 	if (engine->xdma_perf) {
@@ -864,7 +862,10 @@ static void engine_transfer_dequeue(struct xdma_engine *engine)
 {
 	struct xdma_transfer *transfer;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	/* pick first transfer on the queue (was submitted to the engine) */
 	transfer = list_entry(engine->transfer_list.next, struct xdma_transfer,
@@ -886,9 +887,12 @@ static int engine_ring_process(struct xdma_engine *engine)
 	int start;
 	int eop_count = 0;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine || !engine->cyclic_result)) {
+		pr_err("engine 0x%p, cyclic_result 0x%p.\n",
+			engine, engine ? engine->cyclic_result : NULL);
+		return -EINVAL;
+	}
 	result = engine->cyclic_result;
-	BUG_ON(!result);
 
 	/* where we start receiving in the ring buffer */
 	start = engine->rx_tail;
@@ -929,8 +933,11 @@ static int engine_service_cyclic_polled(struct xdma_engine *engine)
 	struct xdma_poll_wb *writeback_data;
 	u32 sched_limit = 0;
 
-	BUG_ON(!engine);
-	BUG_ON(engine->magic != MAGIC_ENGINE);
+	if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) {
+		pr_err("bad engine 0x%p, magic 0x%lx.\n",
+			engine, engine ? engine->magic : 0UL);
+		return -EINVAL;
+	}
 
 	writeback_data = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
 
@@ -948,6 +955,11 @@ static int engine_service_cyclic_polled(struct xdma_engine *engine)
 		}
 
 		eop_count = engine_ring_process(engine);
+		if (eop_count < 0) {
+			pr_err("%s failed to process engine ring\n",
+				engine->name);
+			return eop_count;
+		}
 	}
 
 	if (eop_count == 0) {
@@ -969,8 +981,11 @@ static int engine_service_cyclic_interrupt(struct xdma_engine *engine)
 	int eop_count = 0;
 	struct xdma_transfer *xfer;
 
-	BUG_ON(!engine);
-	BUG_ON(engine->magic != MAGIC_ENGINE);
+	if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) {
+		pr_err("bad engine 0x%p, magic 0x%lx.\n",
+			engine, engine ? engine->magic : 0UL);
+		return -EINVAL;
+	}
 
 	engine_status_read(engine, 1, 0);
 
@@ -981,14 +996,12 @@ static int engine_service_cyclic_interrupt(struct xdma_engine *engine)
	 */
 	xfer = &engine->cyclic_req->xfer;
 	if(enable_credit_mp){
-		if (eop_count > 0) {
-			//engine->eop_found = 1;
-		}
 		wake_up_interruptible(&xfer->wq);
 	}else{
 		if (eop_count > 0) {
 			/* awake task on transfer's wait queue */
-			dbg_tfr("wake_up_interruptible() due to %d EOP's\n", eop_count);
+			dbg_tfr("wake_up_interruptible() due to %d EOP's\n",
+				eop_count);
 			engine->eop_found = 1;
 			wake_up_interruptible(&xfer->wq);
 		}
@@ -1013,8 +1026,11 @@ static int engine_service_cyclic(struct xdma_engine *engine)
 
 	dbg_tfr("engine_service_cyclic()");
 
-	BUG_ON(!engine);
-	BUG_ON(engine->magic != MAGIC_ENGINE);
+	if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) {
+		pr_err("bad engine 0x%p, magic 0x%lx.\n",
+			engine, engine ? engine->magic : 0UL);
+		return -EINVAL;
+	}
 
 	if (poll_mode)
 		rc = engine_service_cyclic_polled(engine);
@@ -1029,7 +1045,10 @@ static void engine_service_resume(struct xdma_engine *engine)
 {
 	struct xdma_transfer *transfer_started;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return;
+	}
 
 	/* engine stopped? */
 	if (!engine->running) {
@@ -1037,8 +1056,14 @@ static void engine_service_resume(struct xdma_engine *engine)
 		if (!list_empty(&engine->transfer_list)) {
 			/* (re)start engine */
 			transfer_started = engine_start(engine);
-			pr_info("re-started %s engine with pending xfer 0x%p\n",
+			if (!transfer_started) {
+				pr_err("%s failed to start dma engine\n",
+					engine->name);
+				return;
+			}
+			dbg_tfr("re-started %s engine with pending xfer 0x%p\n",
 				engine->name, transfer_started);
+
 		/* engine was requested to be shutdown? */
 		} else if (engine->shutdown & ENGINE_SHUTDOWN_REQUEST) {
 			engine->shutdown |= ENGINE_SHUTDOWN_IDLE;
@@ -1074,7 +1099,10 @@ static int engine_service(struct xdma_engine *engine, int desc_writeback)
 	int rv = 0;
 	struct xdma_poll_wb *wb_data;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return -EINVAL;
+	}
 
 	/* If polling detected an error, signal to the caller */
 	if (err_flag)
@@ -1142,7 +1170,7 @@ static int engine_service(struct xdma_engine *engine, int desc_writeback)
 	transfer = engine_service_final_transfer(engine, transfer, &desc_count);
 
 	/* Before starting engine again, clear the writeback data */
-    if (poll_mode) {
+	if (poll_mode) {
 		wb_data = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
 		wb_data->completed_desc_count = 0;
 	}
@@ -1160,7 +1188,11 @@ static void engine_service_work(struct work_struct *work)
 	unsigned long flags;
 
 	engine = container_of(work, struct xdma_engine, work);
-	BUG_ON(engine->magic != MAGIC_ENGINE);
+	if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) {
+		pr_err("bad engine 0x%p, magic 0x%lx.\n",
+			engine, engine ? engine->magic : 0UL);
+		return;
+	}
 
 	/* lock the engine */
 	spin_lock_irqsave(&engine->lock, flags);
@@ -1185,15 +1217,18 @@ static void engine_service_work(struct work_struct *work)
 	spin_unlock_irqrestore(&engine->lock, flags);
 }
 
-static u32 engine_service_wb_monitor(struct xdma_engine *engine,
-		u32 expected_wb)
+static int engine_service_wb_monitor(struct xdma_engine *engine,
+		u32 expected_wb, u32 *wb)
 {
 	struct xdma_poll_wb *wb_data;
 	u32 desc_wb = 0;
 	u32 sched_limit = 0;
 	unsigned long timeout;
 
-	BUG_ON(!engine);
+	if (unlikely(!engine)) {
+		pr_err("engine NULL.\n");
+		return -EINVAL;
+	}
 	wb_data = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
 
	/*
@@ -1235,7 +1270,8 @@ static u32 engine_service_wb_monitor(struct xdma_engine *engine,
 		sched_limit++;
 	}
 
-	return desc_wb;
+	*wb = desc_wb;
+	return 0;
 }
 
 static int engine_service_poll(struct xdma_engine *engine,
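engine_service_wb_monitor() used to return the descriptor count directly, which left no way to signal a bad engine pointer; it now returns 0/-EINVAL and hands the count back through a pointer. The calling convention in a sketch (poll_once() is an invented name):

static int poll_once(struct xdma_engine *engine, u32 expected)
{
	u32 desc_wb = 0;
	int rv;

	rv = engine_service_wb_monitor(engine, expected, &desc_wb);
	if (rv < 0)
		return rv;	/* error: desc_wb is not meaningful */

	return engine_service(engine, desc_wb);
}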
@@ -1246,8 +1282,11 @@ static int engine_service_poll(struct xdma_engine *engine,
 	unsigned long flags;
 	int rv = 0;
 
-	BUG_ON(!engine);
-	BUG_ON(engine->magic != MAGIC_ENGINE);
+	if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) {
+		pr_err("bad engine 0x%p, magic 0x%lx.\n",
+			engine, engine ? engine->magic : 0UL);
+		return -EINVAL;
+	}
 
 	writeback_data = (struct xdma_poll_wb *)engine->poll_mode_addr_virt;
 
@@ -1263,15 +1302,16 @@ static int engine_service_poll(struct xdma_engine *engine,
	 * determined before the function is called
	 */
 
-	desc_wb = engine_service_wb_monitor(engine, expected_desc_count);
+	rv = engine_service_wb_monitor(engine, expected_desc_count, &desc_wb);
+	if (rv < 0)
+		return rv;
 
 	spin_lock_irqsave(&engine->lock, flags);
 	dbg_tfr("%s service.\n", engine->name);
-	if (engine->cyclic_req) {
+	if (engine->cyclic_req)
 		rv = engine_service_cyclic(engine);
-	} else {
+	else
 		rv = engine_service(engine, desc_wb);
-	}
 	spin_unlock_irqrestore(&engine->lock, flags);
 
 	return rv;
@@ -1281,7 +1321,10 @@ static irqreturn_t user_irq_service(int irq, struct xdma_user_irq *user_irq)
 {
 	unsigned long flags;
 
-	BUG_ON(!user_irq);
+	if (unlikely(!user_irq)) {
+		pr_err("user_irq NULL.\n");
+		return IRQ_NONE;
+	}
 
 	if (user_irq->handler)
 		return user_irq->handler(user_irq->user_idx, user_irq->dev);
@@ -1309,16 +1352,14 @@ static irqreturn_t xdma_isr(int irq, void *dev_id)
 	struct xdma_dev *xdev;
 	struct interrupt_regs *irq_regs;
 
-	dbg_irq("(irq=%d, dev 0x%p) <<<< ISR.\n", irq, dev_id);
-	BUG_ON(!dev_id);
-	xdev = (struct xdma_dev *)dev_id;
-
-	if (!xdev) {
-		WARN_ON(!xdev);
-		dbg_irq("xdma_isr(irq=%d) xdev=%p ??\n", irq, xdev);
+	if (unlikely(!dev_id)) {
+		pr_err("irq %d, xdev NULL.\n", irq);
 		return IRQ_NONE;
 	}
 
+	dbg_irq("(irq=%d, dev 0x%p) <<<< ISR.\n", irq, dev_id);
+	xdev = (struct xdma_dev *)dev_id;
+
 	irq_regs = (struct interrupt_regs *)(xdev->bar[xdev->config_bar_idx] +
 			XDMA_OFS_INT_CTRL);
 
@@ -1360,8 +1401,8 @@ static irqreturn_t xdma_isr(int irq, void *dev_id)
 		struct xdma_engine *engine = &xdev->engine_h2c[channel];
 
 		/* engine present and its interrupt fired? */
-		if((engine->irq_bitmask & mask) &&
-		   (engine->magic == MAGIC_ENGINE)) {
+		if ((engine->irq_bitmask & mask) &&
+		    (engine->magic == MAGIC_ENGINE)) {
 			mask &= ~engine->irq_bitmask;
 			dbg_tfr("schedule_work, %s.\n", engine->name);
 			schedule_work(&engine->work);
@@ -1379,8 +1420,8 @@ static irqreturn_t xdma_isr(int irq, void *dev_id)
 		struct xdma_engine *engine = &xdev->engine_c2h[channel];
 
 		/* engine present and its interrupt fired? */
-		if((engine->irq_bitmask & mask) &&
-		   (engine->magic == MAGIC_ENGINE)) {
+		if ((engine->irq_bitmask & mask) &&
+		    (engine->magic == MAGIC_ENGINE)) {
 			mask &= ~engine->irq_bitmask;
 			dbg_tfr("schedule_work, %s.\n", engine->name);
 			schedule_work(&engine->work);
@@ -1401,12 +1442,15 @@ static irqreturn_t xdma_user_irq(int irq, void *dev_id)
 {
 	struct xdma_user_irq *user_irq;
 
-	dbg_irq("(irq=%d) <<<< INTERRUPT SERVICE ROUTINE\n", irq);
+	if (unlikely(!dev_id)) {
+		pr_err("irq %d, dev_id NULL.\n", irq);
+		return IRQ_NONE;
+	}
 
-	BUG_ON(!dev_id);
+	dbg_irq("(irq=%d) <<<< INTERRUPT SERVICE ROUTINE\n", irq);
 	user_irq = (struct xdma_user_irq *)dev_id;
 
-    return user_irq_service(irq, user_irq);
+	return user_irq_service(irq, user_irq);
 }
 
 /*
@@ -1420,15 +1464,18 @@ static irqreturn_t xdma_channel_irq(int irq, void *dev_id)
 	struct xdma_engine *engine;
 	struct interrupt_regs *irq_regs;
 
+	if (unlikely(!dev_id)) {
+		pr_err("irq %d, dev_id NULL.\n", irq);
+		return IRQ_NONE;
+	}
 	dbg_irq("(irq=%d) <<<< INTERRUPT service ROUTINE\n", irq);
-	BUG_ON(!dev_id);
 
 	engine = (struct xdma_engine *)dev_id;
 	xdev = engine->xdev;
 
-	if (!xdev) {
-		WARN_ON(!xdev);
-		dbg_irq("xdma_channel_irq(irq=%d) xdev=%p ??\n", irq, xdev);
+	if (unlikely(!xdev)) {
+		pr_err("xdma_channel_irq(irq=%d) engine 0x%p, xdev NULL.\n",
+			irq, engine);
 		return IRQ_NONE;
 	}
 
@@ -1446,10 +1493,6 @@ static irqreturn_t xdma_channel_irq(int irq, void *dev_id)
 	/* Schedule the bottom half */
 	schedule_work(&engine->work);
 
-	/*
-	 * RTO - need to protect access here if multiple MSI-X are used for
-	 * user interrupts
	 */
 	xdev->irq_count++;
 	return IRQ_HANDLED;
 }
@@ -1541,6 +1584,7 @@ static int is_config_bar(struct xdma_dev *xdev, int idx)
 	return flag;
 }
 
+#ifndef XDMA_CONFIG_BAR_NUM
 static void identify_bars(struct xdma_dev *xdev, int *bar_id_list, int num_bars,
 			int config_bar_pos)
 {
@@ -1558,8 +1602,10 @@ static void identify_bars(struct xdma_dev *xdev, int *bar_id_list, int num_bars,
	 * correctly with both 32-bit and 64-bit BARs.
	 */
 
-	BUG_ON(!xdev);
-	BUG_ON(!bar_id_list);
+	if (unlikely(!xdev || !bar_id_list)) {
+		pr_err("xdev 0x%p, bar_id_list 0x%p.\n", xdev, bar_id_list);
+		return;
+	}
 
 	dbg_init("xdev 0x%p, bars %d, config at %d.\n",
 		xdev, num_bars, config_bar_pos);
@@ -1604,6 +1650,7 @@ static void identify_bars(struct xdma_dev *xdev, int *bar_id_list, int num_bars,
 		num_bars, config_bar_pos, xdev->user_bar_idx,
 		xdev->bypass_bar_idx);
 }
+#endif
 
 /* map_bars() -- map device regions into kernel virtual address space
 *
@@ -1613,6 +1660,24 @@ static void identify_bars(struct xdma_dev *xdev, int *bar_id_list, int num_bars,
 static int map_bars(struct xdma_dev *xdev, struct pci_dev *dev)
 {
 	int rv;
+
+#ifdef XDMA_CONFIG_BAR_NUM
+	rv = map_single_bar(xdev, dev, XDMA_CONFIG_BAR_NUM);
+	if (rv <= 0) {
+		pr_info("%s, map config bar %d failed, %d.\n",
+			dev_name(&dev->dev), XDMA_CONFIG_BAR_NUM, rv);
+		return -EINVAL;
+	}
+
+	if (is_config_bar(xdev, XDMA_CONFIG_BAR_NUM) == 0) {
+		pr_info("%s, unable to identify config bar %d.\n",
+			dev_name(&dev->dev), XDMA_CONFIG_BAR_NUM);
+		return -EINVAL;
+	}
+	xdev->config_bar_idx = XDMA_CONFIG_BAR_NUM;
+
+	return 0;
+#else
 	int i;
 	int bar_id_list[XDMA_BAR_NUM];
 	int bar_id_idx = 0;
@@ -1661,20 +1726,21 @@ static int map_bars(struct xdma_dev *xdev, struct pci_dev *dev)
 	/* unwind; unmap any BARs that we did map */
 	unmap_bars(xdev, dev);
 	return rv;
+#endif
 }
 
 /*
- * MSI-X interrupt:
+ * MSI-X interrupt:
 *	<h2c+c2h channel_max> vectors, followed by <user_max> vectors
 */
 
 /*
- * RTO - code to detect if MSI/MSI-X capability exists is derived
+ * code to detect if MSI/MSI-X capability exists is derived
 * from linux/pci/msi.c - pci_msi_check_device
 */
 
 #ifndef arch_msi_check_device
-int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+static int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
 {
 	return 0;
 }
 #endif
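The comment above documents the MSI-X vector layout: channel vectors (H2C, then C2H) come first, user-event vectors follow. Translating a user event index to its Linux IRQ would therefore look like the sketch below; user_vector_to_irq() is illustrative only, and the pre-4.12 branch assumes the driver's msix_entry table:

#include <linux/pci.h>
#include <linux/version.h>

static int user_vector_to_irq(struct xdma_dev *xdev, int user_idx)
{
	/* user vectors sit after all H2C and C2H channel vectors */
	int j = xdev->h2c_channel_max + xdev->c2h_channel_max + user_idx;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
	return pci_irq_vector(xdev->pdev, j);	/* managed vector lookup */
#else
	return xdev->entry[j].vector;	/* assumed msix_entry table */
#endif
}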
NULL.\n"); + return; + } if (!xdev->msix_enabled) return; prog_irq_msix_user(xdev, 1); + j = xdev->h2c_channel_max + xdev->c2h_channel_max; for (i = 0; i < xdev->user_max; i++, j++) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) u32 vector = pci_irq_vector(xdev->pdev, j); @@ -1956,7 +2032,7 @@ static int irq_msix_user_setup(struct xdma_dev *xdev) { int i; int j = xdev->h2c_channel_max + xdev->c2h_channel_max; - int rv = 0; + int rv = 0; /* vectors set in probe_scan_for_msi() */ for (i = 0; i < xdev->user_max; i++, j++) { @@ -1974,7 +2050,7 @@ static int irq_msix_user_setup(struct xdma_dev *xdev) } pr_info("%d-USR-%d, IRQ#%d with 0x%p\n", xdev->idx, i, vector, &xdev->user_irq[i]); - } + } /* If any errors occur, free IRQs that were successfully requested */ if (rv) { @@ -2016,17 +2092,17 @@ static int irq_legacy_setup(struct xdma_dev *xdev, struct pci_dev *pdev) dbg_init("Legacy Interrupt register value = %d\n", val); if (val > 1) { val--; - w = (val<<24) | (val<<16) | (val<<8)| val; + w = (val << 24) | (val << 16) | (val << 8)| val; /* Program IRQ Block Channel vactor and IRQ Block User vector * with Legacy interrupt value */ - reg = xdev->bar[xdev->config_bar_idx] + 0x2080; // IRQ user + reg = xdev->bar[xdev->config_bar_idx] + 0x2080; // IRQ user write_register(w, reg, 0x2080); - write_register(w, reg+0x4, 0x2084); - write_register(w, reg+0x8, 0x2088); - write_register(w, reg+0xC, 0x208C); - reg = xdev->bar[xdev->config_bar_idx] + 0x20A0; // IRQ Block + write_register(w, reg + 0x4, 0x2084); + write_register(w, reg + 0x8, 0x2088); + write_register(w, reg + 0xC, 0x208C); + reg = xdev->bar[xdev->config_bar_idx] + 0x20A0; // IRQ Block write_register(w, reg, 0x20A0); - write_register(w, reg+0x4, 0x20A4); + write_register(w, reg + 0x4, 0x20A4); } xdev->irq_line = (int)pdev->irq; @@ -2077,10 +2153,14 @@ static void dump_desc(struct xdma_desc *desc_virt) { int j; u32 *p = (u32 *)desc_virt; - static char * const field_name[] = { - "magic|extra_adjacent|control", "bytes", "src_addr_lo", - "src_addr_hi", "dst_addr_lo", "dst_addr_hi", "next_addr", - "next_addr_pad"}; + static char * const field_name[] = { "magic|extra_adjacent|control", + "bytes", + "src_addr_lo", + "src_addr_hi", + "dst_addr_lo", + "dst_addr_hi", + "next_addr", + "next_addr_pad"}; char *dummy; /* remove warning about unused variable when debug printing is off */ @@ -2112,21 +2192,19 @@ static void transfer_dump(struct xdma_transfer *transfer) } #endif /* __LIBXDMA_DEBUG__ */ -/* xdma_desc_alloc() - Allocate cache-coherent array of N descriptors. - * - * Allocates an array of 'number' descriptors in contiguous PCI bus addressable - * memory. Chains the descriptors as a singly-linked list; the descriptor's - * next * pointer specifies the bus address of the next descriptor. +/* transfer_desc_init() - Chains the descriptors as a singly-linked list * + * Each descriptor's next * pointer specifies the bus address + * of the next descriptor. 
+ * Terminates the last descriptor to form a singly-linked list * - * @dev Pointer to pci_dev - * @number Number of descriptors to be allocated - * @desc_bus_p Pointer where to store the first descriptor bus address - * - * @return Virtual address of the first descriptor + * @transfer Pointer to SG DMA transfers + * @count Number of descriptors allocated in contiguous PCI bus addressable + * memory * + * @return 0 on success, -EINVAL on failure */ -static void transfer_desc_init(struct xdma_transfer *transfer, int count) +static int transfer_desc_init(struct xdma_transfer *transfer, int count) { struct xdma_desc *desc_virt = transfer->desc_virt; dma_addr_t desc_bus = transfer->desc_bus; @@ -2135,7 +2213,10 @@ static void transfer_desc_init(struct xdma_transfer *transfer, int count) int extra_adj; u32 temp_control; - BUG_ON(count > XDMA_TRANSFER_MAX_DESC); + if (unlikely(count > XDMA_TRANSFER_MAX_DESC)) { + pr_err("xfer 0x%p, too many desc 0x%x.\n", transfer, count); + return -EINVAL; + } /* create singly-linked list for SG DMA controller */ for (i = 0; i < count - 1; i++) { @@ -2171,6 +2252,8 @@ static void transfer_desc_init(struct xdma_transfer *transfer, int count) temp_control = DESC_MAGIC; desc_virt[i].control = cpu_to_le32(temp_control); + + return 0; } /* xdma_desc_link() - Link two descriptors @@ -2188,8 +2271,7 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second, * remember reserved control in first descriptor, but zero * extra_adjacent! */ - /* RTO - what's this about? Shouldn't it be 0x0000c0ffUL? */ - u32 control = le32_to_cpu(first->control) & 0x0000f0ffUL; + u32 control = le32_to_cpu(first->control) & 0x00FFC0FFUL; /* second descriptor given? */ if (second) { /* @@ -2215,42 +2297,35 @@ static void xdma_desc_link(struct xdma_desc *first, struct xdma_desc *second, /* xdma_desc_adjacent -- Set how many descriptors are adjacent to this one */ static void xdma_desc_adjacent(struct xdma_desc *desc, int next_adjacent) { - int extra_adj = 0; /* remember reserved and control bits */ - u32 control = le32_to_cpu(desc->control) & 0x0000f0ffUL; - u32 max_adj_4k = 0; + u32 control = le32_to_cpu(desc->control) & 0xFFFFC0FFUL; + + if (next_adjacent) + next_adjacent = next_adjacent - 1; + if (next_adjacent > MAX_EXTRA_ADJ) + next_adjacent = MAX_EXTRA_ADJ; + control |= (next_adjacent << 8); - if (next_adjacent > 0) { - extra_adj = next_adjacent - 1; - if (extra_adj > MAX_EXTRA_ADJ){ - extra_adj = MAX_EXTRA_ADJ; - } - max_adj_4k = (0x1000 - ((le32_to_cpu(desc->next_lo))&0xFFF))/32 - 1; - if (extra_adj>max_adj_4k) { - extra_adj = max_adj_4k; - } - if(extra_adj<0){ - printk("Warning: extra_adj<0, converting it to 0\n"); - extra_adj = 0; - } - } - /* merge adjacent and control field */ - control |= 0xAD4B0000UL | (extra_adj << 8); /* write control and next_adjacent */ desc->control = cpu_to_le32(control); } /* xdma_desc_control -- Set complete control field of a descriptor. 
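
The rewritten xdma_desc_adjacent() above is easier to follow once the control-word layout is spelled out: bits 13:8 hold the "extra adjacent descriptors" count, now capped at 0x3F (the libxdma.h hunk later in the diff raises MAX_EXTRA_ADJ from 15 to match), and the old 0xAD4B magic merge and 4 KB-boundary clamp are dropped. A sketch of the same mask, clamp, and merge, with an illustrative macro name:

    #include <linux/types.h>

    #define MY_MAX_EXTRA_ADJ 0x3F          /* 6-bit field, control bits 13:8 */

    static u32 set_extra_adjacent(u32 control, int next_adjacent)
    {
            control &= 0xFFFFC0FFUL;       /* clear bits 13:8, keep the rest */

            if (next_adjacent)
                    next_adjacent--;       /* count excludes this descriptor */
            if (next_adjacent > MY_MAX_EXTRA_ADJ)
                    next_adjacent = MY_MAX_EXTRA_ADJ;

            return control | (next_adjacent << 8);
    }
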
*/ -static void xdma_desc_control_set(struct xdma_desc *first, u32 control_field) +static int xdma_desc_control_set(struct xdma_desc *first, u32 control_field) { /* remember magic and adjacent number */ u32 control = le32_to_cpu(first->control) & ~(LS_BYTE_MASK); - BUG_ON(control_field & ~(LS_BYTE_MASK)); + if (unlikely(control_field & ~(LS_BYTE_MASK))) { + pr_err("control_field bad 0x%x.\n", control_field); + return -EINVAL; + } /* merge adjacent and control field */ control |= control_field; /* write control and next_adjacent */ first->control = cpu_to_le32(control); + + return 0; } /* xdma_desc_clear -- Clear bits in control field of a descriptor. */ @@ -2259,26 +2334,12 @@ static void xdma_desc_control_clear(struct xdma_desc *first, u32 clear_mask) /* remember magic and adjacent number */ u32 control = le32_to_cpu(first->control); - BUG_ON(clear_mask & ~(LS_BYTE_MASK)); - /* merge adjacent and control field */ control &= (~clear_mask); /* write control and next_adjacent */ first->control = cpu_to_le32(control); } -/* xdma_desc_done - recycle cache-coherent linked list of descriptors. - * - * @dev Pointer to pci_dev - * @number Number of descriptors to be allocated - * @desc_virt Pointer to (i.e. virtual address of) first descriptor in list - * @desc_bus Bus address of first descriptor in list - */ -static inline void xdma_desc_done(struct xdma_desc *desc_virt) -{ - memset(desc_virt, 0, XDMA_TRANSFER_MAX_DESC * sizeof(struct xdma_desc)); -} - /* xdma_desc() - Fill a descriptor with the transfer details * * @desc pointer to descriptor to be filled @@ -2320,9 +2381,15 @@ static void transfer_abort(struct xdma_engine *engine, { struct xdma_transfer *head; - BUG_ON(!engine); - BUG_ON(!transfer); - BUG_ON(transfer->desc_num == 0); + if (unlikely(!engine)) { + pr_err("engine NULL.\n"); + return; + } + if (unlikely(!transfer || (transfer->desc_num == 0))) { + pr_err("engine %s, xfer 0x%p, desc 0.\n", + engine->name, transfer); + return; + } pr_info("abort transfer 0x%p, desc %d, engine desc queued %d.\n", transfer, transfer->desc_num, engine->desc_dequeued); @@ -2354,10 +2421,16 @@ static int transfer_queue(struct xdma_engine *engine, struct xdma_dev *xdev; unsigned long flags; - BUG_ON(!engine); - BUG_ON(!engine->xdev); - BUG_ON(!transfer); - BUG_ON(transfer->desc_num == 0); + if (unlikely(!engine || !engine->xdev)) { + pr_err("bad engine 0x%p, xdev 0x%p.\n", + engine, engine ? 
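
xdma_desc_control_set() above typifies the conversion applied throughout this file: a helper that used to BUG_ON() a bad argument now returns -EINVAL, which only pays off because the callers (transfer_init() below, for example) are updated to check the result. The pattern in miniature, with illustrative names:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define MY_FIELD_MASK 0xFFU

    static int set_low_byte(u32 *word, u32 field)
    {
            if (unlikely(field & ~MY_FIELD_MASK)) {
                    pr_err("field bad 0x%x.\n", field);
                    return -EINVAL;         /* was: BUG_ON(...) */
            }
            *word = (*word & ~MY_FIELD_MASK) | field;
            return 0;
    }

    static int caller(u32 *word)
    {
            int rv = set_low_byte(word, 0x1B);

            if (rv < 0)
                    return rv;              /* propagate, don't panic */
            return 0;
    }
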
engine->xdev : NULL); + return -EINVAL; + } + if (unlikely(!transfer || (transfer->desc_num == 0))) { + pr_err("engine %s, xfer 0x%p, desc 0.\n", + engine->name, transfer); + return -EINVAL; + } dbg_tfr("transfer_queue(transfer=0x%p).\n", transfer); xdev = engine->xdev; @@ -2473,8 +2546,10 @@ static void engine_free_resource(struct xdma_engine *engine) static void engine_destroy(struct xdma_dev *xdev, struct xdma_engine *engine) { - BUG_ON(!xdev); - BUG_ON(!engine); + if (unlikely(!xdev || !engine)) { + pr_err("xdev 0x%p, engine 0x%p.\n", xdev, engine); + return; + } dbg_sg("Shutting down engine %s%d", engine->name, engine->channel); @@ -2514,26 +2589,26 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine) /* pick first transfer on the queue (was submitted to engine) */ transfer = list_entry(engine->transfer_list.next, struct xdma_transfer, entry); - BUG_ON(!transfer); xdma_engine_stop(engine); + engine->running = 0; - if (transfer->cyclic) { + if (transfer && transfer->cyclic) { if (engine->xdma_perf) dbg_perf("Stopping perf transfer on %s\n", engine->name); else dbg_perf("Stopping cyclic transfer on %s\n", engine->name); - /* make sure the handler sees correct transfer state */ - transfer->cyclic = 1; - /* - * set STOP flag and interrupt on completion, on the - * last descriptor - */ - xdma_desc_control_set( - transfer->desc_virt + transfer->desc_num - 1, - XDMA_DESC_COMPLETED | XDMA_DESC_STOPPED); + + /* free up the buffer allocated for perf run */ + if (engine->perf_buf_virt) + dma_free_coherent(&engine->xdev->pdev->dev, + engine->xdma_perf->transfer_size, + engine->perf_buf_virt, + engine->perf_buf_bus); + engine->perf_buf_virt = NULL; + list_del(&transfer->entry); } else { dbg_sg("(engine=%p) running transfer is not cyclic\n", engine); @@ -2543,7 +2618,6 @@ struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine) } return transfer; } -EXPORT_SYMBOL_GPL(engine_cyclic_stop); static int engine_writeback_setup(struct xdma_engine *engine) { @@ -2551,9 +2625,11 @@ static int engine_writeback_setup(struct xdma_engine *engine) struct xdma_dev *xdev; struct xdma_poll_wb *writeback; - BUG_ON(!engine); + if (unlikely(!engine || !engine->xdev)) { + pr_err("engine 0x%p, xdev NULL.\n", engine); + return -EINVAL; + } xdev = engine->xdev; - BUG_ON(!xdev); /* * RTO - doing the allocation per engine is wasteful since a full page @@ -2755,7 +2831,7 @@ static int engine_init(struct xdma_engine *engine, struct xdma_dev *xdev, static void transfer_destroy(struct xdma_dev *xdev, struct xdma_transfer *xfer) { /* free descriptors */ - xdma_desc_done(xfer->desc_virt); + memset(xfer->desc_virt, 0, xfer->desc_num * sizeof(struct xdma_desc)); if (xfer->last_in_request && (xfer->flags & XFER_FLAG_NEED_UNMAP)) { struct sg_table *sgt = xfer->sgt; @@ -2803,6 +2879,7 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req int i = 0; int last = 0; u32 control; + int rv; memset(xfer, 0, sizeof(*xfer)); @@ -2815,7 +2892,9 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req xfer->desc_virt = engine->desc; xfer->desc_bus = engine->desc_bus; - transfer_desc_init(xfer, desc_max); + rv = transfer_desc_init(xfer, desc_max); + if (rv < 0) + return rv; dbg_sg("transfer->desc_bus = 0x%llx.\n", (u64)xfer->desc_bus); @@ -2828,7 +2907,9 @@ static int transfer_init(struct xdma_engine *engine, struct xdma_request_cb *req control = XDMA_DESC_STOPPED; control |= XDMA_DESC_EOP; control |= XDMA_DESC_COMPLETED; - 
xdma_desc_control_set(xfer->desc_virt + last, control); + rv = xdma_desc_control_set(xfer->desc_virt + last, control); + if (rv < 0) + return rv; xfer->desc_num = xfer->desc_adjacent = desc_max; @@ -2940,9 +3021,19 @@ static struct xdma_request_cb * xdma_init_request(struct sg_table *sgt, tlen = 0; } j++; + if (j > max) + break; } } - BUG_ON(j > max); + + if (unlikely(j > max)) { + pr_err("too many sdesc %d > %d\n", j, max); +#ifdef __LIBXDMA_DEBUG__ + xdma_request_cb_dump(req); +#endif + xdma_request_free(req); + return NULL; + } req->sw_desc_cnt = j; #ifdef __LIBXDMA_DEBUG__ @@ -2988,8 +3079,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, return -EINVAL; } - BUG_ON(!engine); - BUG_ON(engine->magic != MAGIC_ENGINE); + if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) { + pr_err("bad engine 0x%p, magic 0x%lx.\n", + engine, engine ? engine->magic : 0UL); + return -EINVAL; + } xdev = engine->xdev; if (xdma_device_flag_check(xdev, XDEV_FLAG_OFFLINE)) { @@ -3012,7 +3106,10 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, } sgt->nents = nents; } else { - BUG_ON(!sgt->nents); + if (unlikely(!sgt->nents)) { + pr_err("%s, sgt NOT dma_mapped.\n", engine->name); + return -EINVAL; + } } req = xdma_init_request(sgt, ep_addr); @@ -3026,17 +3123,16 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, sg = sgt->sgl; nents = req->sw_desc_cnt; + mutex_lock(&engine->desc_lock); + while (nents) { unsigned long flags; struct xdma_transfer *xfer; - /* one transfer at a time */ - spin_lock(&engine->desc_lock); - /* build transfer */ rv = transfer_init(engine, req); if (rv < 0) { - spin_unlock(&engine->desc_lock); + mutex_unlock(&engine->desc_lock); goto unmap_sgl; } xfer = &req->xfer; @@ -3061,7 +3157,7 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, rv = transfer_queue(engine, xfer); if (rv < 0) { - spin_unlock(&engine->desc_lock); + mutex_unlock(&engine->desc_lock); pr_info("unable to submit %s, %d.\n", engine->name, rv); goto unmap_sgl; } @@ -3128,11 +3224,11 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, } transfer_destroy(xdev, xfer); - spin_unlock(&engine->desc_lock); if (rv < 0) - goto unmap_sgl; + break; } /* while (sg) */ + mutex_unlock(&engine->desc_lock); unmap_sgl: if (!dma_mapped && sgt->nents) { @@ -3148,33 +3244,43 @@ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, return done; } -EXPORT_SYMBOL_GPL(xdma_xfer_submit); int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine) { - u8 *buffer_virt; u32 max_consistent_size = 128 * 32 * 1024; /* 1024 pages, 4MB */ - dma_addr_t buffer_bus; /* bus address */ struct xdma_transfer *transfer; u64 ep_addr = 0; int num_desc_in_a_loop = 128; int size_in_desc = engine->xdma_perf->transfer_size; int size = size_in_desc * num_desc_in_a_loop; + int free_desc = 0; int i; + int rv = -ENOMEM; - BUG_ON(size_in_desc > max_consistent_size); + if (unlikely(size_in_desc > max_consistent_size)) { + pr_err("%s, size too big %d > %u.\n", + engine->name, size_in_desc, max_consistent_size); + return -EINVAL; + } if (size > max_consistent_size) { size = max_consistent_size; num_desc_in_a_loop = size / size_in_desc; } - buffer_virt = dma_alloc_coherent(&xdev->pdev->dev, size, - &buffer_bus, GFP_KERNEL); + engine->perf_buf_virt = dma_alloc_coherent(&xdev->pdev->dev, size, + &engine->perf_buf_bus, GFP_KERNEL); + if (unlikely(!engine->perf_buf_virt)) { + 
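
The locking change in xdma_xfer_submit() above deserves a note: transfer_init() and the completion wait inside the loop can sleep, which is illegal under the spinlock the old code took per iteration. desc_lock becomes a mutex (see the libxdma.h and alloc_dev_instance() hunks later in the diff) and is now held across the entire submission loop, with every early-exit path unlocking it. The shape of that pattern, with illustrative names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(desc_lock);

    static int queue_one(void)
    {
            return 0;               /* stand-in; the real work may sleep */
    }

    static int submit_all(int nents)
    {
            int rv = 0;

            mutex_lock(&desc_lock); /* once, not per iteration */
            while (nents--) {
                    rv = queue_one();
                    if (rv < 0)
                            break;  /* still reaches the single unlock */
            }
            mutex_unlock(&desc_lock);
            return rv;
    }
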
pr_err("engine %s perf buf OOM.\n", engine->name); + return -ENOMEM; + } /* allocate transfer data structure */ transfer = kzalloc(sizeof(struct xdma_transfer), GFP_KERNEL); - BUG_ON(!transfer); + if (unlikely(!transfer)) { + pr_err("engine %s transfer OOM.\n", engine->name); + goto free_buffer; + } /* 0 = write engine (to_dev=0) , 1 = read engine (to_dev=1) */ transfer->dir = engine->dir; @@ -3186,21 +3292,28 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine) engine->desc = dma_alloc_coherent(&xdev->pdev->dev, num_desc_in_a_loop * sizeof(struct xdma_desc), &engine->desc_bus, GFP_KERNEL); - BUG_ON(!engine->desc); + if (unlikely(!engine->desc)) { + pr_err("%s desc OOM.\n", engine->name); + goto free_xfer; + } dbg_init("device %s, engine %s pre-alloc desc 0x%p,0x%llx.\n", dev_name(&xdev->pdev->dev), engine->name, engine->desc, engine->desc_bus); + free_desc = 1; } transfer->desc_virt = engine->desc; transfer->desc_bus = engine->desc_bus; - transfer_desc_init(transfer, transfer->desc_num); + rv = transfer_desc_init(transfer, transfer->desc_num); + if (rv < 0) + goto free_desc; dbg_sg("transfer->desc_bus = 0x%llx.\n", (u64)transfer->desc_bus); for (i = 0; i < transfer->desc_num; i++) { struct xdma_desc *desc = transfer->desc_virt + i; - dma_addr_t rc_bus_addr = buffer_bus + size_in_desc * i; + dma_addr_t rc_bus_addr = engine->perf_buf_bus + + size_in_desc * i; /* fill in descriptor entry with transfer details */ xdma_desc_set(desc, rc_bus_addr, ep_addr, size_in_desc, @@ -3208,7 +3321,12 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine) } /* stop engine and request interrupt on last descriptor */ - xdma_desc_control_set(transfer->desc_virt, 0); + rv = xdma_desc_control_set(transfer->desc_virt, 0); + if (rv < 0) { + pr_err("%s: Failed to set desc control\n", engine->name); + goto free_desc; + } + /* create a linked loop */ xdma_desc_link(transfer->desc_virt + transfer->desc_num - 1, transfer->desc_virt, transfer->desc_bus); @@ -3218,16 +3336,34 @@ int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine) /* initialize wait queue */ init_waitqueue_head(&transfer->wq); - //printk("=== Descriptor print for PERF \n"); - //transfer_dump(transfer); - dbg_perf("Queueing XDMA I/O %s request for performance measurement.\n", engine->dir ? 
"write (to dev)" : "read (from dev)"); - transfer_queue(engine, transfer); + rv = transfer_queue(engine, transfer); + if (rv < 0) + goto free_desc; + return 0; +free_desc: + if (free_desc && engine->desc) + dma_free_coherent(&xdev->pdev->dev, + num_desc_in_a_loop * sizeof(struct xdma_desc), + engine->desc, engine->desc_bus); + engine->desc = NULL; + +free_xfer: + if (transfer) { + list_del(&transfer->entry); + kfree(transfer); + } + +free_buffer: + if (engine->perf_buf_virt) + dma_free_coherent(&xdev->pdev->dev, size_in_desc, + engine->perf_buf_virt, engine->perf_buf_bus); + engine->perf_buf_virt = NULL; + return rv; } -EXPORT_SYMBOL_GPL(xdma_performance_submit); static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev) { @@ -3235,12 +3371,15 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev) struct xdma_dev *xdev; struct xdma_engine *engine; - BUG_ON(!pdev); + if (unlikely(!pdev)) { + pr_err("pdev NULL.\n"); + return NULL; + } /* allocate zeroed device book keeping structure */ xdev = kzalloc(sizeof(struct xdma_dev), GFP_KERNEL); if (!xdev) { - pr_info("OOM, xdma_dev.\n"); + pr_info("xdev OOM.\n"); return NULL; } spin_lock_init(&xdev->lock); @@ -3267,7 +3406,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev) engine = xdev->engine_h2c; for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) { spin_lock_init(&engine->lock); - spin_lock_init(&engine->desc_lock); + mutex_init(&engine->desc_lock); INIT_LIST_HEAD(&engine->transfer_list); init_waitqueue_head(&engine->shutdown_wq); init_waitqueue_head(&engine->xdma_perf_wq); @@ -3276,7 +3415,7 @@ static struct xdma_dev *alloc_dev_instance(struct pci_dev *pdev) engine = xdev->engine_c2h; for (i = 0; i < XDMA_CHANNEL_NUM_MAX; i++, engine++) { spin_lock_init(&engine->lock); - spin_lock_init(&engine->desc_lock); + mutex_init(&engine->desc_lock); INIT_LIST_HEAD(&engine->transfer_list); init_waitqueue_head(&engine->shutdown_wq); init_waitqueue_head(&engine->xdma_perf_wq); @@ -3289,8 +3428,10 @@ static int request_regions(struct xdma_dev *xdev, struct pci_dev *pdev) { int rv; - BUG_ON(!xdev); - BUG_ON(!pdev); + if (unlikely(!xdev || !pdev)) { + pr_err("xdev 0x%p, pdev 0x%p.\n", xdev, pdev); + return -EINVAL; + } dbg_init("pci_request_regions()\n"); rv = pci_request_regions(pdev, xdev->mod_name); @@ -3308,7 +3449,10 @@ static int request_regions(struct xdma_dev *xdev, struct pci_dev *pdev) static int set_dma_mask(struct pci_dev *pdev) { - BUG_ON(!pdev); + if (unlikely(!pdev)) { + pr_err("pdev NULL.\n"); + return -EINVAL; + } dbg_init("sizeof(dma_addr_t) == %ld\n", sizeof(dma_addr_t)); /* 64-bit addressing capability for XDMA? 
*/ @@ -3338,7 +3482,10 @@ static u32 get_engine_channel_id(struct engine_regs *regs) { u32 value; - BUG_ON(!regs); + if (unlikely(!regs)) { + pr_err("regs NULL.\n"); + return 0xFFFFFFFF; + } value = read_register(®s->identifier); @@ -3349,7 +3496,10 @@ static u32 get_engine_id(struct engine_regs *regs) { u32 value; - BUG_ON(!regs); + if (unlikely(!regs)) { + pr_err("regs NULL.\n"); + return 0xFFFFFFFF; + } value = read_register(®s->identifier); return (value & 0xffff0000U) >> 16; @@ -3360,7 +3510,10 @@ static void remove_engines(struct xdma_dev *xdev) struct xdma_engine *engine; int i; - BUG_ON(!xdev); + if (unlikely(!xdev)) { + pr_err("xdev NULL.\n"); + return; + } /* iterate over channels */ for (i = 0; i < xdev->h2c_channel_max; i++) { @@ -3439,7 +3592,10 @@ static int probe_engines(struct xdma_dev *xdev) int i; int rv = 0; - BUG_ON(!xdev); + if (unlikely(!xdev)) { + pr_err("xdev NULL.\n"); + return -EINVAL; + } /* iterate over channels */ for (i = 0; i < xdev->h2c_channel_max; i++) { @@ -3460,12 +3616,12 @@ static int probe_engines(struct xdma_dev *xdev) } #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) -static void pci_enable_relaxed_ordering(struct pci_dev *pdev) +static void pci_enable_capability(struct pci_dev *pdev, int cap) { - pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, cap); } #else -static void pci_enable_relaxed_ordering(struct pci_dev *pdev) +static void pci_enable_capability(struct pci_dev *pdev, int cap) { u16 v; int pos; @@ -3473,50 +3629,12 @@ static void pci_enable_relaxed_ordering(struct pci_dev *pdev) pos = pci_pcie_cap(pdev); if (pos > 0) { pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &v); - v |= PCI_EXP_DEVCTL_RELAX_EN; + v |= cap; pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, v); } } #endif -static void pci_check_extended_tag(struct xdma_dev *xdev, struct pci_dev *pdev) -{ - u16 cap; - u32 v; - void *__iomem reg; - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap); -#else - int pos; - - pos = pci_pcie_cap(pdev); - if (pos > 0) - pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &cap); - else { - pr_info("pdev 0x%p, unable to access pcie cap.\n", pdev); - return; - } -#endif - - if ((cap & PCI_EXP_DEVCTL_EXT_TAG)) - return; - - /* extended tag not enabled */ - pr_info("0x%p EXT_TAG disabled.\n", pdev); - - if (xdev->config_bar_idx < 0) { - pr_info("pdev 0x%p, xdev 0x%p, config bar UNKNOWN.\n", - pdev, xdev); - return; - } - - reg = xdev->bar[xdev->config_bar_idx] + XDMA_OFS_CONFIG + 0x4C; - v = read_register(reg); - v = (v & 0xFF) | (((u32)32) << 8); - write_register(v, reg, XDMA_OFS_CONFIG + 0x4C); -} - void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max, int *h2c_channel_max, int *c2h_channel_max) { @@ -3556,9 +3674,10 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max, pci_check_intr_pend(pdev); /* enable relaxed ordering */ - pci_enable_relaxed_ordering(pdev); + pci_enable_capability(pdev, PCI_EXP_DEVCTL_RELAX_EN); - pci_check_extended_tag(xdev, pdev); + /* enable extended tag */ + pci_enable_capability(pdev, PCI_EXP_DEVCTL_EXT_TAG); /* force MRRS to be 512 */ rv = pcie_set_readrq(pdev, 512); @@ -3631,7 +3750,6 @@ void *xdma_device_open(const char *mname, struct pci_dev *pdev, int *user_max, kfree(xdev); return NULL; } -EXPORT_SYMBOL_GPL(xdma_device_open); void xdma_device_close(struct pci_dev *pdev, void *dev_hndl) { @@ -3676,7 +3794,6 @@ void xdma_device_close(struct 
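
Note how pci_enable_relaxed_ordering() generalizes into pci_enable_capability(): the removed pci_check_extended_tag() poked a config register by hand, while the replacement simply sets the requested PCI_EXP_DEVCTL bit through the core helper, so relaxed ordering and extended tag now share one code path. On reasonably recent kernels this is a one-liner (the helper name here is illustrative):

    #include <linux/pci.h>

    static void enable_devctl_bit(struct pci_dev *pdev, u16 bit)
    {
            /* read-modify-write of the PCIe Device Control register */
            pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, bit);
    }

    /* usage, mirroring xdma_device_open() above:
     *      enable_devctl_bit(pdev, PCI_EXP_DEVCTL_RELAX_EN);
     *      enable_devctl_bit(pdev, PCI_EXP_DEVCTL_EXT_TAG);
     */
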
pci_dev *pdev, void *dev_hndl) kfree(xdev); } -EXPORT_SYMBOL_GPL(xdma_device_close); void xdma_device_offline(struct pci_dev *pdev, void *dev_hndl) { @@ -3690,11 +3807,11 @@ void xdma_device_offline(struct pci_dev *pdev, void *dev_hndl) if (debug_check_dev_hndl(__func__, pdev, dev_hndl) < 0) return; -pr_info("pdev 0x%p, xdev 0x%p.\n", pdev, xdev); + pr_info("pdev 0x%p, xdev 0x%p.\n", pdev, xdev); xdma_device_flag_set(xdev, XDEV_FLAG_OFFLINE); /* wait for all engines to be idle */ - for (i = 0; i < xdev->h2c_channel_max; i++) { + for (i = 0; i < xdev->h2c_channel_max; i++) { unsigned long flags; engine = &xdev->engine_h2c[i]; @@ -3709,7 +3826,7 @@ pr_info("pdev 0x%p, xdev 0x%p.\n", pdev, xdev); } } - for (i = 0; i < xdev->c2h_channel_max; i++) { + for (i = 0; i < xdev->c2h_channel_max; i++) { unsigned long flags; engine = &xdev->engine_c2h[i]; @@ -3731,7 +3848,6 @@ pr_info("pdev 0x%p, xdev 0x%p.\n", pdev, xdev); pr_info("xdev 0x%p, done.\n", xdev); } -EXPORT_SYMBOL_GPL(xdma_device_offline); void xdma_device_online(struct pci_dev *pdev, void *dev_hndl) { @@ -3778,9 +3894,8 @@ pr_info("pdev 0x%p, xdev 0x%p.\n", pdev, xdev); } xdma_device_flag_clear(xdev, XDEV_FLAG_OFFLINE); -pr_info("xdev 0x%p, done.\n", xdev); + pr_info("xdev 0x%p, done.\n", xdev); } -EXPORT_SYMBOL_GPL(xdma_device_online); int xdma_device_restart(struct pci_dev *pdev, void *dev_hndl) { @@ -3795,7 +3910,6 @@ int xdma_device_restart(struct pci_dev *pdev, void *dev_hndl) pr_info("NOT implemented, 0x%p.\n", xdev); return -EINVAL; } -EXPORT_SYMBOL_GPL(xdma_device_restart); int xdma_user_isr_register(void *dev_hndl, unsigned int mask, irq_handler_t handler, void *dev) @@ -3822,7 +3936,6 @@ int xdma_user_isr_register(void *dev_hndl, unsigned int mask, return 0; } -EXPORT_SYMBOL_GPL(xdma_user_isr_register); int xdma_user_isr_enable(void *dev_hndl, unsigned int mask) { @@ -3841,7 +3954,6 @@ int xdma_user_isr_enable(void *dev_hndl, unsigned int mask) return 0; } -EXPORT_SYMBOL_GPL(xdma_user_isr_enable); int xdma_user_isr_disable(void *dev_hndl, unsigned int mask) { @@ -3859,23 +3971,7 @@ int xdma_user_isr_disable(void *dev_hndl, unsigned int mask) return 0; } -EXPORT_SYMBOL_GPL(xdma_user_isr_disable); - -#ifdef __LIBXDMA_MOD__ -static int __init xdma_base_init(void) -{ - printk(KERN_INFO "%s", version); - return 0; -} -static void __exit xdma_base_exit(void) -{ - return; -} - -module_init(xdma_base_init); -module_exit(xdma_base_exit); -#endif /* makes an existing transfer cyclic */ static void xdma_transfer_cyclic(struct xdma_transfer *transfer) { @@ -3892,11 +3988,13 @@ static int transfer_monitor_cyclic(struct xdma_engine *engine, struct xdma_result *result; int rc = 0; - BUG_ON(!engine); - BUG_ON(!transfer); + if (unlikely(!engine || !engine->cyclic_result || !transfer)) { + pr_err("engine 0x%p, cyclic_result 0x%p, xfer 0x%p.\n", + engine, engine->cyclic_result, transfer); + return -EINVAL; + } result = engine->cyclic_result; - BUG_ON(!result); if (poll_mode) { int i ; @@ -3956,8 +4054,10 @@ static int copy_cyclic_to_user(struct xdma_engine *engine, int pkt_length, struct scatterlist *sg; int more = pkt_length; - BUG_ON(!engine); - BUG_ON(!buf); + if (unlikely(!buf || !engine)) { + pr_err("engine 0x%p, buf 0x%p.\n", engine, buf); + return -EINVAL; + } dbg_tfr("%s, pkt_len %d, head %d, user buf idx %u.\n", engine->name, pkt_length, head, engine->user_buffer_index); @@ -4021,9 +4121,11 @@ static int complete_cyclic(struct xdma_engine *engine, char __user *buf, int num_credit = 0; unsigned long flags; - BUG_ON(!engine); + if 
(unlikely(!engine || !engine->cyclic_result)) { + pr_err("engine 0x%p, cyclic_result NULL.\n", engine); + return -EINVAL; + } result = engine->cyclic_result; - BUG_ON(!result); spin_lock_irqsave(&engine->lock, flags); @@ -4104,11 +4206,17 @@ ssize_t xdma_engine_read_cyclic(struct xdma_engine *engine, char __user *buf, int rc_len = 0; struct xdma_transfer *transfer; - BUG_ON(!engine); - BUG_ON(engine->magic != MAGIC_ENGINE); + if (unlikely(!engine || (engine->magic != MAGIC_ENGINE))) { + pr_err("bad engine 0x%p, magic 0x%lx.\n", + engine, engine ? engine->magic : 0UL); + return -EINVAL; + } + if (unlikely(!engine->cyclic_req)) { + pr_err("engine %s, cyclic_req NULL.\n", engine->name); + return -EINVAL; + } transfer = &engine->cyclic_req->xfer; - BUG_ON(!transfer); engine->user_buffer_index = 0; @@ -4207,9 +4315,11 @@ int xdma_cyclic_transfer_setup(struct xdma_engine *engine) int i; int rc; - BUG_ON(!engine); + if (unlikely(!engine || !engine->xdev)) { + pr_err("engine 0x%p, xdev NULL.\n", engine); + return -EINVAL; + } xdev = engine->xdev; - BUG_ON(!xdev); if (engine->cyclic_req) { pr_info("%s: exclusive access already taken.\n", @@ -4272,18 +4382,17 @@ int xdma_cyclic_transfer_setup(struct xdma_engine *engine) transfer_dump(xfer); #endif - if(enable_credit_mp){ - //write_register(RX_BUF_PAGES,&engine->sgdma_regs->credits); + if (enable_credit_mp) write_register(128, &engine->sgdma_regs->credits, 0); - } spin_unlock_irqrestore(&engine->lock, flags); /* start cyclic transfer */ - transfer_queue(engine, xfer); - - return 0; + rc = transfer_queue(engine, xfer); + if (!rc) + return 0; + spin_lock_irqsave(&engine->lock, flags); /* unwind on errors */ err_out: if (engine->cyclic_req) { @@ -4304,10 +4413,12 @@ int xdma_cyclic_transfer_setup(struct xdma_engine *engine) return rc; } - static int cyclic_shutdown_polled(struct xdma_engine *engine) { - BUG_ON(!engine); + if (unlikely(!engine)) { + pr_err("engine NULL.\n"); + return -EINVAL; + } spin_lock(&engine->lock); @@ -4336,18 +4447,13 @@ static int cyclic_shutdown_interrupt(struct xdma_engine *engine) { int rc; - BUG_ON(!engine); + if (unlikely(!engine)) { + pr_err("engine NULL.\n"); + return -EINVAL; + } rc = wait_event_interruptible_timeout(engine->shutdown_wq, !engine->running, msecs_to_jiffies(10000)); - -#if 0 - if (rc) { - dbg_tfr("wait_event_interruptible=%d\n", rc); - return rc; - } -#endif - if (engine->running) { pr_info("%s still running?!, %d\n", engine->name, rc); return -EINVAL; @@ -4364,6 +4470,10 @@ int xdma_cyclic_transfer_teardown(struct xdma_engine *engine) unsigned long flags; transfer = engine_cyclic_stop(engine); + if (transfer == NULL) { + pr_err("Failed to stop cyclic engine\n"); + return -EINVAL; + } spin_lock_irqsave(&engine->lock, flags); if (transfer) { @@ -4378,16 +4488,20 @@ int xdma_cyclic_transfer_teardown(struct xdma_engine *engine) spin_unlock_irqrestore(&engine->lock, flags); /* wait for engine to be no longer running */ - if (poll_mode) + if (poll_mode) rc = cyclic_shutdown_polled(engine); else rc = cyclic_shutdown_interrupt(engine); + if (rc < 0) { + pr_err("Failed to shutdown cyclic transfers\n"); + return rc; + } /* obtain spin lock to atomically remove resources */ spin_lock_irqsave(&engine->lock, flags); if (engine->cyclic_req) { - xdma_request_free(engine->cyclic_req); + xdma_request_free(engine->cyclic_req); engine->cyclic_req = NULL; } @@ -4413,7 +4527,7 @@ int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg) dbg_perf("IOCTL_XDMA_ADDRMODE_SET\n"); rv = get_user(dst, (int __user *)arg); 
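
cyclic_shutdown_interrupt() above keeps the bounded-wait idiom: sleep until the ISR's bottom half clears engine->running and wakes shutdown_wq, but give up after ten seconds rather than hanging teardown forever. The idiom in isolation, with illustrative names:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static int wait_engine_idle(wait_queue_head_t *wq, const int *running)
    {
            int rc = wait_event_interruptible_timeout(*wq, !*running,
                                            msecs_to_jiffies(10000));

            if (*running) {
                    /* rc is 0 on timeout, -ERESTARTSYS on a signal */
                    pr_info("engine still running?!, %d\n", rc);
                    return -EINVAL;
            }
            return 0;               /* idle; rc holds the remaining jiffies */
    }
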
- if (rv == 0) { + if (rv == 0) { engine->non_incr_addr = !!dst; if (engine->non_incr_addr) write_register(w, &engine->regs->control_w1s, @@ -4428,4 +4542,3 @@ int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg) return rv; } - diff --git a/sdk/linux_kernel_drivers/xdma/libxdma.h b/sdk/linux_kernel_drivers/xdma/libxdma.h old mode 100755 new mode 100644 index 07d016c28..1fbee5aaf --- a/sdk/linux_kernel_drivers/xdma/libxdma.h +++ b/sdk/linux_kernel_drivers/xdma/libxdma.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver - * Copyright(c) 2015 - 2017 Xilinx, Inc. + * Copyright(c) 2015 - 2020 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,26 +36,32 @@ #include #include +/* + * if the config bar is fixed, the driver does not need to search through + * all of the bars + */ +//#define XDMA_CONFIG_BAR_NUM 1 + /* Switch debug printing on/off */ -#define XDMA_DEBUG 0 +#define XDMA_DEBUG 0 /* SECTION: Preprocessor macros/constants */ -#define XDMA_BAR_NUM (6) +#define XDMA_BAR_NUM (6) /* maximum amount of register space to map */ -#define XDMA_BAR_SIZE (0x8000UL) +#define XDMA_BAR_SIZE (0x8000UL) /* Use this definition to poll several times between calls to schedule */ -#define NUM_POLLS_PER_SCHED 100 +#define NUM_POLLS_PER_SCHED 100 -#define XDMA_CHANNEL_NUM_MAX (4) +#define XDMA_CHANNEL_NUM_MAX (4) /* * interrupts per engine, rad2_vul.sv:237 * .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]), */ -#define XDMA_ENG_IRQ_NUM (1) -#define MAX_EXTRA_ADJ (15) -#define RX_STATUS_EOP (1) +#define XDMA_ENG_IRQ_NUM (1) +#define MAX_EXTRA_ADJ (0x3F) +#define RX_STATUS_EOP (1) /* Target internal components on XDMA control BAR */ #define XDMA_OFS_INT_CTRL (0x2000UL) @@ -65,7 +71,7 @@ #define XDMA_TRANSFER_MAX_DESC (2048) /* maximum size of a single DMA transfer descriptor */ -#define XDMA_DESC_BLEN_BITS 28 +#define XDMA_DESC_BLEN_BITS 28 #define XDMA_DESC_BLEN_MAX ((1 << (XDMA_DESC_BLEN_BITS)) - 1) /* bits of the SG DMA control register */ @@ -157,7 +163,7 @@ #define XDMA_ID_C2H 0x1fc1U /* for C2H AXI-ST mode */ -#define CYCLIC_RX_PAGES_MAX 256 +#define CYCLIC_RX_PAGES_MAX 256 #define LS_BYTE_MASK 0x000000FFUL @@ -442,7 +448,8 @@ struct xdma_engine { int max_extra_adj; /* descriptor prefetch capability */ int desc_dequeued; /* num descriptors of completed transfers */ u32 status; /* last known status of device */ - u32 interrupt_enable_mask_value;/* only used for MSIX mode to store per-engine interrupt mask value */ + /* only used for MSIX mode to store per-engine interrupt mask value */ + u32 interrupt_enable_mask_value; /* Transfer list management */ struct list_head transfer_list; /* queue of transfers */ @@ -452,6 +459,10 @@ struct xdma_engine { dma_addr_t cyclic_result_bus; /* bus addr for transfer */ struct xdma_request_cb *cyclic_req; struct sg_table cyclic_sgt; + + u8 *perf_buf_virt; + dma_addr_t perf_buf_bus; /* bus address */ + u8 eop_found; /* used only for cyclic(rx:c2h) */ int rx_tail; /* follows the HW */ @@ -473,7 +484,7 @@ struct xdma_engine { u32 irq_bitmask; /* IRQ bit mask for this engine */ struct work_struct work; /* Work queue for interrupt handling */ - spinlock_t desc_lock; /* protects concurrent access */ + struct mutex desc_lock; /* protects concurrent access */ dma_addr_t desc_bus; struct xdma_desc *desc; @@ -490,14 +501,14 @@ struct xdma_user_irq { wait_queue_head_t 
events_wq; /* wait queue to sync waiting threads */ irq_handler_t handler; - void *dev; + void *dev; }; /* XDMA PCIe device specific book-keeping */ #define XDEV_FLAG_OFFLINE 0x1 struct xdma_dev { struct list_head list_head; - struct list_head rcu_node; + struct list_head rcu_node; unsigned long magic; /* structure ID for sanity checks */ struct pci_dev *pdev; /* pci device struct from probe() */ @@ -509,7 +520,7 @@ struct xdma_dev { unsigned int flags; /* PCIe BAR management */ - void *__iomem bar[XDMA_BAR_NUM]; /* addresses for mapped BARs */ + void __iomem *bar[XDMA_BAR_NUM]; /* addresses for mapped BARs */ int user_bar_idx; /* BAR index of user logic */ int config_bar_idx; /* BAR index of XDMA config logic */ int bypass_bar_idx; /* BAR index of XDMA bypass logic */ @@ -605,8 +616,8 @@ void get_perf_stats(struct xdma_engine *engine); int xdma_cyclic_transfer_setup(struct xdma_engine *engine); int xdma_cyclic_transfer_teardown(struct xdma_engine *engine); -ssize_t xdma_engine_read_cyclic(struct xdma_engine *, char __user *, size_t, - int); +ssize_t xdma_engine_read_cyclic(struct xdma_engine *engine, char __user *buf, + size_t count, int timeout_ms); int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg); #endif /* XDMA_LIB_H */ diff --git a/sdk/linux_kernel_drivers/xdma/libxdma_api.h b/sdk/linux_kernel_drivers/xdma/libxdma_api.h index bf043eb12..d4ed4ec50 100644 --- a/sdk/linux_kernel_drivers/xdma/libxdma_api.h +++ b/sdk/linux_kernel_drivers/xdma/libxdma_api.h @@ -1,12 +1,24 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver + * Copyright(c) 2015 - 2020 Xilinx, Inc. * - * Copyright(c) Sidebranch. - * Copyright(c) Xilinx, Inc. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "LICENSE". 
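
The one-character-looking change to the bar[] array in libxdma.h above is the key sparse fix of this patch: the address-space qualifier must sit between void and the star, i.e. void __iomem *, for `make C=1` to flag direct dereferences of MMIO pointers; `void *__iomem` attaches the qualifier to the wrong thing. Correct declaration and access look like this (my_bars is an illustrative type):

    #include <linux/io.h>
    #include <linux/types.h>

    struct my_bars {
            void __iomem *bar[6];   /* not: void *__iomem bar[6]; */
    };

    static u32 read_id(struct my_bars *b, int idx, unsigned long off)
    {
            /* MMIO goes through accessors, never a plain *ptr */
            return ioread32(b->bar[idx] + off);
    }
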
* * Karen Xie - * Leon Woestenberg * ******************************************************************************/ @@ -70,10 +82,7 @@ void xdma_device_close(struct pci_dev *pdev, void *dev_handle); /* * xdma_device_restart - restart the fpga * @pdev: ptr to struct pci_dev - * TODO: - * may need more refining on the parameter list * return < 0 in case of error - * TODO: exact error code will be defined later */ int xdma_device_restart(struct pci_dev *pdev, void *dev_handle); @@ -94,7 +103,6 @@ int xdma_device_restart(struct pci_dev *pdev, void *dev_handle); * @name: to be passed to the handler, ignored if handler is NULL` * @dev: to be passed to the handler, ignored if handler is NULL` * return < 0 in case of error - * TODO: exact error code will be defined later */ int xdma_user_isr_register(void *dev_hndl, unsigned int mask, irq_handler_t handler, void *dev); @@ -104,7 +112,6 @@ int xdma_user_isr_register(void *dev_hndl, unsigned int mask, * @pdev: ptr to the the pci_dev struct * @mask: bitmask of user interrupts (0 ~ 15)to be registered * return < 0 in case of error - * TODO: exact error code will be defined later */ int xdma_user_isr_enable(void *dev_hndl, unsigned int mask); int xdma_user_isr_disable(void *dev_hndl, unsigned int mask); @@ -121,15 +128,8 @@ int xdma_user_isr_disable(void *dev_hndl, unsigned int mask); * @timeout: timeout in mili-seconds, *currently ignored * return # of bytes transfered or * < 0 in case of error - * TODO: exact error code will be defined later */ ssize_t xdma_xfer_submit(void *dev_hndl, int channel, bool write, u64 ep_addr, struct sg_table *sgt, bool dma_mapped, int timeout_ms); - - -/////////////////////missing API//////////////////// - -//xdma_get_channle_state - if no interrupt on DMA hang is available -//xdma_channle_restart #endif diff --git a/sdk/linux_kernel_drivers/xdma/version.h b/sdk/linux_kernel_drivers/xdma/version.h old mode 100755 new mode 100644 index 64b91799f..5ed57832d --- a/sdk/linux_kernel_drivers/xdma/version.h +++ b/sdk/linux_kernel_drivers/xdma/version.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver - * Copyright(c) 2015 - 2017 Xilinx, Inc. + * Copyright(c) 2015 - 2020 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -21,12 +21,13 @@ * Karen Xie * ******************************************************************************/ + #ifndef __XDMA_VERSION_H__ #define __XDMA_VERSION_H__ -#define DRV_MOD_MAJOR 2017 +#define DRV_MOD_MAJOR 2020 #define DRV_MOD_MINOR 1 -#define DRV_MOD_PATCHLEVEL 47 +#define DRV_MOD_PATCHLEVEL 01 #define DRV_MODULE_VERSION \ __stringify(DRV_MOD_MAJOR) "." \ diff --git a/sdk/linux_kernel_drivers/xdma/xdma_cdev.c b/sdk/linux_kernel_drivers/xdma/xdma_cdev.c index 8a3311618..a5c3ac553 100644 --- a/sdk/linux_kernel_drivers/xdma/xdma_cdev.c +++ b/sdk/linux_kernel_drivers/xdma/xdma_cdev.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver - * Copyright(c) 2015 - 2017 Xilinx, Inc. + * Copyright(c) 2015 - 2020 Xilinx, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -21,11 +21,14 @@ * Karen Xie * ******************************************************************************/ + #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include "xdma_cdev.h" -struct class *g_xdma_class; +static struct class *g_xdma_class; + +struct kmem_cache *cdev_cache; enum cdev_type { CHAR_USER, @@ -52,12 +55,12 @@ static const char * const devnode_names[] = { }; enum xpdev_flags_bits { - XDF_CDEV_USER, - XDF_CDEV_CTRL, - XDF_CDEV_XVC, - XDF_CDEV_EVENT, - XDF_CDEV_SG, - XDF_CDEV_BYPASS, + XDF_CDEV_USER, + XDF_CDEV_CTRL, + XDF_CDEV_XVC, + XDF_CDEV_EVENT, + XDF_CDEV_SG, + XDF_CDEV_BYPASS, }; static inline void xpdev_flag_set(struct xdma_pci_dev *xpdev, @@ -79,16 +82,18 @@ static inline int xpdev_flag_test(struct xdma_pci_dev *xpdev, } #ifdef __XDMA_SYSFS__ -ssize_t show_device_numbers(struct device *dev, struct device_attribute *attr, - char *buf) +ssize_t xdma_dev_instance_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - struct xdma_pci_dev *xpdev = (struct xdma_pci_dev *)dev_get_drvdata(dev); + struct xdma_pci_dev *xpdev = + (struct xdma_pci_dev *)dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\t%d\n", xpdev->major, xpdev->xdev->idx); } -static DEVICE_ATTR(xdma_dev_instance, S_IRUGO, show_device_numbers, NULL); +static DEVICE_ATTR_RO(xdma_dev_instance); #endif static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type) @@ -102,7 +107,10 @@ static int config_kobject(struct xdma_cdev *xcdev, enum cdev_type type) case CHAR_XDMA_C2H: case CHAR_BYPASS_H2C: case CHAR_BYPASS_C2H: - BUG_ON(!engine); + if (!engine) { + pr_err("Invalid DMA engine\n"); + return rv; + } rv = kobject_set_name(&xcdev->cdev.kobj, devnode_names[type], xdev->idx, engine->channel); break; @@ -133,22 +141,23 @@ int xcdev_check(const char *fname, struct xdma_cdev *xcdev, bool check_engine) if (!xcdev || xcdev->magic != MAGIC_CHAR) { pr_info("%s, xcdev 0x%p, magic 0x%lx.\n", - fname, xcdev, xcdev ? xcdev->magic : 0xFFFFFFFF); + fname, xcdev, xcdev ? xcdev->magic : 0xFFFFFFFF); return -EINVAL; } - xdev = xcdev->xdev; + xdev = xcdev->xdev; if (!xdev || xdev->magic != MAGIC_DEVICE) { pr_info("%s, xdev 0x%p, magic 0x%lx.\n", - fname, xdev, xdev ? xdev->magic : 0xFFFFFFFF); + fname, xdev, xdev ? xdev->magic : 0xFFFFFFFF); return -EINVAL; } if (check_engine) { - struct xdma_engine *engine = xcdev->engine; + struct xdma_engine *engine = xcdev->engine; + if (!engine || engine->magic != MAGIC_ENGINE) { pr_info("%s, engine 0x%p, magic 0x%lx.\n", fname, - engine, engine ? engine->magic : 0xFFFFFFFF); + engine, engine ? 
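
The sysfs hunk above is a naming-convention change, not just a rename: DEVICE_ATTR_RO(name) expands to a read-only (0444) struct device_attribute called dev_attr_name and wires it to a function that must literally be named name_show, which is why show_device_numbers() had to become xdma_dev_instance_show(). In miniature (foo is illustrative):

    #include <linux/device.h>

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(foo);     /* generates dev_attr_foo, mode 0444 */

    /* registered and removed the same way the driver does above:
     *      device_create_file(dev, &dev_attr_foo);
     *      device_remove_file(dev, &dev_attr_foo);
     */
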
engine->magic : 0xFFFFFFFF); return -EINVAL; } } @@ -162,7 +171,11 @@ int char_open(struct inode *inode, struct file *file) /* pointer to containing structure of the character device inode */ xcdev = container_of(inode->i_cdev, struct xdma_cdev, cdev); - BUG_ON(xcdev->magic != MAGIC_CHAR); + if (xcdev->magic != MAGIC_CHAR) { + pr_err("xcdev 0x%p inode 0x%lx magic mismatch 0x%lx\n", + xcdev, inode->i_ino, xcdev->magic); + return -EINVAL; + } /* create a reference to our char device in the opened file */ file->private_data = xcdev; @@ -177,13 +190,30 @@ int char_close(struct inode *inode, struct file *file) struct xdma_dev *xdev; struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data; - BUG_ON(!xcdev); - BUG_ON(xcdev->magic != MAGIC_CHAR); + if (!xcdev) { + pr_err("char device with inode 0x%lx xcdev NULL\n", + inode->i_ino); + return -EINVAL; + } + + if (xcdev->magic != MAGIC_CHAR) { + pr_err("xcdev 0x%p magic mismatch 0x%lx\n", + xcdev, xcdev->magic); + return -EINVAL; + } /* fetch device specific data stored earlier during open */ xdev = xcdev->xdev; - BUG_ON(!xdev); - BUG_ON(xdev->magic != MAGIC_DEVICE); + if (!xdev) { + pr_err("char device with inode 0x%lx xdev NULL\n", + inode->i_ino); + return -EINVAL; + } + + if (xdev->magic != MAGIC_DEVICE) { + pr_err("xdev 0x%p magic mismatch 0x%lx\n", xdev, xdev->magic); + return -EINVAL; + } return 0; } @@ -197,40 +227,52 @@ int char_close(struct inode *inode, struct file *file) static int create_sys_device(struct xdma_cdev *xcdev, enum cdev_type type) { - struct xdma_dev *xdev = xcdev->xdev; - struct xdma_engine *engine = xcdev->engine; - int last_param; + struct xdma_dev *xdev = xcdev->xdev; + struct xdma_engine *engine = xcdev->engine; + int last_param; - if (type == CHAR_EVENTS) - last_param = xcdev->bar; - else - last_param = engine ? engine->channel : 0; + if (type == CHAR_EVENTS) + last_param = xcdev->bar; + else + last_param = engine ? 
engine->channel : 0; - xcdev->sys_device = device_create(g_xdma_class, &xdev->pdev->dev, - xcdev->cdevno, NULL, devnode_names[type], xdev->idx, - last_param); + xcdev->sys_device = device_create(g_xdma_class, &xdev->pdev->dev, + xcdev->cdevno, NULL, devnode_names[type], xdev->idx, + last_param); - if (!xcdev->sys_device) { - pr_err("device_create(%s) failed\n", devnode_names[type]); - return -1; - } + if (!xcdev->sys_device) { + pr_err("device_create(%s) failed\n", devnode_names[type]); + return -1; + } - return 0; + return 0; } static int destroy_xcdev(struct xdma_cdev *cdev) { if (!cdev) { pr_warn("cdev NULL.\n"); - return 0; + return -EINVAL; } if (cdev->magic != MAGIC_CHAR) { pr_warn("cdev 0x%p magic mismatch 0x%lx\n", cdev, cdev->magic); - return 0; + return -EINVAL; + } + + if (!cdev->xdev) { + pr_err("xdev NULL\n"); + return -EINVAL; + } + + if (!g_xdma_class) { + pr_err("g_xdma_class NULL\n"); + return -EINVAL; + } + + if (!cdev->sys_device) { + pr_err("cdev sys_device NULL\n"); + return -EINVAL; } - BUG_ON(!cdev->xdev); - BUG_ON(!g_xdma_class); - BUG_ON(!cdev->sys_device); if (cdev->sys_device) device_destroy(g_xdma_class, cdev->cdevno); @@ -341,58 +383,91 @@ static int create_xcdev(struct xdma_pci_dev *xpdev, struct xdma_cdev *xcdev, del_cdev: cdev_del(&xcdev->cdev); unregister_region: - unregister_chrdev_region(dev, XDMA_MINOR_COUNT); + unregister_chrdev_region(xcdev->cdevno, XDMA_MINOR_COUNT); return rv; } void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev) { - int i; - + int i = 0; + int rv; #ifdef __XDMA_SYSFS__ - device_remove_file(&xpdev->pdev->dev, &dev_attr_xdma_dev_instance); + device_remove_file(&xpdev->pdev->dev, &dev_attr_xdma_dev_instance); #endif if (xpdev_flag_test(xpdev, XDF_CDEV_SG)) { /* iterate over channels */ - for (i = 0; i < xpdev->h2c_channel_max; i++) + for (i = 0; i < xpdev->h2c_channel_max; i++) { /* remove SG DMA character device */ - destroy_xcdev(&xpdev->sgdma_h2c_cdev[i]); - for (i = 0; i < xpdev->c2h_channel_max; i++) - destroy_xcdev(&xpdev->sgdma_c2h_cdev[i]); + rv = destroy_xcdev(&xpdev->sgdma_h2c_cdev[i]); + if (rv < 0) + pr_err("Failed to destroy h2c xcdev %d error :0x%x\n", + i, rv); + } + for (i = 0; i < xpdev->c2h_channel_max; i++) { + rv = destroy_xcdev(&xpdev->sgdma_c2h_cdev[i]); + if (rv < 0) + pr_err("Failed to destroy c2h xcdev %d error 0x%x\n", + i, rv); + } } if (xpdev_flag_test(xpdev, XDF_CDEV_EVENT)) { - for (i = 0; i < xpdev->user_max; i++) - destroy_xcdev(&xpdev->events_cdev[i]); + for (i = 0; i < xpdev->user_max; i++) { + rv = destroy_xcdev(&xpdev->events_cdev[i]); + if (rv < 0) + pr_err("Failed to destroy cdev event %d error 0x%x\n", + i, rv); + } } /* remove control character device */ if (xpdev_flag_test(xpdev, XDF_CDEV_CTRL)) { - destroy_xcdev(&xpdev->ctrl_cdev); + rv = destroy_xcdev(&xpdev->ctrl_cdev); + if (rv < 0) + pr_err("Failed to destroy cdev ctrl event %d error 0x%x\n", + i, rv); } /* remove user character device */ if (xpdev_flag_test(xpdev, XDF_CDEV_USER)) { - destroy_xcdev(&xpdev->user_cdev); + rv = destroy_xcdev(&xpdev->user_cdev); + if (rv < 0) + pr_err("Failed to destroy user cdev %d error 0x%x\n", + i, rv); } if (xpdev_flag_test(xpdev, XDF_CDEV_XVC)) { - destroy_xcdev(&xpdev->xvc_cdev); + rv = destroy_xcdev(&xpdev->xvc_cdev); + if (rv < 0) + pr_err("Failed to destroy xvc cdev %d error 0x%x\n", + i, rv); } if (xpdev_flag_test(xpdev, XDF_CDEV_BYPASS)) { /* iterate over channels */ - for (i = 0; i < xpdev->h2c_channel_max; i++) + for (i = 0; i < xpdev->h2c_channel_max; i++) { /* remove DMA Bypass character 
device */ - destroy_xcdev(&xpdev->bypass_h2c_cdev[i]); - for (i = 0; i < xpdev->c2h_channel_max; i++) - destroy_xcdev(&xpdev->bypass_c2h_cdev[i]); - destroy_xcdev(&xpdev->bypass_cdev_base); + rv = destroy_xcdev(&xpdev->bypass_h2c_cdev[i]); + if (rv < 0) + pr_err("Failed to destroy bypass h2c cdev %d error 0x%x\n", + i, rv); + } + for (i = 0; i < xpdev->c2h_channel_max; i++) { + rv = destroy_xcdev(&xpdev->bypass_c2h_cdev[i]); + if (rv < 0) + pr_err("Failed to destroy bypass c2h %d error 0x%x\n", + i, rv); + } + rv = destroy_xcdev(&xpdev->bypass_cdev_base); + if (rv < 0) + pr_err("Failed to destroy base cdev\n"); } if (xpdev->major) - unregister_chrdev_region(MKDEV(xpdev->major, XDMA_MINOR_BASE), XDMA_MINOR_COUNT); + unregister_chrdev_region( + MKDEV(xpdev->major, XDMA_MINOR_BASE), + XDMA_MINOR_COUNT); } int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) @@ -452,9 +527,8 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) } xpdev_flag_set(xpdev, XDF_CDEV_SG); - /* ??? Bypass */ /* Initialize Bypass Character Device */ - if (xdev->bypass_bar_idx > 0){ + if (xdev->bypass_bar_idx > 0) { for (i = 0; i < xpdev->h2c_channel_max; i++) { engine = &xdev->engine_h2c[i]; @@ -519,7 +593,7 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) rv = device_create_file(&xpdev->pdev->dev, &dev_attr_xdma_dev_instance); if (rv) { - pr_err("Failed to create device file \n"); + pr_err("Failed to create device file\n"); goto fail; } #endif @@ -535,10 +609,10 @@ int xpdev_create_interfaces(struct xdma_pci_dev *xpdev) int xdma_cdev_init(void) { g_xdma_class = class_create(THIS_MODULE, XDMA_NODE_NAME); - if (IS_ERR(g_xdma_class)) { - dbg_init(XDMA_NODE_NAME ": failed to create class"); - return -1; - } + if (IS_ERR(g_xdma_class)) { + dbg_init(XDMA_NODE_NAME ": failed to create class"); + return -EINVAL; + } return 0; } diff --git a/sdk/linux_kernel_drivers/xdma/xdma_cdev.h b/sdk/linux_kernel_drivers/xdma/xdma_cdev.h index 47441fcaf..3361e8ebd 100644 --- a/sdk/linux_kernel_drivers/xdma/xdma_cdev.h +++ b/sdk/linux_kernel_drivers/xdma/xdma_cdev.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver - * Copyright(c) 2015 - 2017 Xilinx, Inc. + * Copyright(c) 2015 - 2020 Xilinx, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -21,6 +21,7 @@ * Karen Xie * ******************************************************************************/ + #ifndef __XDMA_CHRDEV_H__ #define __XDMA_CHRDEV_H__ @@ -39,13 +40,13 @@ int xdma_cdev_init(void); int char_open(struct inode *inode, struct file *file); int char_close(struct inode *inode, struct file *file); -int xcdev_check(const char *, struct xdma_cdev *, bool); - +int xcdev_check(const char *fname, struct xdma_cdev *xcdev, bool check_engine); void cdev_ctrl_init(struct xdma_cdev *xcdev); void cdev_xvc_init(struct xdma_cdev *xcdev); void cdev_event_init(struct xdma_cdev *xcdev); void cdev_sgdma_init(struct xdma_cdev *xcdev); void cdev_bypass_init(struct xdma_cdev *xcdev); +long char_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); void xpdev_destroy_interfaces(struct xdma_pci_dev *xpdev); int xpdev_create_interfaces(struct xdma_pci_dev *xpdev); diff --git a/sdk/linux_kernel_drivers/xdma/xdma_ioctl.h b/sdk/linux_kernel_drivers/xdma/xdma_ioctl.h deleted file mode 100755 index a250a1de8..000000000 --- a/sdk/linux_kernel_drivers/xdma/xdma_ioctl.h +++ /dev/null @@ -1,78 +0,0 @@ -/******************************************************************************* - * - * Xilinx XDMA IP Core Linux Driver - * - * Copyright(c) Sidebranch. - * Copyright(c) Xilinx, Inc. - * - * Karen Xie - * Leon Woestenberg - * - ******************************************************************************/ -#ifndef _XDMA_IOCALLS_POSIX_H_ -#define _XDMA_IOCALLS_POSIX_H_ - -#include - -/* Use 'x' as magic number */ -#define XDMA_IOC_MAGIC 'x' -/* XL OpenCL X->58(ASCII), L->6C(ASCII), O->0 C->C L->6C(ASCII); */ -#define XDMA_XCL_MAGIC 0X586C0C6C - -#define IOCTL_XDMA_PERF_V1 (1) -#define XDMA_ADDRMODE_MEMORY (0) -#define XDMA_ADDRMODE_FIXED (1) - -/* - * S means "Set" through a ptr, - * T means "Tell" directly with the argument value - * G means "Get": reply by setting through a pointer - * Q means "Query": response is on the return value - * X means "eXchange": switch G and S atomically - * H means "sHift": switch T and Q atomically - * - * _IO(type,nr) no arguments - * _IOR(type,nr,datatype) read data from driver - * _IOW(type,nr.datatype) write data to driver - * _IORW(type,nr,datatype) read/write data - * - * _IOC_DIR(nr) returns direction - * _IOC_TYPE(nr) returns magic - * _IOC_NR(nr) returns number - * _IOC_SIZE(nr) returns size - */ - -enum XDMA_IOC_TYPES { - XDMA_IOC_NOP, - XDMA_IOC_INFO, - XDMA_IOC_MAX -}; - -struct xdma_ioc_base { - unsigned int magic; - unsigned int command; -}; - -struct xdma_ioc_info { - struct xdma_ioc_base base; - unsigned short vendor; - unsigned short device; - unsigned short subsystem_vendor; - unsigned short subsystem_device; - unsigned dma_engine_version; - unsigned driver_version; - unsigned long long feature_id; - unsigned short domain; - unsigned char bus; - unsigned char dev; - unsigned char func; -}; - -/* IOCTL codes */ -#define XDMA_IOCINFO _IOWR(XDMA_IOC_MAGIC, XDMA_IOC_INFO, struct xdma_ioc_info) - -#define IOCTL_XDMA_ADDRMODE_SET _IOW('q', 4, int) -#define IOCTL_XDMA_ADDRMODE_GET _IOR('q', 5, int) -#define IOCTL_XDMA_ALIGN_GET _IOR('q', 6, int) - -#endif /* _XDMA_IOCALLS_POSIX_H_ */ diff --git a/sdk/linux_kernel_drivers/xdma/xdma_mod.c b/sdk/linux_kernel_drivers/xdma/xdma_mod.c old mode 100755 new mode 100644 index 3b0943220..b9dbfcfe6 --- a/sdk/linux_kernel_drivers/xdma/xdma_mod.c +++ 
b/sdk/linux_kernel_drivers/xdma/xdma_mod.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Xilinx XDMA IP Core Linux Driver - * Copyright(c) 2015 - 2017 Xilinx, Inc. + * Copyright(c) 2015 - 2020 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -21,6 +21,7 @@ * Karen Xie * ******************************************************************************/ + #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include @@ -35,8 +36,7 @@ #include "version.h" #define DRV_MODULE_NAME "xdma" -#define DRV_MODULE_DESC "Xilinx XDMA Classic Driver" -#define DRV_MODULE_RELDATE "Feb. 2017" +#define DRV_MODULE_DESC "Xilinx XDMA Reference Driver" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; @@ -47,48 +47,52 @@ MODULE_VERSION(DRV_MODULE_VERSION); MODULE_LICENSE("GPL v2"); /* SECTION: Module global variables */ -static int xpdev_cnt = 0; +static int xpdev_cnt; static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE(0x10ee, 0x9048), }, + { PCI_DEVICE(0x10ee, 0x9044), }, + { PCI_DEVICE(0x10ee, 0x9042), }, + { PCI_DEVICE(0x10ee, 0x9041), }, { PCI_DEVICE(0x10ee, 0x903f), }, { PCI_DEVICE(0x10ee, 0x9038), }, { PCI_DEVICE(0x10ee, 0x9028), }, - { PCI_DEVICE(0x10ee, 0x9018), }, + { PCI_DEVICE(0x10ee, 0x9018), }, { PCI_DEVICE(0x10ee, 0x9034), }, { PCI_DEVICE(0x10ee, 0x9024), }, - { PCI_DEVICE(0x10ee, 0x9014), }, + { PCI_DEVICE(0x10ee, 0x9014), }, { PCI_DEVICE(0x10ee, 0x9032), }, { PCI_DEVICE(0x10ee, 0x9022), }, - { PCI_DEVICE(0x10ee, 0x9012), }, + { PCI_DEVICE(0x10ee, 0x9012), }, { PCI_DEVICE(0x10ee, 0x9031), }, { PCI_DEVICE(0x10ee, 0x9021), }, - { PCI_DEVICE(0x10ee, 0x9011), }, + { PCI_DEVICE(0x10ee, 0x9011), }, { PCI_DEVICE(0x10ee, 0x8011), }, { PCI_DEVICE(0x10ee, 0x8012), }, - { PCI_DEVICE(0x10ee, 0x8014), }, - { PCI_DEVICE(0x10ee, 0x8018), }, - { PCI_DEVICE(0x10ee, 0x8021), }, - { PCI_DEVICE(0x10ee, 0x8022), }, - { PCI_DEVICE(0x10ee, 0x8024), }, - { PCI_DEVICE(0x10ee, 0x8028), }, - { PCI_DEVICE(0x10ee, 0x8031), }, - { PCI_DEVICE(0x10ee, 0x8032), }, - { PCI_DEVICE(0x10ee, 0x8034), }, - { PCI_DEVICE(0x10ee, 0x8038), }, - - { PCI_DEVICE(0x10ee, 0x7011), }, - { PCI_DEVICE(0x10ee, 0x7012), }, - { PCI_DEVICE(0x10ee, 0x7014), }, - { PCI_DEVICE(0x10ee, 0x7018), }, - { PCI_DEVICE(0x10ee, 0x7021), }, - { PCI_DEVICE(0x10ee, 0x7022), }, - { PCI_DEVICE(0x10ee, 0x7024), }, + { PCI_DEVICE(0x10ee, 0x8014), }, + { PCI_DEVICE(0x10ee, 0x8018), }, + { PCI_DEVICE(0x10ee, 0x8021), }, + { PCI_DEVICE(0x10ee, 0x8022), }, + { PCI_DEVICE(0x10ee, 0x8024), }, + { PCI_DEVICE(0x10ee, 0x8028), }, + { PCI_DEVICE(0x10ee, 0x8031), }, + { PCI_DEVICE(0x10ee, 0x8032), }, + { PCI_DEVICE(0x10ee, 0x8034), }, + { PCI_DEVICE(0x10ee, 0x8038), }, + + { PCI_DEVICE(0x10ee, 0x7011), }, + { PCI_DEVICE(0x10ee, 0x7012), }, + { PCI_DEVICE(0x10ee, 0x7014), }, + { PCI_DEVICE(0x10ee, 0x7018), }, + { PCI_DEVICE(0x10ee, 0x7021), }, + { PCI_DEVICE(0x10ee, 0x7022), }, + { PCI_DEVICE(0x10ee, 0x7024), }, { PCI_DEVICE(0x10ee, 0x7028), }, - { PCI_DEVICE(0x10ee, 0x7031), }, - { PCI_DEVICE(0x10ee, 0x7032), }, - { PCI_DEVICE(0x10ee, 0x7034), }, - { PCI_DEVICE(0x10ee, 0x7038), }, + { PCI_DEVICE(0x10ee, 0x7031), }, + { PCI_DEVICE(0x10ee, 0x7032), }, + { PCI_DEVICE(0x10ee, 0x7034), }, + { PCI_DEVICE(0x10ee, 0x7038), }, { PCI_DEVICE(0x10ee, 0x6828), }, { PCI_DEVICE(0x10ee, 0x6830), }, @@ -105,13 +109,12 @@ static const struct pci_device_id pci_ids[] = { { 
 	{ PCI_DEVICE(0x10ee, 0x4B28), },
 	{ PCI_DEVICE(0x10ee, 0x2808), },
+	{ PCI_DEVICE(0x1d0f, 0xf000), },
+	{ PCI_DEVICE(0x1d0f, 0xf001), },
-	{ PCI_DEVICE(0x10ee, 0x2808), },
-
-	{ PCI_DEVICE(0x1d0f, 0xf000), },
-	{ PCI_DEVICE(0x1d0f, 0xf001), },
-	{ PCI_DEVICE(0x1d0f, 0x1042), },
-
+#ifdef INTERNAL_TESTING
+	{ PCI_DEVICE(0x1d0f, 0x1042), 0},
+#endif
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
@@ -132,7 +135,7 @@ static void xpdev_free(struct xdma_pci_dev *xpdev)
 
 static struct xdma_pci_dev *xpdev_alloc(struct pci_dev *pdev)
 {
-	struct xdma_pci_dev *xpdev = kmalloc(sizeof(*xpdev), GFP_KERNEL);
+	struct xdma_pci_dev *xpdev = kmalloc(sizeof(*xpdev), GFP_KERNEL);
 
 	if (!xpdev)
 		return NULL;
@@ -161,12 +164,28 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	hndl = xdma_device_open(DRV_MODULE_NAME, pdev, &xpdev->user_max,
 			&xpdev->h2c_channel_max, &xpdev->c2h_channel_max);
-	if (!hndl)
-		return -EINVAL;
+	if (!hndl) {
+		rv = -EINVAL;
+		goto err_out;
+	}
 
-	BUG_ON(xpdev->user_max > MAX_USER_IRQ);
-	BUG_ON(xpdev->h2c_channel_max > XDMA_CHANNEL_NUM_MAX);
-	BUG_ON(xpdev->c2h_channel_max > XDMA_CHANNEL_NUM_MAX);
+	if (xpdev->user_max > MAX_USER_IRQ) {
+		pr_err("Maximum users limit reached\n");
+		rv = -EINVAL;
+		goto err_out;
+	}
+
+	if (xpdev->h2c_channel_max > XDMA_CHANNEL_NUM_MAX) {
+		pr_err("Maximum H2C channel limit reached\n");
+		rv = -EINVAL;
+		goto err_out;
+	}
+
+	if (xpdev->c2h_channel_max > XDMA_CHANNEL_NUM_MAX) {
+		pr_err("Maximum C2H channel limit reached\n");
+		rv = -EINVAL;
+		goto err_out;
+	}
 
 	if (!xpdev->h2c_channel_max && !xpdev->c2h_channel_max)
 		pr_warn("NO engine found!\n");
@@ -183,9 +202,15 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	xdev = xdev_find_by_pdev(pdev);
 	if (!xdev) {
 		pr_warn("NO xdev found!\n");
-		return -EINVAL;
+		rv = -EINVAL;
+		goto err_out;
+	}
+
+	if (hndl != xdev) {
+		pr_err("xdev handle mismatch\n");
+		rv = -EINVAL;
+		goto err_out;
 	}
-	BUG_ON(hndl != xdev );
 
 	pr_info("%s xdma%d, pdev 0x%p, xdev 0x%p, 0x%p, usr %d, ch %d,%d.\n",
 		dev_name(&pdev->dev), xdev->idx, pdev, xpdev, xdev,
@@ -198,11 +223,11 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rv)
 		goto err_out;
 
-	dev_set_drvdata(&pdev->dev, xpdev);
+	dev_set_drvdata(&pdev->dev, xpdev);
 
 	return 0;
 
-err_out:
+err_out:
 	pr_err("pdev 0x%p, err %d.\n", pdev, rv);
 	xpdev_free(xpdev);
 	return rv;
@@ -223,7 +248,7 @@ static void remove_one(struct pci_dev *pdev)
 		pdev, xpdev, xpdev->xdev);
 	xpdev_free(xpdev);
-	dev_set_drvdata(&pdev->dev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
 }
 
 static pci_ers_result_t xdma_error_detected(struct pci_dev *pdev,
@@ -274,7 +299,7 @@ static void xdma_error_resume(struct pci_dev *pdev)
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
+#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
 static void xdma_reset_prepare(struct pci_dev *pdev)
 {
 	struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
@@ -291,7 +316,7 @@ static void xdma_reset_done(struct pci_dev *pdev)
 	xdma_device_online(pdev, xpdev->xdev);
 }
 
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)
+#elif KERNEL_VERSION(3, 16, 0) <= LINUX_VERSION_CODE
 static void xdma_reset_notify(struct pci_dev *pdev, bool prepare)
 {
 	struct xdma_pci_dev *xpdev = dev_get_drvdata(&pdev->dev);
@@ -309,10 +334,10 @@ static const struct pci_error_handlers xdma_err_handler = {
 	.error_detected = xdma_error_detected,
 	.slot_reset = xdma_slot_reset,
 	.resume = xdma_error_resume,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
+#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
 	.reset_prepare = xdma_reset_prepare,
 	.reset_done = xdma_reset_done,
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)
+#elif KERNEL_VERSION(3, 16, 0) <= LINUX_VERSION_CODE
 	.reset_notify = xdma_reset_notify,
 #endif
 };
@@ -328,8 +353,6 @@ static struct pci_driver pci_driver = {
 static int __init xdma_mod_init(void)
 {
 	int rv;
-	extern unsigned int desc_blen_max;
-	extern unsigned int sgdma_timeout;
 
 	pr_info("%s", version);
 
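The probe_one() hunks above replace BUG_ON() asserts, which panic the kernel on bad input, with recoverable failures that report via pr_err()/pr_warn() and unwind through the shared err_out label. A stand-alone sketch of that validate-and-unwind pattern; fake_dev, CHANNEL_MAX, and probe() are hypothetical stand-ins, not driver code:

    /* Sketch only: validate, set an error code, and unwind through a
     * single label instead of aborting, mirroring the probe_one() edit. */
    #include <stdio.h>
    #include <stdlib.h>

    #define CHANNEL_MAX 4

    struct fake_dev {
            int channels;
    };

    static int probe(int requested_channels)
    {
            struct fake_dev *dev = malloc(sizeof(*dev));
            int rv = 0;

            if (!dev)
                    return -1;

            if (requested_channels > CHANNEL_MAX) {
                    /* Recoverable failure: report and unwind, no abort() */
                    fprintf(stderr, "channel limit reached\n");
                    rv = -1;
                    goto err_out;
            }

            dev->channels = requested_channels;
            printf("probed with %d channels\n", dev->channels);
            free(dev);
            return 0;

    err_out:
            free(dev);      /* one cleanup point, like xpdev_free() */
            return rv;
    }

    int main(void)
    {
            probe(2);       /* succeeds */
            probe(8);       /* fails gracefully */
            return 0;
    }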
diff --git a/sdk/linux_kernel_drivers/xdma/xdma_mod.h b/sdk/linux_kernel_drivers/xdma/xdma_mod.h
old mode 100755
new mode 100644
index 0ede7a080..abea67ee0
--- a/sdk/linux_kernel_drivers/xdma/xdma_mod.h
+++ b/sdk/linux_kernel_drivers/xdma/xdma_mod.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 *
 * Xilinx XDMA IP Core Linux Driver
-* Copyright(c) 2015 - 2017 Xilinx, Inc.
+* Copyright(c) 2015 - 2020 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,7 @@
 * Karen Xie
 *
 ******************************************************************************/
+
 #ifndef __XDMA_MODULE_H__
 #define __XDMA_MODULE_H__
 
@@ -48,6 +49,7 @@
 #include
 #include
 #include
+#include
 
 #include "libxdma.h"
 
@@ -56,6 +58,9 @@
 #define MAGIC_CHAR 0xCCCCCCCCUL
 #define MAGIC_BITSTREAM 0xBBBBBBBBUL
 
+extern unsigned int desc_blen_max;
+extern unsigned int sgdma_timeout;
+
 struct xdma_cdev {
 	unsigned long magic;	/* structure ID for sanity checks */
 	struct xdma_pci_dev *xpdev;
diff --git a/shared/lib/check_src_headers.py b/shared/lib/check_src_headers.py
index 8d390a3cd..fca1e8204 100755
--- a/shared/lib/check_src_headers.py
+++ b/shared/lib/check_src_headers.py
@@ -176,7 +176,7 @@
 '''
 
 xilinx_xdma1 = '''Xilinx XDMA IP Core Linux Driver
-Copyright(c) 2015 - 2017 Xilinx, Inc.
+Copyright(c) 2015 - 2020 Xilinx, Inc.
 
 This program is free software; you can redistribute it and/or modify it
 under the terms and conditions of the GNU General Public License,
@@ -194,12 +194,6 @@
 the file called "LICENSE".
 '''
 
-xilinx_xdma2 = '''Xilinx XDMA IP Core Linux Driver
-
-Copyright(c) Sidebranch.
-Copyright(c) Xilinx, Inc.
-'''
-
 xilinx1 = '''\xa9 Copyright 2017 Xilinx, Inc. All rights reserved.
 This file contains confidential and proprietary information
 of Xilinx, Inc. and is protected under U.S. and
@@ -400,7 +394,6 @@
     apache_header_2018.split("\n"),
     gpl2_header.split("\n"),
     xilinx_xdma1.split("\n"),
-    xilinx_xdma2.split("\n"),
     xilinx1.split("\n"),
     xilinx2_header.split("\n"),
     xilinx3_header.split("\n"),
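The xdma_mod.h hunk above moves the desc_blen_max and sgdma_timeout externs out of xdma_mod_init() and into the shared header, so every translation unit sees one declaration that the compiler checks against the single definition. A minimal single-file sketch of that pattern; the value shown is illustrative, not the driver's default:

    /* Sketch only: the extern-in-header pattern adopted above. */
    #include <stdio.h>

    /* In the real layout this declaration lives in xdma_mod.h: */
    extern unsigned int desc_blen_max;

    /* ...and exactly one .c file provides the definition: */
    unsigned int desc_blen_max = 0x1000;    /* illustrative value */

    int main(void)
    {
            /* Any file including the header sees the same object. */
            printf("desc_blen_max = 0x%x\n", desc_blen_max);
            return 0;
    }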