/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE		SZ_128K
#define QAIC_DBC_SIZE		SZ_4K

#define QAIC_NO_PARTITION	-1

#define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
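
/*
 * Worked example (illustrative, not from the driver source): with
 * QAIC_DBC_BASE = 128K and QAIC_DBC_SIZE = 4K, consecutive DBC register
 * blocks land at
 *	QAIC_DBC_OFF(0) == 0x20000
 *	QAIC_DBC_OFF(1) == 0x21000
 *	QAIC_DBC_OFF(2) == 0x22000
 * i.e. each channel owns a 4K window starting 128K into the BAR.
 */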

#define to_qaic_bo(obj)		container_of(obj, struct qaic_bo, base)
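
/*
 * Example use of to_qaic_bo() (a sketch; the callback name is
 * hypothetical): GEM callbacks receive the embedded base object, and
 * container_of() recovers the enclosing qaic_bo:
 *
 *	static void example_free_object(struct drm_gem_object *obj)
 *	{
 *		struct qaic_bo *bo = to_qaic_bo(obj);
 *		...
 *	}
 */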

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int handle;
	struct kref ref_count;
	/* The drm device opened by this user */
	struct qaic_drm_device *qddev;
	/* Node in the list of users that opened this drm device */
	struct list_head node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct qddev_lock;
	atomic_t chunk_id;
};
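
/*
 * Typical reader pattern for qddev_lock (a sketch inferred from the field
 * comment above, not a quote of the driver code): readers bracket use of
 * the user in an SRCU read-side section, and cleanup synchronizes against
 * them before tearing the user down:
 *
 *	int id = srcu_read_lock(&usr->qddev_lock);
 *	... access usr->qddev ...
 *	srcu_read_unlock(&usr->qddev_lock, id);
 */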

struct dma_bridge_chan {
	/* Pointer to the device struct maintained by the driver */
	struct qaic_device *qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int id;
	/* Synchronizes access to xfer_list */
	spinlock_t xfer_lock;
	/* Base address of the request queue */
	void *req_q_base;
	/* Base address of the response queue */
	void *rsp_q_base;
	/*
	 * Base bus address of the request queue. The response queue bus
	 * address can be calculated by adding the request queue size to
	 * this variable
	 */
	dma_addr_t dma_addr;
	/* Total size of the request and response queues in bytes */
	u32 total_size;
	/* Capacity of the request/response queue */
	u32 nelem;
	/* The user that opened this DBC */
	struct qaic_user *usr;
	/*
	 * Request ID of the next memory handle that goes in the request
	 * queue. One memory handle can enqueue more than one request
	 * element; all requests that belong to the same memory handle share
	 * the same request ID
	 */
	u16 next_req_id;
	/* true: DBC is in use; false: DBC is not in use */
	bool in_use;
	/*
	 * Base address of the device registers. Used to read/write the head
	 * and tail pointers of this DBC's request and response queues.
	 */
	void __iomem *dbc_base;
	/* Head of the list where each node is a memory handle queued in the request queue */
	struct list_head xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t dbc_release;
	/* Head of the list where each node is a BO associated with this DBC */
	struct list_head bo_lists;
	/* The IRQ line for this DBC. Used for polling */
	unsigned int irq;
	/* Polling work item to simulate interrupts */
	struct work_struct poll_work;
};
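
/*
 * Illustrative helper (a sketch, not part of the driver API): per the
 * dma_addr comment above, the response queue's bus address follows the
 * request queue, which is assumed here to span nelem request elements
 * starting at dma_addr. get_dbc_req_elem_size() is declared again with
 * the other prototypes at the bottom of this header.
 */
int get_dbc_req_elem_size(void);

static inline dma_addr_t dbc_rsp_q_bus_addr(struct dma_bridge_chan *dbc)
{
	return dbc->dma_addr + (dma_addr_t)dbc->nelem * get_dbc_req_elem_size();
}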

struct qaic_device {
	/* Pointer to the base PCI device struct of our physical device */
	struct pci_dev *pdev;
	/* Req. ID of the request that will be queued next in the MHI control device */
	u32 next_seq_num;
	/* Base address of BAR 0 */
	void __iomem *bar_0;
	/* Base address of BAR 2 */
	void __iomem *bar_2;
	/* Controller structure for MHI devices */
	struct mhi_controller *mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device *cntl_ch;
	/* List of requests queued in the MHI control device */
	struct list_head cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex cntl_mutex;
	/* Array of DBC structs of this device */
	struct dma_bridge_chan *dbc;
	/* Work queue for tasks related to the MHI control device */
	struct workqueue_struct *cntl_wq;
	/* Synchronizes all the users of the device during cleanup */
	struct srcu_struct dev_lock;
	/* true: Device under reset; false: Device not under reset */
	bool in_reset;
	/*
	 * true: A tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: No rx buffer is lost in the control device
	 */
	bool cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32 num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device *qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
};
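
/*
 * Illustrative use of the CRC hooks (a sketch; a message layout with an
 * embedded CRC field is assumed, not taken from the driver):
 *
 *	msg->crc = qdev->gen_crc(msg);	send path
 *
 *	if (!qdev->valid_crc(msg))	receive path
 *		return -EINVAL;
 */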

struct qaic_drm_device {
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device *qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition ID. This
	 * member stores that ID. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device is the actual physical device
	 */
	s32 partition_id;
	/* Pointer to the drm device struct of this drm device */
	struct drm_device *ddev;
	/* Head of the list of users who have opened this drm device */
	struct list_head users;
	/* Synchronizes access to the users list */
	struct mutex users_mutex;
};
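
/*
 * Illustrative traversal (a sketch inferred from the comments above): the
 * users list links qaic_user::node entries and is only walked while
 * holding users_mutex:
 *
 *	mutex_lock(&qddev->users_mutex);
 *	list_for_each_entry(usr, &qddev->users, node)
 *		... per-user work ...
 *	mutex_unlock(&qddev->users_mutex);
 */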

struct qaic_bo {
	struct drm_gem_object base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table *sgt;
	/* BO size requested by user. GEM object might be bigger in size. */
	u64 size;
	/* Head of the list of slices of this BO */
	struct list_head slices;
	/* Total nents, for all slices of this BO */
	int total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int dir;
	/* Pointer to the DBC which operates on this BO */
	struct dma_bridge_chan *dbc;
	/* Number of slices that belong to this buffer */
	u32 nr_slice;
	/* Number of slices that have been transferred by the DMA engine */
	u32 nr_slice_xfer_done;
	/* true = BO is queued for execution, false = BO is not queued */
	bool queued;
	/*
	 * If true then the user has attached slicing information to this BO
	 * by calling the DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool sliced;
	/* Request ID of this BO if it is queued for execution */
	u16 req_id;
	/* Handle assigned to this BO */
	u32 handle;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion xfer_done;
	/*
	 * Node in the linked list whose head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head xfer_list;
	/*
	 * Node in the linked list whose head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC it
	 * is linked to.
	 */
	struct list_head bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * request to execute this BO
		 */
		u64 req_received_ts;
		/*
		 * Latest timestamp (ns) at which the kernel enqueued requests
		 * of this BO for execution in the DMA queue
		 */
		u64 req_submit_ts;
		/*
		 * Latest timestamp (ns) at which the kernel received a
		 * completion interrupt for requests of this BO
		 */
		u64 req_processed_ts;
		/*
		 * Number of elements already enqueued in the DMA queue before
		 * enqueuing requests of this BO
		 */
		u32 queue_level_before;
	} perf_stats;
};
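
/*
 * Illustrative derivation (a sketch, not driver code): the perf_stats
 * timestamps bracket the life of the latest execution request, so, for
 * example, submit-to-completion time is
 *
 *	latency_ns = bo->perf_stats.req_processed_ts -
 *		     bo->perf_stats.req_submit_ts;
 */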

struct bo_slice {
	/* Mapped pages */
	struct sg_table *sgt;
	/* Number of requests required to queue in the DMA queue */
	int nents;
	/* See enum dma_data_direction */
	int dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req *reqs;
	struct kref ref_count;
	/* true: No DMA transfer required */
	bool no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo *bo;
	/* Node in the list of slices maintained by the parent BO */
	struct list_head slice;
	/* Size of this slice in bytes */
	u64 size;
	/* Offset of this slice in the buffer */
	u64 offset;
};
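
/*
 * Illustrative invariant check (a sketch; the helper name is
 * hypothetical): a slice is assumed to cover bytes [offset, offset + size)
 * of its parent BO, so it must fit within the user-requested BO size.
 */
static inline bool bo_slice_in_bounds(const struct bo_slice *slice)
{
	return slice->offset + slice->size <= slice->bo->size;
}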

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);
void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);
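
/*
 * Note on polling mode (inferred from the comments above, not a quote of
 * the driver): when datapath_polling is set, completions are detected by
 * running poll_work on each DBC rather than waiting for its IRQ line to
 * fire; irq_polling_work() is the body of that work item.
 */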

#endif /* _QAIC_H_ */