@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
+#include <linux/nospec.h>
 #include <linux/io_uring.h>

 #include <uapi/linux/io_uring.h>
@@ -206,3 +207,109 @@ void init_hash_table(struct io_hash_table *table, unsigned size)
 		INIT_HLIST_HEAD(&table->hbs[i].list);
 	}
 }
+
+static int __io_sync_cancel(struct io_uring_task *tctx,
+			    struct io_cancel_data *cd, int fd)
+{
+	struct io_ring_ctx *ctx = cd->ctx;
+
+	/* fixed must be grabbed every time since we drop the uring_lock */
+	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
+	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+		unsigned long file_ptr;
+
+		if (unlikely(fd >= ctx->nr_user_files))
+			return -EBADF;
+		fd = array_index_nospec(fd, ctx->nr_user_files);
+		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+		cd->file = (struct file *) (file_ptr & FFS_MASK);
+		if (!cd->file)
+			return -EBADF;
+	}
+
+	return __io_async_cancel(cd, tctx, 0);
+}
+
+int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_cancel_data cd = {
+		.ctx = ctx,
+		.seq = atomic_inc_return(&ctx->cancel_seq),
+	};
+	ktime_t timeout = KTIME_MAX;
+	struct io_uring_sync_cancel_reg sc;
+	struct fd f = { };
+	DEFINE_WAIT(wait);
+	int ret;
+
+	if (copy_from_user(&sc, arg, sizeof(sc)))
+		return -EFAULT;
+	if (sc.flags & ~CANCEL_FLAGS)
+		return -EINVAL;
+	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
+		return -EINVAL;
+
+	cd.data = sc.addr;
+	cd.flags = sc.flags;
+
+	/* we can grab a normal file descriptor upfront */
+	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
+	    !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+		f = fdget(sc.fd);
+		if (!f.file)
+			return -EBADF;
+		cd.file = f.file;
+	}
+
+	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+	/* found something, done! */
+	if (ret != -EALREADY)
+		goto out;
+
+	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
+		struct timespec64 ts = {
+			.tv_sec = sc.timeout.tv_sec,
+			.tv_nsec = sc.timeout.tv_nsec
+		};
+
+		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+	}
+
+	/*
+	 * Keep looking until we get -ENOENT. We'll get woken every time a
+	 * request completes and will retry the cancelation.
+	 */
+	do {
+		cd.seq = atomic_inc_return(&ctx->cancel_seq);
+
+		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);
+
+		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+		if (ret != -EALREADY)
+			break;
+
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_run_task_work_sig();
+		if (ret < 0) {
+			mutex_lock(&ctx->uring_lock);
+			break;
+		}
+		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
+		mutex_lock(&ctx->uring_lock);
+		if (!ret) {
+			ret = -ETIME;
+			break;
+		}
+	} while (1);
+
+	finish_wait(&ctx->cq_wait, &wait);
+
+	if (ret == -ENOENT || ret > 0)
+		ret = 0;
+out:
+	fdput(f);
+	return ret;
+}
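
For reference, here is a userspace sketch (not part of this commit) of how the synchronous cancel path added above might be driven, assuming the register opcode wired up to io_sync_cancel() is IORING_REGISTER_SYNC_CANCEL and that struct io_uring_sync_cancel_reg is exported in the uapi header; ring_fd and sync_cancel_by_user_data() are illustrative names only. Per the timeout handling above, passing tv_sec and tv_nsec both as -1 requests an unbounded wait (timeout stays KTIME_MAX).

/*
 * Sketch only: synchronously cancel pending requests matching a user_data
 * value, waiting up to 1 ms for requests that are already in flight.
 * Assumes a kernel with this patch; ring_fd is an already set up ring.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int sync_cancel_by_user_data(int ring_fd, __u64 user_data)
{
	struct io_uring_sync_cancel_reg reg;

	memset(&reg, 0, sizeof(reg));	/* pad[] must be zero or the kernel returns -EINVAL */
	reg.addr = user_data;		/* match on user_data; no FD-based flags set */
	reg.timeout.tv_sec = 0;		/* a {-1, -1} timeout would mean "wait forever"; */
	reg.timeout.tv_nsec = 1000000;	/* here we bound the wait to 1 ms */

	/* raw syscall; a liburing helper may be available as well */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_SYNC_CANCEL, &reg, 1);
}

Because io_sync_cancel() keeps returning -EALREADY while matching requests are still executing, a small timeout like the 1 ms above bounds how long the caller blocks; without it the loop only exits on -ENOENT, a signal, or completion of the matched requests.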