 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
@@ -164,10 +165,22 @@ struct fsync_iocb {
164165 bool datasync ;
165166};
166167
/*
 * State for one in-flight IOCB_CMD_POLL request.
 *
 * ->wait and ->work deliberately share storage: the request sits on the
 * file's waitqueue through ->wait until an event fires (aio_poll_wake)
 * or the request is cancelled (aio_poll_cancel); only after it has been
 * removed from the queue is the slot reused as a work item to complete
 * the iocb from process context.  The two members are never live at the
 * same time.
 */
struct poll_iocb {
	struct file		*file;
	__poll_t		events;	/* requested events; reused to carry the result mask */
	struct wait_queue_head	*head;	/* waitqueue returned by ->get_poll_head() */

	union {
		struct wait_queue_entry	wait;	/* while queued on ->head */
		struct work_struct	work;	/* after removal, for deferred completion */
	};
};
178+
167179struct aio_kiocb {
168180 union {
169181 struct kiocb rw ;
170182 struct fsync_iocb fsync ;
183+ struct poll_iocb poll ;
171184 };
172185
173186 struct kioctx * ki_ctx ;
@@ -1558,7 +1571,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
15581571 if (unlikely (iocb -> aio_buf || iocb -> aio_offset || iocb -> aio_nbytes ||
15591572 iocb -> aio_rw_flags ))
15601573 return - EINVAL ;
1561-
15621574 req -> file = fget (iocb -> aio_fildes );
15631575 if (unlikely (!req -> file ))
15641576 return - EBADF ;
@@ -1573,6 +1585,124 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
15731585 return - EIOCBQUEUED ;
15741586}
15751587
1588+ /* need to use list_del_init so we can check if item was present */
1589+ static inline bool __aio_poll_remove (struct poll_iocb * req )
1590+ {
1591+ if (list_empty (& req -> wait .entry ))
1592+ return false;
1593+ list_del_init (& req -> wait .entry );
1594+ return true;
1595+ }
1596+
/*
 * Complete a poll iocb with the given result mask and drop the file
 * reference taken in aio_poll().
 *
 * NOTE(review): req->file is loaded into a local *before* aio_complete()
 * — presumably because completing the iocb can free the containing
 * aio_kiocb (and with it req), so req must not be touched afterwards.
 * Confirm against aio_complete()'s lifetime rules before reordering.
 */
static inline void __aio_poll_complete(struct poll_iocb *req, __poll_t mask)
{
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct file *file = req->file;

	aio_complete(iocb, mangle_poll(mask), 0);
	fput(file);
}
1605+
/*
 * Process-context completion for a poll request, scheduled from
 * aio_poll_wake() or aio_poll_cancel() once the request has been removed
 * from its waitqueue.  By this point req->events holds the result mask
 * (the ready events, or 0 for a cancelled request).
 */
static void aio_poll_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);

	__aio_poll_complete(req, req->events);
}
1612+
1613+ static int aio_poll_cancel (struct kiocb * iocb )
1614+ {
1615+ struct aio_kiocb * aiocb = container_of (iocb , struct aio_kiocb , rw );
1616+ struct poll_iocb * req = & aiocb -> poll ;
1617+ struct wait_queue_head * head = req -> head ;
1618+ bool found = false;
1619+
1620+ spin_lock (& head -> lock );
1621+ found = __aio_poll_remove (req );
1622+ spin_unlock (& head -> lock );
1623+
1624+ if (found ) {
1625+ req -> events = 0 ;
1626+ INIT_WORK (& req -> work , aio_poll_work );
1627+ schedule_work (& req -> work );
1628+ }
1629+ return 0 ;
1630+ }
1631+
/*
 * Waitqueue callback, invoked with req->head->lock held when the file
 * signals readiness.
 *
 * Returns 0 (stay queued) when the wakeup key does not intersect the
 * requested events or when ->poll_mask() reports nothing ready;
 * otherwise removes the entry from the queue, defers completion to a
 * work item, and returns 1.
 *
 * NOTE(review): completion is punted to a workqueue rather than done
 * here — presumably because this callback can run in atomic/wakeup
 * context where aio_complete()/fput() are not safe; confirm.
 */
static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		void *key)
{
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct file *file = req->file;
	__poll_t mask = key_to_poll(key);

	assert_spin_locked(&req->head->lock);

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))
		return 0;

	/* re-query the full ready set; a zero key carries no event info */
	mask = file->f_op->poll_mask(file, req->events);
	if (!mask)
		return 0;

	__aio_poll_remove(req);

	/* ->wait is off the queue; reuse req->events to carry the result
	 * and its storage slot (the union) as the work item */
	req->events = mask;
	INIT_WORK(&req->work, aio_poll_work);
	schedule_work(&req->work);
	return 1;
}
1656+
/*
 * Implement IOCB_CMD_POLL: wait for the poll events encoded in
 * iocb->aio_buf on the file referenced by iocb->aio_fildes.
 *
 * Returns -EIOCBQUEUED on success (the request either completed inline
 * or was queued on the file's waitqueue), -EBADF for a bad descriptor,
 * and -EINVAL for malformed iocbs or files without ->poll_mask support.
 */
static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
{
	struct kioctx *ctx = aiocb->ki_ctx;
	struct poll_iocb *req = &aiocb->poll;
	__poll_t mask;

	/* reject any unknown events outside the normal event mask. */
	if ((u16)iocb->aio_buf != iocb->aio_buf)
		return -EINVAL;
	/* reject fields that are not defined for poll */
	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
		return -EINVAL;

	/* error and hangup are always of interest, as with poll(2) */
	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
	req->file = fget(iocb->aio_fildes);
	if (unlikely(!req->file))
		return -EBADF;
	if (!file_has_poll_mask(req->file))
		goto out_fail;

	req->head = req->file->f_op->get_poll_head(req->file, req->events);
	if (!req->head)
		goto out_fail;
	if (IS_ERR(req->head)) {
		/* cannot wait on this file: complete immediately with an error */
		mask = EPOLLERR;
		goto done;
	}

	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
	aiocb->ki_cancel = aio_poll_cancel;

	/*
	 * ctx_lock (irqs off) nests outside the waitqueue lock.  The
	 * request is queued on the waitqueue and made cancellable (on
	 * ->active_reqs) in one critical section, and only if no event
	 * is ready yet; otherwise it completes inline below.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	spin_lock(&req->head->lock);
	mask = req->file->f_op->poll_mask(req->file, req->events);
	if (!mask) {
		__add_wait_queue(req->head, &req->wait);
		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
	}
	spin_unlock(&req->head->lock);
	spin_unlock_irq(&ctx->ctx_lock);
done:
	if (mask)
		__aio_poll_complete(req, mask);
	return -EIOCBQUEUED;
out_fail:
	fput(req->file);
	return -EINVAL; /* same as no support for IOCB_CMD_POLL */
}
1705+
15761706static int io_submit_one (struct kioctx * ctx , struct iocb __user * user_iocb ,
15771707 struct iocb * iocb , bool compat )
15781708{
@@ -1641,6 +1771,8 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
16411771 break ;
16421772 case IOCB_CMD_FDSYNC :
16431773 ret = aio_fsync (& req -> fsync , iocb , true);
1774+ case IOCB_CMD_POLL :
1775+ ret = aio_poll (req , iocb );
16441776 break ;
16451777 default :
16461778 pr_debug ("invalid aio operation %d\n" , iocb -> aio_lio_opcode );