@@ -218,8 +218,7 @@ struct eventpoll {
 	struct file *file;
 
 	/* used to optimize loop detection check */
-	struct list_head visited_list_link;
-	int visited;
+	u64 gen;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	/* used to track busy poll napi_id */
@@ -274,6 +273,8 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
 
+static u64 loop_check_gen = 0;
+
 /* Used to check for epoll file descriptor inclusion loops */
 static struct nested_calls poll_loop_ncalls;
 
@@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
-static LIST_HEAD(visited_list);
-
 /*
  * List of files with newly added links, where we may need to limit the number
  * of emanating paths. Protected by the epmutex.
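The struct and global changes above implement a common idiom: instead of a visited flag plus a visited_list that must be walked to unset every flag, a node counts as visited when its gen matches a global generation counter, and clearing all marks is one increment. A minimal standalone sketch of the idiom (hypothetical names, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t check_gen = 1;  /* bumping this invalidates every mark */

    struct node {
        uint64_t gen;               /* "visited" iff gen == check_gen */
    };

    static int visited(const struct node *n)
    {
        return n->gen == check_gen;
    }

    int main(void)
    {
        struct node a = { 0 }, b = { 0 };

        a.gen = check_gen;          /* mark a as visited */
        printf("a:%d b:%d\n", visited(&a), visited(&b));  /* a:1 b:0 */

        check_gen++;                /* O(1) "unmark everything" */
        printf("a:%d b:%d\n", visited(&a), visited(&b));  /* a:0 b:0 */
        return 0;
    }

A 64-bit counter will not wrap in practice, so a stale gen can never be mistaken for the current sweep; that is what lets the later hunks drop visited_list and the unmark loop in ep_loop_check().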
@@ -1450,7 +1448,7 @@ static int reverse_path_check(void)
 
 static int ep_create_wakeup_source(struct epitem *epi)
 {
-	const char *name;
+	struct name_snapshot n;
 	struct wakeup_source *ws;
 
 	if (!epi->ep->ws) {
@@ -1459,8 +1457,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
 		return -ENOMEM;
 	}
 
-	name = epi->ffd.file->f_path.dentry->d_name.name;
-	ws = wakeup_source_register(NULL, name);
+	take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
+	ws = wakeup_source_register(NULL, n.name.name);
+	release_dentry_name_snapshot(&n);
 
 	if (!ws)
 		return -ENOMEM;
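The point of the snapshot: f_path.dentry->d_name can be changed by a concurrent rename, so passing the raw d_name.name pointer to wakeup_source_register() risks reading bytes that are being replaced or freed mid-call. take_dentry_name_snapshot() pins a stable copy, released as soon as registration is done. The same copy-before-use discipline in plain C (illustrative only; these names are hypothetical, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static char shared_name[64] = "current-name";   /* a renamer may rewrite this */
    static pthread_mutex_t name_lock = PTHREAD_MUTEX_INITIALIZER;

    struct name_snap { char buf[64]; };

    /* Copy under the lock instead of holding a pointer into shared state. */
    static void take_snapshot(struct name_snap *s)
    {
        pthread_mutex_lock(&name_lock);
        strncpy(s->buf, shared_name, sizeof(s->buf) - 1);
        s->buf[sizeof(s->buf) - 1] = '\0';
        pthread_mutex_unlock(&name_lock);
    }

    int main(void)
    {
        struct name_snap s;

        take_snapshot(&s);
        /* s.buf stays valid even if shared_name changes right now */
        printf("register with: %s\n", s.buf);
        return 0;
    }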
@@ -1522,6 +1521,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
 
+	/* Add the current item to the list of active epoll hook for this file */
+	spin_lock(&tfile->f_lock);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_lock);
+
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
+	ep_rbtree_insert(ep, epi);
+
+	/* now check if we've created too many backpaths */
+	error = -EINVAL;
+	if (full_check && reverse_path_check())
+		goto error_remove_epi;
+
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
 	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1544,22 +1559,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	if (epi->nwait < 0)
 		goto error_unregister;
 
-	/* Add the current item to the list of active epoll hook for this file */
-	spin_lock(&tfile->f_lock);
-	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-	spin_unlock(&tfile->f_lock);
-
-	/*
-	 * Add the current item to the RB tree. All RB tree operations are
-	 * protected by "mtx", and ep_insert() is called with "mtx" held.
-	 */
-	ep_rbtree_insert(ep, epi);
-
-	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
-
 	/* We have to drop the new item inside our item list to keep track of it */
 	write_lock_irq(&ep->lock);
 
@@ -1588,16 +1587,15 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
 	return 0;
 
+error_unregister:
+	ep_unregister_pollwait(ep, epi);
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
 	list_del_rcu(&epi->fllink);
 	spin_unlock(&tfile->f_lock);
 
 	rb_erase_cached(&epi->rbn, &ep->rbr);
 
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-
 	/*
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue. Note that we don't care about the ep->ovflist
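The three ep_insert() hunks are one move: linking into the file list, inserting into the RB tree, and reverse_path_check() now all run before the poll-table callback can hook the target's waitqueues, so an insert rejected by the path check never has wait callbacks registered and cannot start receiving events. The error labels are reordered to match, because the gotos unwind in exact reverse of setup order. That idiom, sketched with hypothetical step names:

    #include <stdio.h>

    static int step_link(void)     { return 0; }   /* e.g. file-list link */
    static int step_insert(void)   { return 0; }   /* e.g. rb-tree insert + checks */
    static int step_register(void) { return -1; }  /* e.g. poll-wait hookup, fails */

    static void undo_link(void)   { puts("undo link"); }
    static void undo_insert(void) { puts("undo insert"); }

    static int do_insert(void)
    {
        int err;

        if ((err = step_link()))
            goto out;
        if ((err = step_insert()))
            goto undo_link;
        if ((err = step_register()))
            goto undo_insert;
        return 0;

    undo_insert:        /* labels run in reverse setup order, falling through */
        undo_insert();
    undo_link:
        undo_link();
    out:
        return err;
    }

    int main(void)
    {
        printf("insert: %d\n", do_insert());  /* prints the undos, then -1 */
        return 0;
    }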
@@ -1972,13 +1970,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
 	struct epitem *epi;
 
 	mutex_lock_nested(&ep->mtx, call_nests + 1);
-	ep->visited = 1;
-	list_add(&ep->visited_list_link, &visited_list);
+	ep->gen = loop_check_gen;
 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		if (unlikely(is_file_epoll(epi->ffd.file))) {
 			ep_tovisit = epi->ffd.file->private_data;
-			if (ep_tovisit->visited)
+			if (ep_tovisit->gen == loop_check_gen)
 				continue;
 			error = ep_call_nested(&poll_loop_ncalls,
 					ep_loop_check_proc, epi->ffd.file,
@@ -2019,18 +2016,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
  */
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 {
-	int ret;
-	struct eventpoll *ep_cur, *ep_next;
-
-	ret = ep_call_nested(&poll_loop_ncalls,
+	return ep_call_nested(&poll_loop_ncalls,
 			      ep_loop_check_proc, file, ep, current);
-	/* clear visited list */
-	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
-							visited_list_link) {
-		ep_cur->visited = 0;
-		list_del(&ep_cur->visited_list_link);
-	}
-	return ret;
 }
 
 static void clear_tfile_check_list(void)
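With marks invalidated by a bump of loop_check_gen, ep_loop_check() no longer needs its "clear visited list" epilogue and collapses to the single ep_call_nested() call. For the shape of what that call does: a depth-bounded walk (the kernel bounds nesting with EP_MAX_NESTS) that marks each eventpoll with the current generation so already-scanned ones are skipped. A deliberately simplified standalone sketch (here a revisit along the single chain doubles as the loop test; the kernel instead skips revisits and detects loops via the ep_call_nested() cookie chain):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NESTS 4

    static uint64_t loop_gen = 1;

    struct ep {
        uint64_t gen;
        struct ep *child;       /* one watched epoll, for brevity */
    };

    /* -1 on a loop or excessive nesting, 0 otherwise */
    static int loop_check(struct ep *ep, int depth)
    {
        if (depth > MAX_NESTS)
            return -1;
        ep->gen = loop_gen;     /* mark: seen in this sweep */
        if (!ep->child)
            return 0;
        if (ep->child->gen == loop_gen)
            return -1;          /* back to a node from this sweep */
        return loop_check(ep->child, depth + 1);
    }

    int main(void)
    {
        struct ep a = { 0, 0 }, b = { 0, 0 };

        a.child = &b;
        b.child = &a;                          /* a -> b -> a */
        printf("%d\n", loop_check(&a, 0));     /* -1 */
        loop_gen++;                            /* the whole cleanup */
        return 0;
    }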
@@ -2195,11 +2182,13 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 			goto error_tgt_fput;
 		if (op == EPOLL_CTL_ADD) {
 			if (!list_empty(&f.file->f_ep_links) ||
+					ep->gen == loop_check_gen ||
 						is_file_epoll(tf.file)) {
 				mutex_unlock(&ep->mtx);
 				error = epoll_mutex_lock(&epmutex, 0, nonblock);
 				if (error)
 					goto error_tgt_fput;
+				loop_check_gen++;
 				full_check = 1;
 				if (is_file_epoll(tf.file)) {
 					error = -ELOOP;
@@ -2263,6 +2252,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
 error_tgt_fput:
 	if (full_check) {
 		clear_tfile_check_list();
+		loop_check_gen++;
 		mutex_unlock(&epmutex);
 	}
 
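The two increments bracket every full check: the one added after taking epmutex starts the sweep with no matching marks, and the one added at error_tgt_fput (reached on success and failure alike) turns every mark left on an eventpoll stale the moment the check ends. The ep->gen == loop_check_gen test added above sends an insert down the full-check path whenever ep still carries the current generation's mark. In miniature, with hypothetical names:

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t epmutex = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t loop_check_gen;     /* written only with epmutex held */

    static void run_loop_checks(void)
    {
        /* would mark nodes with the current loop_check_gen */
    }

    static void full_check(void)
    {
        pthread_mutex_lock(&epmutex);
        loop_check_gen++;   /* no earlier mark can match this sweep */
        run_loop_checks();
        loop_check_gen++;   /* every mark made above is now stale */
        pthread_mutex_unlock(&epmutex);
    }

    int main(void)
    {
        full_check();
        return 0;
    }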