@@ -472,6 +472,16 @@ static void kernfs_drain(struct kernfs_node *kn)
 	lockdep_assert_held_write(&root->kernfs_rwsem);
 	WARN_ON_ONCE(kernfs_active(kn));
 
+	/*
+	 * Skip draining if already fully drained. This avoids draining and its
+	 * lockdep annotations for nodes which have never been activated
+	 * allowing embedding kernfs_remove() in create error paths without
+	 * worrying about draining.
+	 */
+	if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
+	    !kernfs_should_drain_open_files(kn))
+		return;
+
 	up_write(&root->kernfs_rwsem);
 
 	if (kernfs_lockdep(kn)) {
@@ -480,7 +490,6 @@ static void kernfs_drain(struct kernfs_node *kn)
 		lock_contended(&kn->dep_map, _RET_IP_);
 	}
 
-	/* but everyone should wait for draining */
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
@@ -489,7 +498,8 @@ static void kernfs_drain(struct kernfs_node *kn)
 		rwsem_release(&kn->dep_map, _RET_IP_);
 	}
 
-	kernfs_drain_open_files(kn);
+	if (kernfs_should_drain_open_files(kn))
+		kernfs_drain_open_files(kn);
 
 	down_write(&root->kernfs_rwsem);
 }
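The early return added at the top of kernfs_drain() means kernfs_remove() can be embedded in create error paths without paying for draining or its lockdep annotations, since never-activated nodes sit at KN_DEACTIVATED_BIAS with no open files. A minimal sketch of such a caller, assuming a root created with KERNFS_ROOT_CREATE_DEACTIVATED; populate_example() and its error handling are hypothetical, not part of this patch:

	static int example_create(struct kernfs_node *parent, void *priv)
	{
		struct kernfs_node *kn;
		int ret;

		/* nodes on a CREATE_DEACTIVATED root start deactivated */
		kn = kernfs_create_dir(parent, "example", 0755, priv);
		if (IS_ERR(kn))
			return PTR_ERR(kn);

		ret = populate_example(kn);	/* hypothetical helper */
		if (ret) {
			/* never activated: kernfs_drain() returns early */
			kernfs_remove(kn);
			return ret;
		}

		kernfs_activate(kn);
		return 0;
	}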
@@ -695,13 +705,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
 			goto err_unlock;
 	}
 
-	/*
-	 * ACTIVATED is protected with kernfs_mutex but it was clear when
-	 * @kn was added to idr and we just wanna see it set. No need to
-	 * grab kernfs_mutex.
-	 */
-	if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
-		     !atomic_inc_not_zero(&kn->count)))
+	if (unlikely(!kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
 		goto err_unlock;
 
 	spin_unlock(&kernfs_idr_lock);
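The replacement test relies on kernfs_active(), which reads the live deactivation bias instead of the KERNFS_ACTIVATED flag. Conceptually it is just the following check (simplified here; the in-tree helper also carries a lockdep assertion):

	static bool kernfs_active(struct kernfs_node *kn)
	{
		/* deactivated nodes carry the negative KN_DEACTIVATED_BIAS */
		return atomic_read(&kn->active) >= 0;
	}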
@@ -743,10 +747,7 @@ int kernfs_add_one(struct kernfs_node *kn)
 		goto out_unlock;
 
 	ret = -ENOENT;
-	if (parent->flags & KERNFS_EMPTY_DIR)
-		goto out_unlock;
-
-	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
+	if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
 		goto out_unlock;
 
 	kn->hash = kernfs_name_hash(kn->name, kn->ns);
@@ -1304,6 +1305,21 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
 	return pos->parent;
 }
 
+static void kernfs_activate_one(struct kernfs_node *kn)
+{
+	lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
+
+	kn->flags |= KERNFS_ACTIVATED;
+
+	if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
+		return;
+
+	WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+	WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+
+	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+}
+
 /**
  * kernfs_activate - activate a node which started deactivated
  * @kn: kernfs_node whose subtree is to be activated
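kernfs_activate_one() depends on the active-count convention used throughout dir.c: KN_DEACTIVATED_BIAS is added to kn->active on deactivation, so a drained, inactive node sits exactly at the bias and a single atomic_sub() restores it to zero. A commented summary of that invariant, stated here only for orientation:

	/*
	 * kn->active >= 0                    node is active; the value counts
	 *                                    in-flight kernfs_get_active() refs
	 * kn->active == KN_DEACTIVATED_BIAS  node is deactivated and drained
	 *
	 * kernfs_activate_one() warns if the node is not exactly at the bias
	 * before flipping it back to 0.
	 */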
@@ -1325,15 +1341,42 @@ void kernfs_activate(struct kernfs_node *kn)
 	down_write(&root->kernfs_rwsem);
 
 	pos = NULL;
-	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (pos->flags & KERNFS_ACTIVATED)
-			continue;
+	while ((pos = kernfs_next_descendant_post(pos, kn)))
+		kernfs_activate_one(pos);
+
+	up_write(&root->kernfs_rwsem);
+}
 
-		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
-		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
+/**
+ * kernfs_show - show or hide a node
+ * @kn: kernfs_node to show or hide
+ * @show: whether to show or hide
+ *
+ * If @show is %false, @kn is marked hidden and deactivated. A hidden node is
+ * ignored in future activations. If %true, the mark is removed and activation
+ * state is restored. This function won't implicitly activate a new node in a
+ * %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
+ *
+ * To avoid recursion complexities, directories aren't supported for now.
+ */
+void kernfs_show(struct kernfs_node *kn, bool show)
+{
+	struct kernfs_root *root = kernfs_root(kn);
 
-		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
-		pos->flags |= KERNFS_ACTIVATED;
+	if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
+		return;
+
+	down_write(&root->kernfs_rwsem);
+
+	if (show) {
+		kn->flags &= ~KERNFS_HIDDEN;
+		if (kn->flags & KERNFS_ACTIVATED)
+			kernfs_activate_one(kn);
+	} else {
+		kn->flags |= KERNFS_HIDDEN;
+		if (kernfs_active(kn))
+			atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
+		kernfs_drain(kn);
 	}
 
 	up_write(&root->kernfs_rwsem);
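Because kernfs_activate_one() sets KERNFS_ACTIVATED even on hidden nodes, a node can be hidden before its subtree is activated and made visible later without extra state tracking. An illustrative sequence (not part of this patch), assuming hypothetical, already-created nodes @dir and @kn on a KERNFS_ROOT_CREATE_DEACTIVATED root:

	static void example_defer_visibility(struct kernfs_node *dir,
					     struct kernfs_node *kn)
	{
		/* hide first: the subsequent activation skips @kn */
		kernfs_show(kn, false);
		kernfs_activate(dir);

		/* later: un-hide; @kn has KERNFS_ACTIVATED set, so it goes live */
		kernfs_show(kn, true);
	}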
@@ -1358,34 +1401,27 @@ static void __kernfs_remove(struct kernfs_node *kn)
 
 	pr_debug("kernfs %s: removing\n", kn->name);
 
-	/* prevent any new usage under @kn by deactivating all nodes */
+	/* prevent new usage by marking all nodes removing and deactivating */
 	pos = NULL;
-	while ((pos = kernfs_next_descendant_post(pos, kn)))
+	while ((pos = kernfs_next_descendant_post(pos, kn))) {
+		pos->flags |= KERNFS_REMOVING;
 		if (kernfs_active(pos))
 			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
+	}
 
 	/* deactivate and unlink the subtree node-by-node */
 	do {
 		pos = kernfs_leftmost_descendant(kn);
 
 		/*
-		 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
+		 * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
 		 * base ref could have been put by someone else by the time
 		 * the function returns. Make sure it doesn't go away
 		 * underneath us.
 		 */
 		kernfs_get(pos);
 
-		/*
-		 * Drain iff @kn was activated. This avoids draining and
-		 * its lockdep annotations for nodes which have never been
-		 * activated and allows embedding kernfs_remove() in create
-		 * error paths without worrying about draining.
-		 */
-		if (kn->flags & KERNFS_ACTIVATED)
-			kernfs_drain(pos);
-		else
-			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+		kernfs_drain(pos);
 
 		/*
 		 * kernfs_unlink_sibling() succeeds once per node. Use it