@@ -167,7 +167,6 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
 	struct kref			ref;
 	struct work_struct		del_work;
-	atomic_t			del_work_active;
 };
 
 
@@ -1090,7 +1089,6 @@ nvmet_fc_delete_assoc(struct work_struct *work)
 			container_of(work, struct nvmet_fc_tgt_assoc, del_work);
 
 	nvmet_fc_delete_target_assoc(assoc);
-	atomic_set(&assoc->del_work_active, 0);
 	nvmet_fc_tgt_a_put(assoc);
 }
 
@@ -1123,7 +1121,6 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	INIT_LIST_HEAD(&assoc->a_list);
 	kref_init(&assoc->ref);
 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
-	atomic_set(&assoc->del_work_active, 0);
 	atomic_set(&assoc->terminating, 0);
 
 	while (needrandom) {
@@ -1478,21 +1475,15 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
 	struct nvmet_fc_tgt_assoc *assoc, *next;
 	unsigned long flags;
-	int ret;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
 	list_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
+		if (!schedule_work(&assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 }
@@ -1534,7 +1525,6 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 	struct nvmet_fc_tgt_assoc *assoc, *next;
 	unsigned long flags;
 	bool noassoc = true;
-	int ret;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
 	list_for_each_entry_safe(assoc, next,
@@ -1546,14 +1536,9 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
+		if (!schedule_work(&assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
@@ -1574,7 +1559,6 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	struct nvmet_fc_tgt_queue *queue;
 	unsigned long flags;
 	bool found_ctrl = false;
-	int ret;
 
 	/* this is a bit ugly, but don't want to make locks layered */
 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
@@ -1598,14 +1582,9 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	nvmet_fc_tgtport_put(tgtport);
 
 	if (found_ctrl) {
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
+		if (!schedule_work(&assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 		return;
 	}
 
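Note on why the simplification is safe (context, not part of the patch): schedule_work() returns false when the work item is already pending and true only for the caller that actually queues it, so the workqueue's internal pending bit already provides the "schedule the delete exactly once" guarantee that the del_work_active cmpxchg duplicated. A minimal user-space sketch of that idiom, with a C11 atomic standing in for the work item's pending bit; struct work and fake_schedule_work here are illustrative stand-ins, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the WORK_STRUCT_PENDING bit inside struct work_struct. */
struct work {
	atomic_bool pending;
};

/*
 * Emulates schedule_work(): returns true only for the caller that
 * transitions the work from idle to pending; false if it was already
 * queued by someone else.
 */
static bool fake_schedule_work(struct work *w)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&w->pending, &expected, true);
}

int main(void)
{
	struct work del_work = { .pending = false };

	/* First request wins and "queues" the deletion. */
	printf("first:  %s\n",
	       fake_schedule_work(&del_work) ? "queued" : "already pending");
	/* Second request sees it pending; this mirrors the patched call
	 * sites, which drop their local reference in that case. */
	printf("second: %s\n",
	       fake_schedule_work(&del_work) ? "queued" : "already pending");
	return 0;
}

Built with cc -std=c11, this prints "queued" then "already pending", the two outcomes the patch now distinguishes with a single if (!schedule_work(...)) instead of a separate flag. The local reference taken by nvmet_fc_tgt_a_get() is either consumed by the scheduled work or released immediately when the work was already queued.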