@@ -657,7 +657,7 @@ static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max)
 }
 
 struct reftable_addition {
-	struct lock_file tables_list_lock;
+	struct reftable_flock tables_list_lock;
 	struct reftable_stack *stack;
 
 	char **new_tables;
@@ -676,10 +676,8 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
 
 	add->stack = st;
 
-	err = hold_lock_file_for_update_timeout(&add->tables_list_lock,
-						st->list_file,
-						LOCK_NO_DEREF,
-						st->opts.lock_timeout_ms);
+	err = flock_acquire(&add->tables_list_lock, st->list_file,
+			    st->opts.lock_timeout_ms);
 	if (err < 0) {
 		if (errno == EEXIST) {
 			err = REFTABLE_LOCK_ERROR;
@@ -689,7 +687,7 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
 		goto done;
 	}
 	if (st->opts.default_permissions) {
-		if (chmod(get_lock_file_path(&add->tables_list_lock),
+		if (chmod(add->tables_list_lock.path,
 			  st->opts.default_permissions) < 0) {
 			err = REFTABLE_IO_ERROR;
 			goto done;
@@ -733,7 +731,7 @@ static void reftable_addition_close(struct reftable_addition *add)
 	add->new_tables_len = 0;
 	add->new_tables_cap = 0;
 
-	rollback_lock_file(&add->tables_list_lock);
+	flock_release(&add->tables_list_lock);
 	reftable_buf_release(&nm);
 }
 
@@ -749,7 +747,6 @@ void reftable_addition_destroy(struct reftable_addition *add)
 int reftable_addition_commit(struct reftable_addition *add)
 {
 	struct reftable_buf table_list = REFTABLE_BUF_INIT;
-	int lock_file_fd = get_lock_file_fd(&add->tables_list_lock);
 	int err = 0;
 	size_t i;
 
@@ -767,20 +764,20 @@ int reftable_addition_commit(struct reftable_addition *add)
 		goto done;
 	}
 
-	err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
+	err = write_in_full(add->tables_list_lock.fd, table_list.buf, table_list.len);
 	reftable_buf_release(&table_list);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		goto done;
 	}
 
-	err = stack_fsync(&add->stack->opts, lock_file_fd);
+	err = stack_fsync(&add->stack->opts, add->tables_list_lock.fd);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		goto done;
 	}
 
-	err = commit_lock_file(&add->tables_list_lock);
+	err = flock_commit(&add->tables_list_lock);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		goto done;
@@ -1160,8 +1157,8 @@ static int stack_compact_range(struct reftable_stack *st,
 	struct reftable_buf new_table_name = REFTABLE_BUF_INIT;
 	struct reftable_buf new_table_path = REFTABLE_BUF_INIT;
 	struct reftable_buf table_name = REFTABLE_BUF_INIT;
-	struct lock_file tables_list_lock = LOCK_INIT;
-	struct lock_file *table_locks = NULL;
+	struct reftable_flock tables_list_lock = REFTABLE_FLOCK_INIT;
+	struct reftable_flock *table_locks = NULL;
 	struct reftable_tmpfile new_table = REFTABLE_TMPFILE_INIT;
 	int is_empty_table = 0, err = 0;
 	size_t first_to_replace, last_to_replace;
@@ -1179,10 +1176,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * Hold the lock so that we can read "tables.list" and lock all tables
 	 * which are part of the user-specified range.
 	 */
-	err = hold_lock_file_for_update_timeout(&tables_list_lock,
-						st->list_file,
-						LOCK_NO_DEREF,
-						st->opts.lock_timeout_ms);
+	err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
 	if (err < 0) {
 		if (errno == EEXIST)
 			err = REFTABLE_LOCK_ERROR;
@@ -1205,19 +1199,20 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * older process is still busy compacting tables which are preexisting
 	 * from the point of view of the newer process.
 	 */
-	REFTABLE_CALLOC_ARRAY(table_locks, last - first + 1);
+	REFTABLE_ALLOC_ARRAY(table_locks, last - first + 1);
 	if (!table_locks) {
 		err = REFTABLE_OUT_OF_MEMORY_ERROR;
 		goto done;
 	}
+	for (i = 0; i < last - first + 1; i++)
+		table_locks[i] = REFTABLE_FLOCK_INIT;
 
 	for (i = last + 1; i > first; i--) {
 		err = stack_filename(&table_name, st, reader_name(st->readers[i - 1]));
 		if (err < 0)
 			goto done;
 
-		err = hold_lock_file_for_update(&table_locks[nlocks],
-						table_name.buf, LOCK_NO_DEREF);
+		err = flock_acquire(&table_locks[nlocks], table_name.buf, 0);
 		if (err < 0) {
 			/*
 			 * When the table is locked already we may do a
@@ -1253,7 +1248,7 @@ static int stack_compact_range(struct reftable_stack *st,
 		 * run into file descriptor exhaustion when we compress a lot
 		 * of tables.
 		 */
-		err = close_lock_file_gently(&table_locks[nlocks++]);
+		err = flock_close(&table_locks[nlocks++]);
 		if (err < 0) {
 			err = REFTABLE_IO_ERROR;
 			goto done;
@@ -1265,7 +1260,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * "tables.list" lock while compacting the locked tables. This allows
 	 * concurrent updates to the stack to proceed.
 	 */
-	err = rollback_lock_file(&tables_list_lock);
+	err = flock_release(&tables_list_lock);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		goto done;
@@ -1288,10 +1283,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * "tables.list". We'll then replace the compacted range of tables with
 	 * the new table.
 	 */
-	err = hold_lock_file_for_update_timeout(&tables_list_lock,
-						st->list_file,
-						LOCK_NO_DEREF,
-						st->opts.lock_timeout_ms);
+	err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
 	if (err < 0) {
 		if (errno == EEXIST)
 			err = REFTABLE_LOCK_ERROR;
@@ -1301,7 +1293,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	}
 
 	if (st->opts.default_permissions) {
-		if (chmod(get_lock_file_path(&tables_list_lock),
+		if (chmod(tables_list_lock.path,
 			  st->opts.default_permissions) < 0) {
 			err = REFTABLE_IO_ERROR;
 			goto done;
@@ -1456,22 +1448,22 @@ static int stack_compact_range(struct reftable_stack *st,
 		goto done;
 	}
 
-	err = write_in_full(get_lock_file_fd(&tables_list_lock),
+	err = write_in_full(tables_list_lock.fd,
 			    tables_list_buf.buf, tables_list_buf.len);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		unlink(new_table_path.buf);
 		goto done;
 	}
 
-	err = stack_fsync(&st->opts, get_lock_file_fd(&tables_list_lock));
+	err = stack_fsync(&st->opts, tables_list_lock.fd);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		unlink(new_table_path.buf);
 		goto done;
 	}
 
-	err = commit_lock_file(&tables_list_lock);
+	err = flock_commit(&tables_list_lock);
 	if (err < 0) {
 		err = REFTABLE_IO_ERROR;
 		unlink(new_table_path.buf);
@@ -1492,22 +1484,21 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * readers, so it is expected that unlinking tables may fail.
 	 */
 	for (i = 0; i < nlocks; i++) {
-		struct lock_file *table_lock = &table_locks[i];
-		const char *lock_path = get_lock_file_path(table_lock);
+		struct reftable_flock *table_lock = &table_locks[i];
 
 		reftable_buf_reset(&table_name);
-		err = reftable_buf_add(&table_name, lock_path,
-				       strlen(lock_path) - strlen(".lock"));
+		err = reftable_buf_add(&table_name, table_lock->path,
+				       strlen(table_lock->path) - strlen(".lock"));
 		if (err)
 			continue;
 
 		unlink(table_name.buf);
 	}
 
 done:
-	rollback_lock_file(&tables_list_lock);
+	flock_release(&tables_list_lock);
 	for (i = 0; table_locks && i < nlocks; i++)
-		rollback_lock_file(&table_locks[i]);
+		flock_release(&table_locks[i]);
 	reftable_free(table_locks);
 
 	tmpfile_delete(&new_table);
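
Note (not part of the commit): the call sites above only depend on a small locking interface: a struct exposing fd and path members, a REFTABLE_FLOCK_INIT initializer, and the functions flock_acquire(), flock_close(), flock_commit() and flock_release(). The sketch below shows one set of declarations consistent with that usage; the exact field types and signatures are assumptions, since the real declarations live in the reftable system header, which this diff does not touch.

/*
 * Hypothetical sketch of the reftable_flock interface, inferred purely from
 * the call sites in the diff above; not the authoritative declarations.
 */
struct reftable_flock {
	char *path; /* path of the ".lock" file while the lock is held */
	int fd;     /* writable descriptor of the lock file, -1 when unset */
};
#define REFTABLE_FLOCK_INIT ((struct reftable_flock){ .fd = -1 })

/*
 * Acquire "<target_path>.lock", retrying for up to timeout_ms milliseconds
 * (0 meaning fail right away if the lock is already taken, as used for the
 * per-table locks above). On success the lock's fd is open for writing the
 * replacement contents of target_path.
 */
int flock_acquire(struct reftable_flock *l, const char *target_path,
		  long timeout_ms);

/* Close the lock's file descriptor while keeping the lock file in place. */
int flock_close(struct reftable_flock *l);

/* Rename the lock file onto target_path, publishing the written contents. */
int flock_commit(struct reftable_flock *l);

/*
 * Drop the lock without committing; given the unconditional calls at the
 * done: label above, this has to be a no-op when the lock was never
 * acquired or was already committed.
 */
int flock_release(struct reftable_flock *l);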