@@ -172,21 +172,20 @@ struct cloudsync_context {
     int schema_version;
     uint64_t schema_hash;

-    // set at the start of each transaction on the first invocation and
-    // re-set on transaction commit or rollback
+    // set at transaction start and reset on commit/rollback
     db_int64 db_version;
-    // the version that the db will be set to at the end of the transaction
-    // if that transaction were to commit at the time this value is checked
+    // version the DB would have if the transaction committed now
     db_int64 pending_db_version;
     // used to set an order inside each transaction
     int seq;

-    // augmented tables are stored in-memory so we do not need to retrieve information about col names and cid
-    // from the disk each time a write statement is performed
-    // we do also not need to use an hash map here because for few tables the direct in-memory comparison with table name is faster
-    cloudsync_table_context **tables;
-    int tables_count;
-    int tables_alloc;
+    // augmented tables are stored in-memory so we do not need to retrieve information about
+    // col_names and cid from the disk each time a write statement is performed
+    // we also do not need to use a hash map here because, with only a few tables, a direct
+    // in-memory comparison with the table name is faster
+    cloudsync_table_context **tables;       // dense vector: [0..tables_count-1] are valid
+    int tables_count;                       // size
+    int tables_cap;                         // capacity
 };

 typedef struct {
@@ -864,8 +863,7 @@ int table_add_stmts (sqlite3 *db, cloudsync_table_context *table, int ncols) {
 cloudsync_table_context *table_lookup (cloudsync_context *data, const char *table_name) {
     DEBUG_DBFUNCTION ("table_lookup %s", table_name);

-    for (int i = 0; i < data->tables_alloc; ++i) {
-        if (data->tables[i] == NULL) continue;
+    for (int i = 0; i < data->tables_count; ++i) {
         if ((strcasecmp (data->tables[i]->name, table_name) == 0)) return data->tables[i];
     }

@@ -890,14 +888,19 @@ int table_remove (cloudsync_context *data, cloudsync_table_context *table) {
     const char *table_name = table->name;
     DEBUG_DBFUNCTION ("table_remove %s", table_name);

-    for (int i = 0; i < data->tables_alloc; ++i) {
-        if (data->tables[i] == NULL) continue;
-        if ((strcasecmp (data->tables[i]->name, table_name) == 0)) {
-            data->tables[i] = NULL;
-            --data->tables_count;
+    for (int i = 0; i < data->tables_count; ++i) {
+        cloudsync_table_context *t = data->tables[i];
+
+        // pointer comparison is fastest, but fall back to strcasecmp when it is not the same pointer
+        if ((t == table) || ((strcasecmp (t->name, table_name) == 0))) {
+            int last = data->tables_count - 1;
+            data->tables[i] = data->tables[last];   // move the last entry into the hole (keeps the array dense)
+            data->tables[last] = NULL;              // NULL the tail slot (as an extra safety measure)
+            data->tables_count--;
             return data->tables_count;
         }
     }
+
     return -1;
 }

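The rewritten table_remove keeps the tables array dense by moving the last entry into the freed slot, so lookups and iteration only ever scan indices 0..tables_count-1 and removal stays O(1) once the entry is found. A minimal standalone sketch of that swap-with-last idea, assuming a simplified string vector; the names vec and vec_remove are illustrative and not part of the cloudsync code:

#include <stdio.h>
#include <string.h>

#define CAP 8

typedef struct {
    char *items[CAP];   // dense vector: only [0..count-1] are valid
    int count;
} vec;

// unordered removal: move the last element into the hole, keep the array dense
static int vec_remove (vec *v, const char *name) {
    for (int i = 0; i < v->count; ++i) {
        if (strcmp (v->items[i], name) == 0) {
            int last = v->count - 1;
            v->items[i] = v->items[last];   // last entry fills the freed slot
            v->items[last] = NULL;          // clear the tail slot
            v->count--;
            return v->count;                // mirrors table_remove's return value
        }
    }
    return -1;                              // not found
}

int main (void) {
    vec v = {{"alpha", "beta", "gamma"}, 3};
    vec_remove (&v, "alpha");               // "gamma" is moved into slot 0
    for (int i = 0; i < v.count; ++i) printf ("%s\n", v.items[i]);
    return 0;
}

The trade-off is that removal does not preserve insertion order, which is harmless here because table_lookup is a linear scan and never relies on ordering.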
@@ -939,28 +942,28 @@ int table_add_to_context_cb (void *xdata, int ncols, char **values, char **names
     return 0;
 }

+bool table_ensure_capacity (cloudsync_context *data) {
+    if (data->tables_count < data->tables_cap) return true;
+
+    int new_cap = data->tables_cap ? data->tables_cap * 2 : CLOUDSYNC_INIT_NTABLES;
+    size_t bytes = (size_t)new_cap * sizeof (*data->tables);
+    void *p = cloudsync_memory_realloc (data->tables, bytes);
+    if (!p) return false;
+
+    data->tables = (cloudsync_table_context **)p;
+    data->tables_cap = new_cap;
+    return true;
+}
+
 bool table_add_to_context (sqlite3 *db, cloudsync_context *data, table_algo algo, const char *table_name) {
     DEBUG_DBFUNCTION ("cloudsync_context_add_table %s", table_name);

     // check if table is already in the global context and in that case just return
     cloudsync_table_context *table = table_lookup (data, table_name);
     if (table) return true;

-    // is there any space available?
-    if (data->tables_alloc <= data->tables_count + 1) {
-        // realloc tables
-        cloudsync_table_context **clone = (cloudsync_table_context **)cloudsync_memory_realloc (data->tables, sizeof (cloudsync_table_context) * data->tables_alloc + CLOUDSYNC_INIT_NTABLES);
-        if (!clone) goto abort_add_table;
-
-        // reset new entries
-        for (int i = data->tables_alloc; i < data->tables_alloc + CLOUDSYNC_INIT_NTABLES; ++i) {
-            clone[i] = NULL;
-        }
-
-        // replace old ptr
-        data->tables = clone;
-        data->tables_alloc += CLOUDSYNC_INIT_NTABLES;
-    }
+    // check for space availability
+    if (!table_ensure_capacity (data)) return false;

     // setup a new table context
     table = table_create (table_name, algo);
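table_ensure_capacity replaces the old inline realloc, which appears to have sized the buffer with sizeof (cloudsync_table_context) rather than the pointer size and added CLOUDSYNC_INIT_NTABLES bytes rather than entries. The helper also grows geometrically (doubling, starting from CLOUDSYNC_INIT_NTABLES), so a sequence of appends costs amortized O(1) each. A rough standalone sketch of the same grow-then-append pattern, using plain realloc and illustrative names (ptr_vec, ensure_capacity, append) instead of the cloudsync allocator:

#include <stdio.h>
#include <stdlib.h>

#define INIT_CAP 4   // stands in for CLOUDSYNC_INIT_NTABLES

typedef struct {
    void **items;
    int count;
    int cap;
} ptr_vec;

// grow only when full; double the capacity each time (amortized O(1) appends)
static int ensure_capacity (ptr_vec *v) {
    if (v->count < v->cap) return 1;

    int new_cap = v->cap ? v->cap * 2 : INIT_CAP;
    void *p = realloc (v->items, (size_t)new_cap * sizeof (*v->items));
    if (!p) return 0;                       // on failure the old buffer is still valid

    v->items = p;
    v->cap = new_cap;
    return 1;
}

static int append (ptr_vec *v, void *item) {
    if (!ensure_capacity (v)) return 0;
    v->items[v->count++] = item;            // same append used by table_add_to_context
    return 1;
}

int main (void) {
    ptr_vec v = {NULL, 0, 0};
    int values[10];
    for (int i = 0; i < 10; ++i) append (&v, &values[i]);
    printf ("count=%d cap=%d\n", v.count, v.cap);   // prints count=10 cap=16
    free (v.items);
    return 0;
}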
@@ -1018,15 +1021,8 @@ bool table_add_to_context (sqlite3 *db, cloudsync_context *data, table_algo algo
         if (rc == SQLITE_ABORT) goto abort_add_table;
     }

-    // lookup the first free slot
-    for (int i = 0; i < data->tables_alloc; ++i) {
-        if (data->tables[i] == NULL) {
-            data->tables[i] = table;
-            if (i > data->tables_count - 1) ++data->tables_count;
-            break;
-        }
-    }
-
+    // append newly created table
+    data->tables[data->tables_count++] = table;
     return true;

 abort_add_table:
@@ -1584,7 +1580,7 @@ cloudsync_context *cloudsync_context_create (void) {
     data->tables = (cloudsync_table_context **)cloudsync_memory_zeroalloc (mem_needed);
     if (!data->tables) {cloudsync_memory_free (data); return NULL;}

-    data->tables_alloc = CLOUDSYNC_INIT_NTABLES;
+    data->tables_cap = CLOUDSYNC_INIT_NTABLES;
     data->tables_count = 0;

     return data;
@@ -2520,9 +2516,11 @@ int cloudsync_load_siteid (db_t *db, cloudsync_context *data) {
 }

 int cloudsync_terminate (cloudsync_context *data) {
-    for (int i = 0; i < data->tables_alloc; ++i) {
-        if (data->tables[i]) table_free (data->tables[i]);
-        data->tables[i] = NULL;
+    // can't use a for loop here because data->tables_count is changed by table_remove
+    while (data->tables_count > 0) {
+        cloudsync_table_context *t = data->tables[data->tables_count - 1];
+        table_remove (data, t);
+        table_free (t);
     }

     if (data->schema_version_stmt) database_finalize (data->schema_version_stmt);
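The teardown in cloudsync_terminate pops from the tail rather than indexing forward: since table_remove now compacts the array by swapping the last entry into the hole and decrementing tables_count, a forward for loop over a shrinking count would pass over the entry that was just swapped in. Removing the last element makes the swap a no-op, so every table is visited exactly once. A small sketch of the hazard and the pop-from-the-tail fix, with illustrative names only:

#include <stdio.h>

// simplified stand-ins for the cloudsync table vector
static int items[5] = {10, 20, 30, 40, 50};
static int count = 5;

// remove by index: move the last element into the hole (same idea as table_remove)
static void remove_at (int i) {
    items[i] = items[count - 1];
    count--;
}

int main (void) {
    // BUGGY pattern: "for (int i = 0; i < count; ++i) remove_at (i);" skips entries,
    // because the element swapped into slot i is never visited and count shrinks
    // under the loop, ending it early.

    // SAFE pattern (what cloudsync_terminate does): always remove the last element,
    // so nothing is ever swapped over an entry that still has to be visited.
    while (count > 0) {
        int last = items[count - 1];
        remove_at (count - 1);
        printf ("freed %d\n", last);        // table_free (t) would go here
    }
    return 0;
}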