@@ -324,7 +324,10 @@ static int munmap_file(struct cio_ctx *ctx, struct cio_chunk *ch)
     }
 
     /* Unmap file */
-    cio_file_native_unmap(cf);
+    ret = cio_file_native_unmap(cf);
+    if (ret != CIO_OK) {
+        return -1;
+    }
 
     cf->data_size = 0;
     cf->alloc_size = 0;
@@ -343,6 +346,7 @@ static int mmap_file(struct cio_ctx *ctx, struct cio_chunk *ch, size_t size)
 {
     ssize_t content_size;
     size_t fs_size;
+    size_t requested_map_size;
     int ret;
     struct cio_file *cf;
 
@@ -413,6 +417,7 @@ static int mmap_file(struct cio_ctx *ctx, struct cio_chunk *ch, size_t size)
     cf->alloc_size = size;
 
     /* Map the file */
+    requested_map_size = cf->alloc_size;
    ret = cio_file_native_map(cf, cf->alloc_size);
 
     if (ret != CIO_OK) {
@@ -421,6 +426,21 @@ static int mmap_file(struct cio_ctx *ctx, struct cio_chunk *ch, size_t size)
         return CIO_ERROR;
     }
 
+    if ((cf->flags & CIO_OPEN_RD) && requested_map_size != cf->alloc_size) {
+        if (cf->map_truncated_warned == CIO_FALSE) {
+            cio_log_warn(ctx,
+                         "[cio file] truncated read-only map from %zu to %zu bytes: %s/%s",
+                         requested_map_size,
+                         cf->alloc_size,
+                         ch->st->name,
+                         ch->name);
+            cf->map_truncated_warned = CIO_TRUE;
+        }
+    }
+    else {
+        cf->map_truncated_warned = CIO_FALSE;
+    }
+
     /* check content data size */
     if (fs_size > 0) {
         content_size = cio_file_st_get_content_len(cf->map,
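
The block above is a warn-once latch: the warning fires the first time a read-only mapping comes back smaller than requested and stays quiet until a full-size mapping re-arms it (this reading assumes, as the comparison suggests, that cio_file_native_map() adjusts cf->alloc_size to the size it actually mapped). A small, self-contained sketch of the same latch with hypothetical names:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool warned = false;

    /* warn on the first shortfall, stay quiet on repeats, re-arm when it clears */
    static void check_map(size_t requested, size_t mapped)
    {
        if (mapped != requested) {
            if (!warned) {
                fprintf(stderr, "truncated map: %zu -> %zu\n", requested, mapped);
                warned = true;
            }
        }
        else {
            warned = false;
        }
    }

    int main(void)
    {
        check_map(100, 80);   /* warns once   */
        check_map(100, 80);   /* stays quiet  */
        check_map(100, 100);  /* re-arms      */
        check_map(100, 80);   /* warns again  */
        return 0;
    }
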
@@ -664,6 +684,9 @@ struct cio_file *cio_file_open(struct cio_ctx *ctx,
     cf->crc_cur = cio_crc32_init();
     cf->path = path;
     cf->map = NULL;
+    cf->ctx = ctx;
+    cf->auto_remap_warned = CIO_FALSE;
+    cf->map_truncated_warned = CIO_FALSE;
     ch->backend = cf;
 
 #ifdef _WIN32
@@ -801,9 +824,9 @@ static int _cio_file_up(struct cio_chunk *ch, int enforced)
         return CIO_ERROR;
     }
 
-    if (cf->fd > 0) {
+    if (cio_file_native_is_open(cf)) {
         cio_log_error(ch->ctx, "[cio file] file descriptor already exists: "
-                      "[fd=%i] %s:%s", cf->fd, ch->st->name, ch->name);
+                      "%s:%s", ch->st->name, ch->name);
         return CIO_ERROR;
     }
 
@@ -908,7 +931,11 @@ int cio_file_down(struct cio_chunk *ch)
     }
 
     /* unmap memory */
-    munmap_file(ch->ctx, ch);
+    ret = munmap_file(ch->ctx, ch);
+
+    if (ret != 0) {
+        return -1;
+    }
 
     /* Allocated map size is zero */
     cf->alloc_size = 0;
@@ -921,7 +948,12 @@ int cio_file_down(struct cio_chunk *ch)
     }
 
     /* Close file descriptor */
-    cio_file_native_close(cf);
+    ret = cio_file_native_close(cf);
+
+    if (ret != CIO_OK) {
+        cio_errno();
+        return -1;
+    }
 
     return 0;
 }
@@ -1047,7 +1079,6 @@ int cio_file_write_metadata(struct cio_chunk *ch, char *buf, size_t size)
     char *cur_content_data;
     char *new_content_data;
     size_t new_size;
-    size_t content_av;
     size_t meta_av;
     struct cio_file *cf;
 
@@ -1082,13 +1113,11 @@ int cio_file_write_metadata(struct cio_chunk *ch, char *buf, size_t size)
      * where we need to increase the memory map size, move the content area
      * bytes to a different position and write the metadata.
      *
-     * Calculate the available space in the content area.
+     * Check if resize is needed before calculating content_av to avoid
+     * unsigned underflow. We need: header + new_metadata + content_data <= alloc_size
      */
-    content_av = cf->alloc_size - cf->data_size;
-
-    /* If there is no enough space, increase the file size and it memory map */
-    if (content_av < size) {
-        new_size = (size - meta_av) + cf->data_size + CIO_FILE_HEADER_MIN;
+    if (cf->alloc_size < CIO_FILE_HEADER_MIN + size + cf->data_size) {
+        new_size = CIO_FILE_HEADER_MIN + size + cf->data_size;
 
         ret = cio_file_resize(cf, new_size);
 
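
A brief aside on the rewritten condition: it compares sums rather than differences because every operand is a size_t, and unsigned subtraction wraps around instead of going negative, which can make a difference-based space check report far more room than actually exists. A minimal, self-contained sketch of that wraparound with hypothetical values:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t alloc_size = 32;   /* hypothetical, smaller than the data below */
        size_t data_size  = 48;
        size_t new_meta   = 16;   /* hypothetical new metadata size */

        /* wraps to a huge value instead of -16, so a test like
         * "alloc_size - data_size < new_meta" is false and a needed
         * resize would be skipped */
        printf("%zu\n", alloc_size - data_size);

        /* the additive form keeps every intermediate value meaningful */
        if (alloc_size < new_meta + data_size) {
            printf("resize needed\n");
        }

        return 0;
    }
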
@@ -1106,7 +1135,7 @@ int cio_file_write_metadata(struct cio_chunk *ch, char *buf, size_t size)
     /* set new position for the content data */
     cur_content_data = cio_file_st_get_content(cf->map);
     new_content_data = meta + size;
-    memmove(new_content_data, cur_content_data, size);
+    memmove(new_content_data, cur_content_data, cf->data_size);
 
     /* copy new metadata */
     memcpy(meta, buf, size);
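
The memmove change is easiest to read against the file layout implied by the resize check above (header, then metadata, then content); a rough sketch of the assumed layout:

    /*
     *  before:  [ header | old metadata             | content: data_size bytes ]
     *  after:   [ header | new metadata: size bytes | content: data_size bytes ]
     *
     * The bytes being relocated are the existing content area, so the count
     * passed to memmove() has to be cf->data_size; moving only `size` bytes
     * relocates the wrong amount of content whenever the two lengths differ.
     */
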
@@ -1138,6 +1167,12 @@ int cio_file_sync(struct cio_chunk *ch)
         return 0;
     }
 
+    /* If chunk is down (unmapped), there's nothing to sync */
+    /* You can only write to a chunk when it's up, so if it's down, no pending changes exist */
+    if (!cio_file_native_is_mapped(cf)) {
+        return 0;
+    }
+
     if (cf->synced == CIO_TRUE) {
         return 0;
     }