@@ -1121,34 +1121,36 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
+static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
+		unsigned int offset, size_t size)
+{
+	void *kaddr;
+	long ret;
+
+	ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+	if (ret > 0) {
+		memset(kaddr + offset, 0, size);
+		dax_flush(dax_dev, kaddr + offset, size);
+	}
+	return ret;
+}
+
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
 	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
 	long rc, id;
-	void *kaddr;
-	bool page_aligned = false;
 	unsigned offset = offset_in_page(pos);
 	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
-	if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
-		page_aligned = true;
-
 	id = dax_read_lock();
-
-	if (page_aligned)
+	if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
 		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
 	else
-		rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
-	if (rc < 0) {
-		dax_read_unlock(id);
-		return rc;
-	}
-
-	if (!page_aligned) {
-		memset(kaddr + offset, 0, size);
-		dax_flush(iomap->dax_dev, kaddr + offset, size);
-	}
+		rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
 	dax_read_unlock(id);
+
+	if (rc < 0)
+		return rc;
 	return size;
 }
 
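The structural payoff of the refactor is the single unlock site: the old error branch needed its own dax_read_unlock() before returning, whereas the new flow funnels both zeroing paths through one unlock and only then checks rc. A minimal standalone sketch of that pattern, not kernel code; demo_lock(), demo_unlock() and demo_zero() are illustrative stand-ins for dax_read_lock(), dax_read_unlock() and dax_iomap_zero():

#include <stdio.h>

/* Illustrative stand-ins for dax_read_lock()/dax_read_unlock(). */
static int demo_lock(void)      { puts("lock");   return 42; }
static void demo_unlock(int id) { (void)id; puts("unlock"); }

/* Either path may fail, but both fall through a single unlock
 * site before the error check, mirroring the reworked
 * dax_iomap_zero(). */
static long demo_zero(int aligned)
{
	long rc;
	int id = demo_lock();

	if (aligned)
		rc = 0;		/* dax_zero_page_range() path, 0 on success */
	else
		rc = -5;	/* dax_memzero() path, simulating -EIO */
	demo_unlock(id);

	if (rc < 0)		/* error checked only after unlocking */
		return rc;
	return 4096;		/* bytes zeroed */
}

int main(void)
{
	printf("aligned:   %ld\n", demo_zero(1));
	printf("unaligned: %ld\n", demo_zero(0));
	return 0;
}

Keeping one unlock site is also what lets dax_memzero() stay oblivious to locking: it just reports how dax_direct_access() fared, and dax_iomap_zero() decides afterwards whether to return the byte count or the error.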