@@ -196,7 +196,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 /*
  * Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
  * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
  * destination is flushed from the cache on return.
  */
@@ -211,24 +211,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 #define _copy_mc_to_iter _copy_to_iter
 #endif
 
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
-	if (unlikely(!check_copy_size(addr, bytes, false)))
-		return 0;
-	else
-		return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
-	if (unlikely(!check_copy_size(addr, bytes, true)))
-		return 0;
-	else
-		return _copy_mc_to_iter(addr, bytes, i);
-}
-
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
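
For context on the comment fixed in the first hunk, here is a minimal caller sketch of the pattern it describes: checking IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before relying on flush-on-copy semantics. pmem_copy_from_iter() is a hypothetical name, and the fallback path that copies with _copy_from_iter() and then writes back via arch_wb_cache_pmem() is an assumption about one way a caller could flush by hand; none of this is part of this commit.

#include <linux/uio.h>
#include <linux/libnvdimm.h>	/* arch_wb_cache_pmem(), assumed helper */

/* Hypothetical caller sketch; not from this commit. */
static size_t pmem_copy_from_iter(void *pmem_addr, size_t bytes,
				  struct iov_iter *i)
{
	size_t copied;

	/* Here the copy itself flushes the destination from the cache. */
	if (IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
		return _copy_from_iter_flushcache(pmem_addr, bytes, i);

	/* Weaker semantics: copy, then write back the destination by hand. */
	copied = _copy_from_iter(pmem_addr, bytes, i);
	arch_wb_cache_pmem(pmem_addr, copied);
	return copied;
}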