@@ -1052,32 +1052,34 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
 
 #ifdef CONFIG_PAGE_POOL
 
-/* This is the number of tokens that the user can SO_DEVMEM_DONTNEED in
- * 1 syscall. The limit exists to limit the amount of memory the kernel
- * allocates to copy these tokens.
+/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED
+ * in 1 syscall. The limit exists to limit the amount of memory the kernel
+ * allocates to copy these tokens, and to prevent looping over the frags for
+ * too long.
  */
 #define MAX_DONTNEED_TOKENS 128
+#define MAX_DONTNEED_FRAGS 1024
 
 static noinline_for_stack int
 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 {
 	unsigned int num_tokens, i, j, k, netmem_num = 0;
 	struct dmabuf_token *tokens;
+	int ret = 0, num_frags = 0;
 	netmem_ref netmems[16];
-	int ret = 0;
 
 	if (!sk_is_tcp(sk))
 		return -EBADF;
 
-	if (optlen % sizeof(struct dmabuf_token) ||
+	if (optlen % sizeof(*tokens) ||
 	    optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
 		return -EINVAL;
 
-	tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL);
+	num_tokens = optlen / sizeof(*tokens);
+	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
 	if (!tokens)
 		return -ENOMEM;
 
-	num_tokens = optlen / sizeof(struct dmabuf_token);
 	if (copy_from_sockptr(tokens, optval, optlen)) {
 		kvfree(tokens);
 		return -EFAULT;
@@ -1086,24 +1088,28 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 	xa_lock_bh(&sk->sk_user_frags);
 	for (i = 0; i < num_tokens; i++) {
 		for (j = 0; j < tokens[i].token_count; j++) {
+			if (++num_frags > MAX_DONTNEED_FRAGS)
+				goto frag_limit_reached;
+
 			netmem_ref netmem = (__force netmem_ref)__xa_erase(
 				&sk->sk_user_frags, tokens[i].token_start + j);
 
-			if (netmem &&
-			    !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) {
-				netmems[netmem_num++] = netmem;
-				if (netmem_num == ARRAY_SIZE(netmems)) {
-					xa_unlock_bh(&sk->sk_user_frags);
-					for (k = 0; k < netmem_num; k++)
-						WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
-					netmem_num = 0;
-					xa_lock_bh(&sk->sk_user_frags);
-				}
-				ret++;
+			if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+				continue;
+
+			netmems[netmem_num++] = netmem;
+			if (netmem_num == ARRAY_SIZE(netmems)) {
+				xa_unlock_bh(&sk->sk_user_frags);
+				for (k = 0; k < netmem_num; k++)
+					WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+				netmem_num = 0;
+				xa_lock_bh(&sk->sk_user_frags);
 			}
+			ret++;
 		}
 	}
 
+frag_limit_reached:
 	xa_unlock_bh(&sk->sk_user_frags);
 	for (k = 0; k < netmem_num; k++)
 		WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
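For reference, a minimal userspace sketch of how these limits surface to callers. This is not part of the patch; it assumes kernel uapi headers recent enough to define `SO_DEVMEM_DONTNEED` and `struct dmabuf_token` (include/uapi/linux/uio.h), and the helper name is hypothetical:

```c
#include <stdio.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/uio.h>	/* struct dmabuf_token */

/* Hypothetical helper: hand a range of devmem frag tokens back to the
 * kernel. optlen must be a multiple of sizeof(struct dmabuf_token), at
 * most MAX_DONTNEED_TOKENS (128) tokens may be passed per call, and at
 * most MAX_DONTNEED_FRAGS (1024) frags are released before the kernel
 * stops early.
 */
static int devmem_dontneed(int fd, __u32 start, __u32 count)
{
	struct dmabuf_token tok = {
		.token_start = start,	/* first frag token in the range */
		.token_count = count,	/* consecutive frags to release */
	};

	if (setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED,
		       &tok, sizeof(tok)) < 0) {
		perror("SO_DEVMEM_DONTNEED");
		return -1;
	}
	return 0;
}
```

With the new cap, a caller that batches large `token_count` ranges should keep the total frag count per call at or below 1024: once `num_frags` exceeds `MAX_DONTNEED_FRAGS`, the kernel jumps to `frag_limit_reached` and returns only the frags freed so far, leaving the remaining tokens to be released by a later call.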