Skip to content

Commit eca9bfa

Browse files
kuba-moo authored and davem330 committed
tls: rx: strp: preserve decryption status of skbs when needed
When the receive buffer is small we try to copy out the data from TCP into an skb maintained by TLS to prevent the connection from stalling. Unfortunately, if a single record is made up of a mix of decrypted and non-decrypted skbs, combining them into a single skb leads to loss of decryption status, resulting in decryption errors or data corruption. Similarly, when trying to use the TCP receive queue directly we need to make sure that all the skbs within the record have the same status. If we don't, the mixed status will be detected correctly, but we'll CoW the anchor, again collapsing it into a single paged skb without the decrypted status preserved. So the "fixup" code will not know which parts of the skb to re-encrypt. Fixes: 84c61fe ("tls: rx: do not use the standard strparser") Tested-by: Shai Amiram <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]> Reviewed-by: Simon Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent c1c607b commit eca9bfa

File tree

4 files changed

+114
-31
lines changed

4 files changed

+114
-31
lines changed

include/net/tls.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,7 @@ struct tls_strparser {
126126
u32 mark : 8;
127127
u32 stopped : 1;
128128
u32 copy_mode : 1;
129+
u32 mixed_decrypted : 1;
129130
u32 msg_ready : 1;
130131

131132
struct strp_msg stm;

net/tls/tls.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
167167
return ctx->strp.msg_ready;
168168
}
169169

170+
static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
171+
{
172+
return ctx->strp.mixed_decrypted;
173+
}
174+
170175
#ifdef CONFIG_TLS_DEVICE
171176
int tls_device_init(void);
172177
void tls_device_cleanup(void);

net/tls/tls_device.c

Lines changed: 8 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
10071007
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
10081008
struct sk_buff *skb = tls_strp_msg(sw_ctx);
10091009
struct strp_msg *rxm = strp_msg(skb);
1010-
int is_decrypted = skb->decrypted;
1011-
int is_encrypted = !is_decrypted;
1012-
struct sk_buff *skb_iter;
1013-
int left;
1014-
1015-
left = rxm->full_len + rxm->offset - skb_pagelen(skb);
1016-
/* Check if all the data is decrypted already */
1017-
skb_iter = skb_shinfo(skb)->frag_list;
1018-
while (skb_iter && left > 0) {
1019-
is_decrypted &= skb_iter->decrypted;
1020-
is_encrypted &= !skb_iter->decrypted;
1021-
1022-
left -= skb_iter->len;
1023-
skb_iter = skb_iter->next;
1010+
int is_decrypted, is_encrypted;
1011+
1012+
if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
1013+
is_decrypted = skb->decrypted;
1014+
is_encrypted = !is_decrypted;
1015+
} else {
1016+
is_decrypted = 0;
1017+
is_encrypted = 0;
10241018
}
10251019

10261020
trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,

net/tls/tls_strp.c

Lines changed: 100 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,8 @@ static void tls_strp_anchor_free(struct tls_strparser *strp)
2929
struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
3030

3131
DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
32-
shinfo->frag_list = NULL;
32+
if (!strp->copy_mode)
33+
shinfo->frag_list = NULL;
3334
consume_skb(strp->anchor);
3435
strp->anchor = NULL;
3536
}
@@ -195,22 +196,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
195196
for (i = 0; i < shinfo->nr_frags; i++)
196197
__skb_frag_unref(&shinfo->frags[i], false);
197198
shinfo->nr_frags = 0;
199+
if (strp->copy_mode) {
200+
kfree_skb_list(shinfo->frag_list);
201+
shinfo->frag_list = NULL;
202+
}
198203
strp->copy_mode = 0;
204+
strp->mixed_decrypted = 0;
199205
}
200206

201-
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
202-
unsigned int offset, size_t in_len)
207+
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
208+
struct sk_buff *in_skb, unsigned int offset,
209+
size_t in_len)
203210
{
204-
struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
205-
struct sk_buff *skb;
206-
skb_frag_t *frag;
207211
size_t len, chunk;
212+
skb_frag_t *frag;
208213
int sz;
209214

210-
if (strp->msg_ready)
211-
return 0;
212-
213-
skb = strp->anchor;
214215
frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
215216

216217
len = in_len;
@@ -228,10 +229,8 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
228229
skb_frag_size_add(frag, chunk);
229230

230231
sz = tls_rx_msg_size(strp, skb);
231-
if (sz < 0) {
232-
desc->error = sz;
233-
return 0;
234-
}
232+
if (sz < 0)
233+
return sz;
235234

236235
/* We may have over-read, sz == 0 is guaranteed under-read */
237236
if (unlikely(sz && sz < skb->len)) {
@@ -271,15 +270,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
271270
offset += chunk;
272271
}
273272

274-
if (strp->stm.full_len == skb->len) {
273+
read_done:
274+
return in_len - len;
275+
}
276+
277+
static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
278+
struct sk_buff *in_skb, unsigned int offset,
279+
size_t in_len)
280+
{
281+
struct sk_buff *nskb, *first, *last;
282+
struct skb_shared_info *shinfo;
283+
size_t chunk;
284+
int sz;
285+
286+
if (strp->stm.full_len)
287+
chunk = strp->stm.full_len - skb->len;
288+
else
289+
chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
290+
chunk = min(chunk, in_len);
291+
292+
nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
293+
if (!nskb)
294+
return -ENOMEM;
295+
296+
shinfo = skb_shinfo(skb);
297+
if (!shinfo->frag_list) {
298+
shinfo->frag_list = nskb;
299+
nskb->prev = nskb;
300+
} else {
301+
first = shinfo->frag_list;
302+
last = first->prev;
303+
last->next = nskb;
304+
first->prev = nskb;
305+
}
306+
307+
skb->len += chunk;
308+
skb->data_len += chunk;
309+
310+
if (!strp->stm.full_len) {
311+
sz = tls_rx_msg_size(strp, skb);
312+
if (sz < 0)
313+
return sz;
314+
315+
/* We may have over-read, sz == 0 is guaranteed under-read */
316+
if (unlikely(sz && sz < skb->len)) {
317+
int over = skb->len - sz;
318+
319+
WARN_ON_ONCE(over > chunk);
320+
skb->len -= over;
321+
skb->data_len -= over;
322+
__pskb_trim(nskb, nskb->len - over);
323+
324+
chunk -= over;
325+
}
326+
327+
strp->stm.full_len = sz;
328+
}
329+
330+
return chunk;
331+
}
332+
333+
static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
334+
unsigned int offset, size_t in_len)
335+
{
336+
struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
337+
struct sk_buff *skb;
338+
int ret;
339+
340+
if (strp->msg_ready)
341+
return 0;
342+
343+
skb = strp->anchor;
344+
if (!skb->len)
345+
skb_copy_decrypted(skb, in_skb);
346+
else
347+
strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
348+
349+
if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
350+
ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
351+
else
352+
ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
353+
if (ret < 0) {
354+
desc->error = ret;
355+
ret = 0;
356+
}
357+
358+
if (strp->stm.full_len && strp->stm.full_len == skb->len) {
275359
desc->count = 0;
276360

277361
strp->msg_ready = 1;
278362
tls_rx_msg_ready(strp);
279363
}
280364

281-
read_done:
282-
return in_len - len;
365+
return ret;
283366
}
284367

285368
static int tls_strp_read_copyin(struct tls_strparser *strp)

0 commit comments

Comments (0)