fs/namespace.c: 17 additions, 0 deletions
@@ -2254,6 +2254,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
return attach_recursive_mnt(mnt, p, mp, NULL);
}

+static int may_change_propagation(const struct mount *m)
+{
+        struct mnt_namespace *ns = m->mnt_ns;
+
+        // it must be mounted in some namespace
+        if (IS_ERR_OR_NULL(ns))         // is_mounted()
+                return -EINVAL;
+        // and the caller must be admin in userns of that namespace
+        if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+                return -EPERM;
+        return 0;
+}
+
/*
* Sanity check the flags to change_mnt_propagation.
*/
@@ -2290,6 +2303,10 @@ static int do_change_type(struct path *path, int ms_flags)
return -EINVAL;

namespace_lock();
+        err = may_change_propagation(mnt);
+        if (err)
+                goto out_unlock;
+
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)
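The new may_change_propagation() check is what a mount(2) propagation-change request (MS_SHARED, MS_PRIVATE, MS_SLAVE, MS_UNBINDABLE) now has to pass on its way through do_change_type(). Below is a minimal userspace sketch of that call path; the make_private() helper and its error reporting are illustrative, not part of this patch.

/* Hypothetical caller: change a mount point's propagation to private.
 * With this patch the request fails with EINVAL if the target is not
 * attached in any mount namespace, or EPERM if the caller is not
 * CAP_SYS_ADMIN in the user namespace owning that mount namespace.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

static int make_private(const char *target)
{
        /* source, fstype and data are ignored for propagation changes */
        if (mount(NULL, target, NULL, MS_PRIVATE, NULL) == 0)
                return 0;

        fprintf(stderr, "mount(%s, MS_PRIVATE): %s\n", target, strerror(errno));
        return -1;
}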
net/core/filter.c: 11 additions, 4 deletions
@@ -3497,13 +3497,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);

-                /* Due to header grow, MSS needs to be downgraded. */
-                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
-                        skb_decrease_gso_size(shinfo, len_diff);
-
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= gso_type;
                shinfo->gso_segs = 0;
+
+                /* Due to header growth, MSS needs to be downgraded.
+                 * There is a BUG_ON() when segmenting the frag_list with
+                 * head_frag true, so linearize the skb after downgrading
+                 * the MSS.
+                 */
+                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+                        skb_decrease_gso_size(shinfo, len_diff);
+                        if (shinfo->frag_list)
+                                return skb_linearize(skb);
+                }
}

return 0;
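bpf_skb_net_grow() is reached from the bpf_skb_adjust_room() helper when a program grows header room on a GSO skb without BPF_F_ADJ_ROOM_FIXED_GSO, which is exactly the path that now linearizes frag_list skbs after shrinking gso_size. A minimal tc BPF sketch of such a caller follows; the program name, section and ENCAP_LEN value are illustrative.

/* Hypothetical tc program: reserve room in front of the network header.
 * With flags == 0 (i.e. no BPF_F_ADJ_ROOM_FIXED_GSO) the stack shrinks
 * gso_size by len_diff, the case the fix above handles for frag_list skbs.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define ENCAP_LEN 8 /* bytes of new header to make room for */

SEC("tc")
int grow_room(struct __sk_buff *skb)
{
        if (bpf_skb_adjust_room(skb, ENCAP_LEN, BPF_ADJ_ROOM_MAC, 0))
                return TC_ACT_SHOT;

        /* ... fill the new header via bpf_skb_store_bytes() ... */
        return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";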
net/ipv4/udp_offload.c: 26 additions, 2 deletions
@@ -11,6 +11,7 @@
*/

#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
@@ -275,9 +276,32 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
bool copy_dtor;
__sum16 check;
__be16 newlen;
+        int ret = 0;

-        if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
-                return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+        if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
+                /* Detect modified geometry and pass those to skb_segment. */
+                if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
+                        return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+
+                ret = __skb_linearize(gso_skb);
+                if (ret)
+                        return ERR_PTR(ret);
+
+                /* Setup csum, as fraglist skips this in udp4_gro_receive. */
+                gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
+                gso_skb->csum_offset = offsetof(struct udphdr, check);
+                gso_skb->ip_summed = CHECKSUM_PARTIAL;
+
+                uh = udp_hdr(gso_skb);
+                if (is_ipv6)
+                        uh->check = ~udp_v6_check(gso_skb->len,
+                                                  &ipv6_hdr(gso_skb)->saddr,
+                                                  &ipv6_hdr(gso_skb)->daddr, 0);
+                else
+                        uh->check = ~udp_v4_check(gso_skb->len,
+                                                  ip_hdr(gso_skb)->saddr,
+                                                  ip_hdr(gso_skb)->daddr, 0);
+        }

mss = skb_shinfo(gso_skb)->gso_size;
if (gso_skb->len <= sizeof(*uh) + mss)
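The gso_size that __udp_gso_segment() splits on is set either by a local sender using the UDP_SEGMENT socket option or by UDP GRO coalescing on receive; the SKB_GSO_FRAGLIST case handled above comes from the GRO side. A minimal userspace sketch of the two socket knobs, assuming the uapi values UDP_SEGMENT=103 and UDP_GRO=104; the helper names are illustrative.

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103 /* from include/uapi/linux/udp.h */
#endif
#ifndef UDP_GRO
#define UDP_GRO 104
#endif

/* Sender side: a large send() is split by the stack into gso_size-sized
 * datagrams, which is what __udp_gso_segment() does on the transmit path.
 */
static int enable_udp_gso(int fd, int gso_size)
{
        return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
}

/* Receiver side: let the kernel coalesce incoming datagrams; the coalesced
 * skb keeps gso_size so it can be re-segmented before delivery or forwarding.
 */
static int enable_udp_gro(int fd)
{
        int on = 1;

        return setsockopt(fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
}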
net/vmw_vsock/af_vsock.c: 23 additions, 5 deletions
@@ -398,6 +398,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
+        lockdep_assert_held(&vsock_register_mutex);
+
if (!transport_local)
return false;

@@ -455,6 +457,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)

remote_flags = vsk->remote_addr.svm_flags;

+        mutex_lock(&vsock_register_mutex);
+
switch (sk->sk_type) {
case SOCK_DGRAM:
new_transport = transport_dgram;
@@ -469,12 +473,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
new_transport = transport_h2g;
break;
default:
-                return -ESOCKTNOSUPPORT;
+                ret = -ESOCKTNOSUPPORT;
+                goto err;
        }

        if (vsk->transport) {
-                if (vsk->transport == new_transport)
-                        return 0;
+                if (vsk->transport == new_transport) {
+                        ret = 0;
+                        goto err;
+                }

/* transport->release() must be called with sock lock acquired.
* This path can only be taken during vsock_stream_connect(),
@@ -489,8 +496,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
/* We increase the module refcnt to prevent the transport unloading
* while there are open sockets assigned to it.
*/
-        if (!new_transport || !try_module_get(new_transport->module))
-                return -ENODEV;
+        if (!new_transport || !try_module_get(new_transport->module)) {
+                ret = -ENODEV;
+                goto err;
+        }
+
+        /* It's safe to release the mutex after a successful try_module_get().
+         * Whichever transport `new_transport` points at, it won't go away until
+         * the last module_put() below or in vsock_deassign_transport().
+         */
+        mutex_unlock(&vsock_register_mutex);

ret = new_transport->init(vsk, psk);
if (ret) {
@@ -501,6 +516,9 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
vsk->transport = new_transport;

return 0;
+err:
+        mutex_unlock(&vsock_register_mutex);
+        return ret;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);

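vsock_assign_transport() runs when an AF_VSOCK socket is connected (or a child socket is accepted), and the new vsock_register_mutex section covers the transport lookup up to and including try_module_get(). A minimal userspace sketch of the connect() path that exercises it; the vsock_connect() helper name is illustrative.

#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <unistd.h>

/* Hypothetical helper: connect a stream vsock socket to (cid, port).
 * The transport (g2h, h2g, local, ...) is picked inside connect() by
 * vsock_assign_transport() based on the destination CID.
 */
static int vsock_connect(unsigned int cid, unsigned int port)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid = cid,   /* e.g. VMADDR_CID_HOST from a guest */
                .svm_port = port,
        };
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}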