@@ -116,12 +116,14 @@
 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
 static void vsock_sk_destruct(struct sock *sk);
 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+static void vsock_close(struct sock *sk, long timeout);
 
 /* Protocol family. */
 static struct proto vsock_proto = {
 	.name = "AF_VSOCK",
 	.owner = THIS_MODULE,
 	.obj_size = sizeof(struct vsock_sock),
+	.close = vsock_close,
 };
 
 /* The default peer timeout indicates how long we will wait for a peer response
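Note on why vsock_proto gains a .close here: when a socket is inserted into a BPF sockmap, sockmap saves the protocol's original close callback and later invokes it unconditionally from sock_map_close(). Without a .close in vsock_proto, that saved pointer would be NULL and closing a mapped vsock socket would dereference NULL. A rough sketch of the sockmap side; "sketch_" names are simplified stand-ins, not the exact kernel code:

struct sketch_psock {
	void (*saved_close)(struct sock *sk, long timeout);
};

static void sketch_psock_init(struct sketch_psock *psock, struct sock *sk)
{
	/* Would store NULL if vsock_proto had no .close member. */
	psock->saved_close = sk->sk_prot->close;
	/* sk->sk_prot is then swapped for sockmap's proto, whose
	 * close callback is sock_map_close().
	 */
}

static void sketch_sock_map_close(struct sock *sk, long timeout)
{
	struct sketch_psock *psock = sketch_psock_of(sk);	/* hypothetical lookup */

	/* ... detach sk from the map, restore the original sk_prot ... */

	/* Unconditional call: a NULL saved_close would oops here, which
	 * is exactly what the dummy vsock_close() prevents.
	 */
	psock->saved_close(sk, timeout);
}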
@@ -803,39 +805,37 @@ static bool sock_type_connectible(u16 type)
 
 static void __vsock_release(struct sock *sk, int level)
 {
-	if (sk) {
-		struct sock *pending;
-		struct vsock_sock *vsk;
-
-		vsk = vsock_sk(sk);
-		pending = NULL;	/* Compiler warning. */
+	struct vsock_sock *vsk;
+	struct sock *pending;
 
-		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
-		 * version to avoid the warning "possible recursive locking
-		 * detected". When "level" is 0, lock_sock_nested(sk, level)
-		 * is the same as lock_sock(sk).
-		 */
-		lock_sock_nested(sk, level);
+	vsk = vsock_sk(sk);
+	pending = NULL;	/* Compiler warning. */
 
-		if (vsk->transport)
-			vsk->transport->release(vsk);
-		else if (sock_type_connectible(sk->sk_type))
-			vsock_remove_sock(vsk);
+	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
+	 * version to avoid the warning "possible recursive locking
+	 * detected". When "level" is 0, lock_sock_nested(sk, level)
+	 * is the same as lock_sock(sk).
+	 */
+	lock_sock_nested(sk, level);
 
-		sock_orphan(sk);
-		sk->sk_shutdown = SHUTDOWN_MASK;
+	if (vsk->transport)
+		vsk->transport->release(vsk);
+	else if (sock_type_connectible(sk->sk_type))
+		vsock_remove_sock(vsk);
 
-		skb_queue_purge(&sk->sk_receive_queue);
+	sock_orphan(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;
 
-		/* Clean up any sockets that never were accepted. */
-		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-			__vsock_release(pending, SINGLE_DEPTH_NESTING);
-			sock_put(pending);
-		}
+	skb_queue_purge(&sk->sk_receive_queue);
 
-		release_sock(sk);
-		sock_put(sk);
+	/* Clean up any sockets that never were accepted. */
+	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+		__vsock_release(pending, SINGLE_DEPTH_NESTING);
+		sock_put(pending);
 	}
+
+	release_sock(sk);
+	sock_put(sk);
 }
 
 static void vsock_sk_destruct(struct sock *sk)
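The if (sk) guard removed above moves into vsock_release() in the next hunk, which is what lets the whole body drop one indent level. Note also why the kept cleanup loop needs SINGLE_DEPTH_NESTING: the listener's sock lock is still held while each never-accepted child is released, so two locks of the same lockdep class nest. An illustration of that locking shape (simplified; sketch_dequeue is a hypothetical stand-in for vsock_dequeue_accept()):

static void sketch_release_listener(struct sock *listener)
{
	struct sock *child;

	lock_sock(listener);				/* lockdep depth 0 */
	while ((child = sketch_dequeue(listener)) != NULL) {
		/* A plain lock_sock(child) here would trigger lockdep's
		 * "possible recursive locking detected" warning, because
		 * the listener's lock of the same class is already held.
		 */
		lock_sock_nested(child, SINGLE_DEPTH_NESTING);	/* depth 1 */
		/* ... tear the child down ... */
		release_sock(child);
	}
	release_sock(listener);
}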
@@ -912,9 +912,22 @@ void vsock_data_ready(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(vsock_data_ready);
 
+/* Dummy callback required by sockmap.
+ * See unconditional call of saved_close() in sock_map_close().
+ */
+static void vsock_close(struct sock *sk, long timeout)
+{
+}
+
 static int vsock_release(struct socket *sock)
 {
-	__vsock_release(sock->sk, 0);
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	sk->sk_prot->close(sk, 0);
+	__vsock_release(sk, 0);
 	sock->sk = NULL;
 	sock->state = SS_FREE;
 
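With both pieces in place, vsock_release() always dispatches through sk->sk_prot->close() before the real teardown: a plain vsock socket hits the no-op vsock_close(), while a socket that was placed in a sockmap hits sock_map_close(), which performs the sockmap teardown and then chains to the saved (no-op) callback. From user space the new capability would be exercised like any other sockmap insertion; a hypothetical libbpf snippet, assuming map_fd refers to a BPF_MAP_TYPE_SOCKMAP and vsock_fd is an already-connected vsock socket:

#include <linux/bpf.h>	/* BPF_ANY */
#include <bpf/bpf.h>	/* bpf_map_update_elem() */

/* Hypothetical helper: insert a connected vsock socket into a sockmap.
 * On success, sockmap swaps sk->sk_prot and keeps the original
 * vsock_proto.close (the dummy above) as its saved_close.
 */
static int add_vsock_to_sockmap(int map_fd, int vsock_fd)
{
	int key = 0;

	return bpf_map_update_elem(map_fd, &key, &vsock_fd, BPF_ANY);
}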