@@ -1545,6 +1545,7 @@ static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
 	struct kvec iov;
 	struct xdr_buf verf_buf;
 	int status;
+	u32 seqno;
 
 	/* Credential */
 
@@ -1556,15 +1557,16 @@ static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
 	cred_len = p++;
 
 	spin_lock(&ctx->gc_seq_lock);
-	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
+	seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
+	xprt_rqst_add_seqno(req, seqno);
 	spin_unlock(&ctx->gc_seq_lock);
-	if (req->rq_seqno == MAXSEQ)
+	if (*req->rq_seqnos == MAXSEQ)
 		goto expired;
 	trace_rpcgss_seqno(task);
 
 	*p++ = cpu_to_be32(RPC_GSS_VERSION);
 	*p++ = cpu_to_be32(ctx->gc_proc);
-	*p++ = cpu_to_be32(req->rq_seqno);
+	*p++ = cpu_to_be32(*req->rq_seqnos);
 	*p++ = cpu_to_be32(gss_cred->gc_service);
 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
 	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
@@ -1678,17 +1680,31 @@ gss_refresh_null(struct rpc_task *task)
 	return 0;
 }
 
+static u32
+gss_validate_seqno_mic(struct gss_cl_ctx *ctx, u32 seqno, __be32 *seq, __be32 *p, u32 len)
+{
+	struct kvec iov;
+	struct xdr_buf verf_buf;
+	struct xdr_netobj mic;
+
+	*seq = cpu_to_be32(seqno);
+	iov.iov_base = seq;
+	iov.iov_len = 4;
+	xdr_buf_from_iov(&iov, &verf_buf);
+	mic.data = (u8 *)p;
+	mic.len = len;
+	return gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
+}
+
 static int
 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
 	__be32	*p, *seq = NULL;
-	struct kvec iov;
-	struct xdr_buf verf_buf;
-	struct xdr_netobj mic;
 	u32 len, maj_stat;
 	int status;
+	int i = 1; /* don't recheck the first item */
 
 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
 	if (!p)
@@ -1705,13 +1721,10 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
 	seq = kmalloc(4, GFP_KERNEL);
 	if (!seq)
 		goto validate_failed;
-	*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
-	iov.iov_base = seq;
-	iov.iov_len = 4;
-	xdr_buf_from_iov(&iov, &verf_buf);
-	mic.data = (u8 *)p;
-	mic.len = len;
-	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
+	maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len);
+	/* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */
+	while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count))
+		maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i++], seq, p, len);
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	if (maj_stat)
@@ -1750,7 +1763,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	if (!p)
 		goto wrap_failed;
 	integ_len = p++;
-	*p = cpu_to_be32(rqstp->rq_seqno);
+	*p = cpu_to_be32(*rqstp->rq_seqnos);
 
 	if (rpcauth_wrap_req_encode(task, xdr))
 		goto wrap_failed;
@@ -1847,7 +1860,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	if (!p)
 		goto wrap_failed;
 	opaque_len = p++;
-	*p = cpu_to_be32(rqstp->rq_seqno);
+	*p = cpu_to_be32(*rqstp->rq_seqnos);
 
 	if (rpcauth_wrap_req_encode(task, xdr))
 		goto wrap_failed;
@@ -2001,7 +2014,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
 	offset = rcv_buf->len - xdr_stream_remaining(xdr);
 	if (xdr_stream_decode_u32(xdr, &seqno))
 		goto unwrap_failed;
-	if (seqno != rqstp->rq_seqno)
+	if (seqno != *rqstp->rq_seqnos)
 		goto bad_seqno;
 	if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
 		goto unwrap_failed;
@@ -2045,7 +2058,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
 	trace_rpcgss_unwrap_failed(task);
 	goto out;
 bad_seqno:
-	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
+	trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno);
 	goto out;
 bad_mic:
 	trace_rpcgss_verify_mic(task, maj_stat);
@@ -2077,7 +2090,7 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	if (maj_stat != GSS_S_COMPLETE)
 		goto bad_unwrap;
 	/* gss_unwrap decrypted the sequence number */
-	if (be32_to_cpup(p++) != rqstp->rq_seqno)
+	if (be32_to_cpup(p++) != *rqstp->rq_seqnos)
 		goto bad_seqno;
 
 	/* gss_unwrap redacts the opaque blob from the head iovec.
@@ -2093,7 +2106,7 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
 	trace_rpcgss_unwrap_failed(task);
 	return -EIO;
 bad_seqno:
-	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
+	trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p));
 	return -EIO;
 bad_unwrap:
 	trace_rpcgss_unwrap(task, maj_stat);
@@ -2118,14 +2131,14 @@ gss_xmit_need_reencode(struct rpc_task *task)
 	if (!ctx)
 		goto out;
 
-	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
+	if (gss_seq_is_newer(*req->rq_seqnos, READ_ONCE(ctx->gc_seq)))
 		goto out_ctx;
 
 	seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
-	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
+	while (gss_seq_is_newer(*req->rq_seqnos, seq_xmit)) {
 		u32 tmp = seq_xmit;
 
-		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
+		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, *req->rq_seqnos);
 		if (seq_xmit == tmp) {
 			ret = false;
 			goto out_ctx;
@@ -2134,7 +2147,7 @@ gss_xmit_need_reencode(struct rpc_task *task)
 
 	win = ctx->gc_win;
 	if (win > 0)
-		ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win);
+		ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win);
 
 out_ctx:
 	gss_put_ctx(ctx);
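
Note: the new rq_seqnos/rq_seqno_count fields and the xprt_rqst_add_seqno() helper used above are defined outside this file (on struct rpc_rqst). A minimal sketch of what this diff assumes they look like follows; the array-size macro, field layout, and shifting behavior here are illustrative assumptions, not taken from this patch. The only hard requirements visible in the diff are that *rq_seqnos (rq_seqnos[0]) is the most recently assigned GSS sequence number and that rq_seqno_count bounds the retry loop in gss_validate().

/* Sketch only: names and sizes below are assumptions, not part of this diff. */
#define RPC_GSS_SEQNO_ARRAY_SIZE 3		/* hypothetical cache depth */

struct rpc_rqst {
	/* ... existing fields ... */
	u32		rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE];	/* GSS seqno cache */
	unsigned int	rq_seqno_count;				/* valid entries */
};

static inline void xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno)
{
	/* Push older seqnos back and make the new one current (rq_seqnos[0]),
	 * so gss_validate() can fall back to older cached entries per
	 * RFC 2203 section 5.3.3.1 when the server signed a retransmission.
	 */
	memmove(&req->rq_seqnos[1], &req->rq_seqnos[0],
		(RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0]));
	req->rq_seqnos[0] = seqno;
	if (req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE)
		req->rq_seqno_count++;
}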