@@ -13,6 +13,7 @@
 #include <linux/sort.h>
 #include <crypto/aead.h>
 #include <linux/fiemap.h>
+#include <linux/folio_queue.h>
 #include <uapi/linux/magic.h>
 #include "cifsfs.h"
 #include "cifsglob.h"
@@ -4391,30 +4392,86 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
 }
 
 /*
- * Clear a read buffer, discarding the folios which have XA_MARK_0 set.
+ * Clear a read buffer, discarding the folios which have the 1st mark set.
  */
-static void cifs_clear_xarray_buffer(struct xarray *buffer)
+static void cifs_clear_folioq_buffer(struct folio_queue *buffer)
 {
+	struct folio_queue *folioq;
+
+	while ((folioq = buffer)) {
+		for (int s = 0; s < folioq_count(folioq); s++)
+			if (folioq_is_marked(folioq, s))
+				folio_put(folioq_folio(folioq, s));
+		buffer = folioq->next;
+		kfree(folioq);
+	}
+}
+
+/*
+ * Allocate buffer space into a folio queue.
+ */
+static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
+{
+	struct folio_queue *buffer = NULL, *tail = NULL, *p;
 	struct folio *folio;
+	unsigned int slot;
+
+	do {
+		if (!tail || folioq_full(tail)) {
+			p = kmalloc(sizeof(*p), GFP_NOFS);
+			if (!p)
+				goto nomem;
+			folioq_init(p);
+			if (tail) {
+				tail->next = p;
+				p->prev = tail;
+			} else {
+				buffer = p;
+			}
+			tail = p;
+		}
+
+		folio = folio_alloc(GFP_KERNEL|__GFP_HIGHMEM, 0);
+		if (!folio)
+			goto nomem;
+
+		slot = folioq_append_mark(tail, folio);
+		size -= folioq_folio_size(tail, slot);
+	} while (size > 0);
+
+	return buffer;
+
+nomem:
+	cifs_clear_folioq_buffer(buffer);
+	return NULL;
+}
+
+/*
+ * Copy data from an iterator to the folios in a folio queue buffer.
+ */
+static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
+				     struct folio_queue *buffer)
+{
+	for (; buffer; buffer = buffer->next) {
+		for (int s = 0; s < folioq_count(buffer); s++) {
+			struct folio *folio = folioq_folio(buffer, s);
+			size_t part = folioq_folio_size(buffer, s);
 
-	XA_STATE(xas, buffer, 0);
+			part = umin(part, size);
 
-	rcu_read_lock();
-	xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
-		folio_put(folio);
+			if (copy_folio_from_iter(folio, 0, part, iter) != part)
+				return false;
+			size -= part;
+		}
 	}
-	rcu_read_unlock();
-	xa_destroy(buffer);
+	return true;
 }
 
 void
 smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
 {
-	int i;
-
-	for (i = 0; i < num_rqst; i++)
-		if (!xa_empty(&rqst[i].rq_buffer))
-			cifs_clear_xarray_buffer(&rqst[i].rq_buffer);
+	for (int i = 0; i < num_rqst; i++)
+		cifs_clear_folioq_buffer(rqst[i].rq_buffer);
 }
 
 /*
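
As an aside for review, here is a minimal sketch of how the three new helpers are intended to compose: allocate at least size bytes of folio space, copy the payload in, expose the copy through an iterator, and release everything afterwards. The function name example_bounce_payload is hypothetical and not part of the patch.

/* Hypothetical illustration only: round-trip a payload through a
 * folio_queue buffer using the helpers added above.
 */
static int example_bounce_payload(struct iov_iter *src, size_t size)
{
	struct folio_queue *buffer;
	struct iov_iter bounce;

	/* Allocate enough order-0 folios to cover 'size' bytes. */
	buffer = cifs_alloc_folioq_buffer(size);
	if (!buffer)
		return -ENOMEM;

	/* Copy the caller's data into the freshly allocated folios. */
	if (!cifs_copy_iter_to_folioq(src, size, buffer)) {
		cifs_clear_folioq_buffer(buffer);
		return -EIO;
	}

	/* Describe the copy as a source iterator (first slot 0, offset 0). */
	iov_iter_folio_queue(&bounce, ITER_SOURCE, buffer, 0, 0, size);

	/* ... hand 'bounce' to the consumer, e.g. the encryption path ... */

	/* Puts every marked folio and frees each queue segment. */
	cifs_clear_folioq_buffer(buffer);
	return 0;
}
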
@@ -4435,52 +4492,32 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
 		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
 {
 	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
-	struct page *page;
 	unsigned int orig_len = 0;
-	int i, j;
 	int rc = -ENOMEM;
 
-	for (i = 1; i < num_rqst; i++) {
+	for (int i = 1; i < num_rqst; i++) {
 		struct smb_rqst *old = &old_rq[i - 1];
 		struct smb_rqst *new = &new_rq[i];
-		struct xarray *buffer = &new->rq_buffer;
-		size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0;
+		struct folio_queue *buffer;
+		size_t size = iov_iter_count(&old->rq_iter);
 
 		orig_len += smb_rqst_len(server, old);
 		new->rq_iov = old->rq_iov;
 		new->rq_nvec = old->rq_nvec;
 
-		xa_init(buffer);
-
 		if (size > 0) {
-			unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
-
-			for (j = 0; j < npages; j++) {
-				void *o;
-
-				rc = -ENOMEM;
-				page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
-				if (!page)
-					goto err_free;
-				page->index = j;
-				o = xa_store(buffer, j, page, GFP_KERNEL);
-				if (xa_is_err(o)) {
-					rc = xa_err(o);
-					put_page(page);
-					goto err_free;
-				}
+			buffer = cifs_alloc_folioq_buffer(size);
+			if (!buffer)
+				goto err_free;
 
-				xa_set_mark(buffer, j, XA_MARK_0);
+			new->rq_buffer = buffer;
+			iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
+					     buffer, 0, 0, size);
 
-				seg = min_t(size_t, size - copied, PAGE_SIZE);
-				if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) {
-					rc = -EFAULT;
-					goto err_free;
-				}
-				copied += seg;
+			if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
+				rc = -EIO;
+				goto err_free;
 			}
-			iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
-					buffer, 0, size);
 		}
 	}
 
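
For reference, the iterator constructed over the copied payload above is parameterised the same way at every call site in this patch; the annotated call below is an illustration only, assuming iov_iter_folio_queue() takes (iterator, direction, queue, first slot, offset into that folio, byte count):

	iov_iter_folio_queue(&new->rq_iter,	/* iterator to initialise */
			     ITER_SOURCE,	/* data is read out of the folios */
			     buffer,		/* head of the folio_queue chain */
			     0,			/* start at the first slot */
			     0,			/* no offset into that folio */
			     size);		/* number of bytes described */
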
@@ -4544,31 +4581,32 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
 }
 
 static int
-cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
-			unsigned int skip, struct iov_iter *iter)
+cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
+			 size_t skip, struct iov_iter *iter)
 {
-	struct page *page;
-	unsigned long index;
-
-	xa_for_each(pages, index, page) {
-		size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size);
-
-		n = copy_page_to_iter(page, skip, len, iter);
-		if (n != len) {
-			cifs_dbg(VFS, "%s: something went wrong\n", __func__);
-			return -EIO;
+	for (; folioq; folioq = folioq->next) {
+		for (int s = 0; s < folioq_count(folioq); s++) {
+			struct folio *folio = folioq_folio(folioq, s);
+			size_t fsize = folio_size(folio);
+			size_t n, len = umin(fsize - skip, data_size);
+
+			n = copy_folio_to_iter(folio, skip, len, iter);
+			if (n != len) {
+				cifs_dbg(VFS, "%s: something went wrong\n", __func__);
+				return -EIO;
+			}
+			data_size -= n;
+			skip = 0;
 		}
-		data_size -= n;
-		skip = 0;
 	}
 
 	return 0;
 }
 
 static int
 handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
-		 char *buf, unsigned int buf_len, struct xarray *pages,
-		 unsigned int pages_len, bool is_offloaded)
+		 char *buf, unsigned int buf_len, struct folio_queue *buffer,
+		 unsigned int buffer_len, bool is_offloaded)
 {
 	unsigned int data_offset;
 	unsigned int data_len;
@@ -4665,7 +4703,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 			return 0;
 		}
 
-		if (data_len > pages_len - pad_len) {
+		if (data_len > buffer_len - pad_len) {
 			/* data_len is corrupt -- discard frame */
 			rdata->result = -EIO;
 			if (is_offloaded)
@@ -4676,21 +4714,20 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 		}
 
 		/* Copy the data to the output I/O iterator. */
-		rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
-							cur_off, &rdata->subreq.io_iter);
+		rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
+							 cur_off, &rdata->subreq.io_iter);
 		if (rdata->result != 0) {
 			if (is_offloaded)
 				mid->mid_state = MID_RESPONSE_MALFORMED;
 			else
 				dequeue_mid(mid, rdata->result);
 			return 0;
 		}
-		rdata->got_bytes = pages_len;
+		rdata->got_bytes = buffer_len;
 
 	} else if (buf_len >= data_offset + data_len) {
 		/* read response payload is in buf */
-		WARN_ONCE(pages && !xa_empty(pages),
-			  "read data can be either in buf or in pages");
+		WARN_ONCE(buffer, "read data can be either in buf or in buffer");
 		length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
 		if (length < 0)
 			return length;
@@ -4716,7 +4753,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 struct smb2_decrypt_work {
 	struct work_struct decrypt;
 	struct TCP_Server_Info *server;
-	struct xarray buffer;
+	struct folio_queue *buffer;
 	char *buf;
 	unsigned int len;
 };
@@ -4730,7 +4767,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	struct mid_q_entry *mid;
 	struct iov_iter iter;
 
-	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len);
+	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
 	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
 			      &iter, true);
 	if (rc) {
@@ -4746,7 +4783,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	mid->decrypted = true;
 	rc = handle_read_data(dw->server, mid, dw->buf,
 			      dw->server->vals->read_rsp_size,
-			      &dw->buffer, dw->len,
+			      dw->buffer, dw->len,
 			      true);
 	if (rc >= 0) {
 #ifdef CONFIG_CIFS_STATS2
@@ -4779,7 +4816,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
 	}
 
 free_pages:
-	cifs_clear_xarray_buffer(&dw->buffer);
+	cifs_clear_folioq_buffer(dw->buffer);
 	cifs_small_buf_release(dw->buf);
 	kfree(dw);
 }
@@ -4789,20 +4826,17 @@ static int
 receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 		       int *num_mids)
 {
-	struct page *page;
 	char *buf = server->smallbuf;
 	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
 	struct iov_iter iter;
-	unsigned int len, npages;
+	unsigned int len;
 	unsigned int buflen = server->pdu_size;
 	int rc;
-	int i = 0;
 	struct smb2_decrypt_work *dw;
 
 	dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
 	if (!dw)
 		return -ENOMEM;
-	xa_init(&dw->buffer);
 	INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
 	dw->server = server;
 
@@ -4818,36 +4852,24 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
 		server->vals->read_rsp_size;
 	dw->len = len;
-	npages = DIV_ROUND_UP(len, PAGE_SIZE);
+	len = round_up(dw->len, PAGE_SIZE);
 
 	rc = -ENOMEM;
-	for (; i < npages; i++) {
-		void *old;
-
-		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
-		if (!page)
-			goto discard_data;
-		page->index = i;
-		old = xa_store(&dw->buffer, i, page, GFP_KERNEL);
-		if (xa_is_err(old)) {
-			rc = xa_err(old);
-			put_page(page);
-			goto discard_data;
-		}
-		xa_set_mark(&dw->buffer, i, XA_MARK_0);
-	}
+	dw->buffer = cifs_alloc_folioq_buffer(len);
+	if (!dw->buffer)
+		goto discard_data;
 
-	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE);
+	iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
 
 	/* Read the data into the buffer and clear excess bufferage. */
 	rc = cifs_read_iter_from_socket(server, &iter, dw->len);
 	if (rc < 0)
 		goto discard_data;
 
 	server->total_read += rc;
-	if (rc < npages * PAGE_SIZE)
-		iov_iter_zero(npages * PAGE_SIZE - rc, &iter);
-	iov_iter_revert(&iter, npages * PAGE_SIZE);
+	if (rc < len)
+		iov_iter_zero(len - rc, &iter);
+	iov_iter_revert(&iter, len);
 	iov_iter_truncate(&iter, dw->len);
 
 	rc = cifs_discard_remaining_data(server);
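
A worked example of the length handling above, assuming PAGE_SIZE is 4096: if the transform header yields dw->len = 5000, then len = round_up(5000, PAGE_SIZE) = 8192 and the folio buffer spans two pages. After a socket read of, say, rc = 5000 bytes, iov_iter_zero(8192 - 5000, &iter) clears the 3192 untouched bytes, iov_iter_revert(&iter, 8192) winds the iterator back to the start of the buffer, and iov_iter_truncate(&iter, 5000) limits the decryption step to the real payload.
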
@@ -4882,7 +4904,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	(*mid)->decrypted = true;
 	rc = handle_read_data(server, *mid, buf,
 			      server->vals->read_rsp_size,
-			      &dw->buffer, dw->len, false);
+			      dw->buffer, dw->len, false);
 	if (rc >= 0) {
 		if (server->ops->is_network_name_deleted) {
 			server->ops->is_network_name_deleted(buf,
@@ -4892,7 +4914,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
 	}
 
 free_pages:
-	cifs_clear_xarray_buffer(&dw->buffer);
+	cifs_clear_folioq_buffer(dw->buffer);
 free_dw:
 	kfree(dw);
 	return rc;