@@ -627,6 +627,7 @@ mod tests {
     use blake2::{digest::consts::U32, Blake2b};
     use proptest::collection::vec;
     use proptest::prelude::*;
+    use rand::{seq::IteratorRandom, thread_rng};
 
     prop_compose! {
         fn arb_tree(max_size: u32)
@@ -683,36 +684,6 @@ mod tests {
             assert_eq!(tree.nodes, decoded.nodes);
         }
 
-        #[test]
-        fn test_create_batch_proof((t, values) in arb_tree(30)) {
-            let mut mt_index_list: Vec<usize> = Vec::new();
-            for (i, _v) in values.iter().enumerate() {
-                mt_index_list.push(i);
-            }
-            mt_index_list.sort_unstable();
-            let batch_proof = t.get_batched_path(mt_index_list);
-            assert!(t.to_commitment_batch_compat().check(&values, &batch_proof).is_ok());
-        }
-
-        #[test]
-        fn test_bytes_batch_path((t, values) in arb_tree(30)) {
-            let mut mt_index_list: Vec<usize> = Vec::new();
-            for (i, _v) in values.iter().enumerate() {
-                mt_index_list.push(i);
-            }
-            mt_index_list.sort_unstable();
-
-            let bp = t.get_batched_path(mt_index_list);
-
-            let bytes = &bp.to_bytes();
-            let deserialized = BatchPath::from_bytes(bytes).unwrap();
-            assert!(t.to_commitment_batch_compat().check(&values, &deserialized).is_ok());
-
-            let encoded = bincode::serialize(&bp).unwrap();
-            let decoded: BatchPath<Blake2b<U32>> = bincode::deserialize(&encoded).unwrap();
-            assert!(t.to_commitment_batch_compat().check(&values, &decoded).is_ok());
-        }
-
         #[test]
         fn test_bytes_tree_commitment_batch_compat((t, values) in arb_tree(5)) {
             let encoded = bincode::serialize(&t.to_commitment_batch_compat()).unwrap();
@@ -778,4 +749,47 @@ mod tests {
             assert!(t.to_commitment_batch_compat().check(&batch_values, &path).is_err());
         }
     }
+
+    prop_compose! {
+        fn arb_tree_arb_batch(max_size: u32)
+                             (v in vec(any::<u64>(), 2..max_size as usize)) -> (MerkleTree<Blake2b<U32>>, Vec<MTLeaf>, Vec<usize>) {
+            let mut rng = thread_rng();
+            let size = v.len();
+            let pks = vec![VerificationKey::default(); size];
+            let leaves = pks.into_iter().zip(v.into_iter()).map(|(key, stake)| MTLeaf(key, stake)).collect::<Vec<MTLeaf>>();
+
+            let indices: Vec<usize> = (0..size).collect();
+            let mut mt_list: Vec<usize> = indices.into_iter().choose_multiple(&mut rng, size * 2 / 10 + 1);
+            mt_list.sort_unstable();
+
+            let mut batch_values: Vec<MTLeaf> = Vec::with_capacity(mt_list.len());
+            for i in mt_list.iter() {
+                batch_values.push(leaves[*i]);
+            }
+
+            (MerkleTree::<Blake2b<U32>>::create(&leaves), batch_values, mt_list)
+        }
+    }
+
+    proptest! {
+        #![proptest_config(ProptestConfig::with_cases(100))]
+        #[test]
+        fn test_create_batch_proof((t, batch_values, indices) in arb_tree_arb_batch(30)) {
+            let batch_proof = t.get_batched_path(indices);
+            assert!(t.to_commitment_batch_compat().check(&batch_values, &batch_proof).is_ok());
+        }
+
+        #[test]
+        fn test_bytes_batch_path((t, batch_values, indices) in arb_tree_arb_batch(30)) {
+            let bp = t.get_batched_path(indices);
+
+            let bytes = &bp.to_bytes();
+            let deserialized = BatchPath::from_bytes(bytes).unwrap();
+            assert!(t.to_commitment_batch_compat().check(&batch_values, &deserialized).is_ok());
+
+            let encoded = bincode::serialize(&bp).unwrap();
+            let decoded: BatchPath<Blake2b<U32>> = bincode::deserialize(&encoded).unwrap();
+            assert!(t.to_commitment_batch_compat().check(&batch_values, &decoded).is_ok());
+        }
+    }
 }
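
Note: the heart of the change is that `arb_tree_arb_batch` no longer batch-proves every leaf; it draws a random subset of roughly 20% of the indices (at least one) and proves only those, sorting the list before handing it to `get_batched_path`. A minimal standalone sketch of just that selection step, assuming `rand` 0.8 (the crate whose `IteratorRandom`/`thread_rng` the diff imports):

    use rand::{seq::IteratorRandom, thread_rng};

    fn main() {
        let size = 30usize; // number of leaves in the generated tree
        let mut rng = thread_rng();

        // Pick roughly 20% of the indices, at least one. choose_multiple
        // returns them in arbitrary order, so sort afterwards, as the
        // tests do before calling get_batched_path.
        let mut indices: Vec<usize> = (0..size).choose_multiple(&mut rng, size * 2 / 10 + 1);
        indices.sort_unstable();

        println!("proving leaves {:?}", indices);
    }

One design consequence worth noting: because the subset is drawn from `thread_rng` inside `prop_compose!` rather than from proptest's seeded RNG, a persisted failing seed will regenerate the same leaf values but not necessarily the same index subset on replay.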