@@ -86,6 +86,15 @@ static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
86
86
return true;
87
87
}
88
88
89
+ static inline int sgx2_supported (void )
90
+ {
91
+ unsigned int eax , ebx , ecx , edx ;
92
+
93
+ __cpuid_count (SGX_CPUID , 0x0 , eax , ebx , ecx , edx );
94
+
95
+ return eax & 0x2 ;
96
+ }
97
+
89
98
static unsigned long elf_sym_hash (const char * name )
90
99
{
91
100
unsigned long h = 0 , high ;
@@ -840,4 +849,245 @@ TEST_F(enclave, epcm_permissions)
840
849
EXPECT_EQ (self -> run .exception_addr , 0 );
841
850
}
842
851
852
/*
 * Test the addition of pages to an initialized enclave via writing to
 * a page belonging to the enclave's address space but was not added
 * during enclave creation.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	/* Dynamic page addition (EAUG/EACCEPT) requires SGX2 hardware. */
	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Sum the sizes of all loaded segments to find the end of the
	 * pages added at enclave creation time.
	 */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes
	 * and test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Create memory mapping for the page that will be added. New
	 * memory mapping is for one page right after all existing
	 * mappings.
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Attempt to write to the new page from within enclave.
	 * Expected to fail since page is not (yet) part of the enclave.
	 * The first #PF will trigger the addition of the page to the
	 * enclave, but since the new page needs an EACCEPT from within the
	 * enclave before it can be used it would not be possible
	 * to successfully return to the failing instruction. This is the
	 * cause of the second #PF captured here having the SGX bit set,
	 * it is from hardware preventing the page from being used.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	/* The AEX path leaves run.function == ERESUME with #PF (vector 14)
	 * details recorded for the faulting enclave address.
	 */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);

	/* Error code 0x6 (write|user on a non-present page, no SGX bit)
	 * means the kernel never EAUG'd the page — i.e. no SGX2 EAUG
	 * support in this kernel.
	 */
	if (self->run.exception_error_code == 0x6) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	/* 0x8007: present|write|user with bit 15 (SGX) set — the page was
	 * EAUG'd but hardware blocks use until EACCEPT runs inside the
	 * enclave.
	 */
	EXPECT_EQ(self->run.exception_error_code, 0x8007);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/* Handle AEX by running EACCEPT from new entry point. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	/* EACCEPT the pending page with the RW|REG|PENDING attributes the
	 * kernel's EAUG assigned it.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Can now return to main TCS to resume execution. */
	self->run.tcs = self->encl.encl_base;

	/* ERESUME restarts the enclave at the instruction that faulted;
	 * the original write to the (now accepted) page should succeed.
	 */
	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0,
					 &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
982
+
983
/*
 * Test for the addition of pages to an initialized enclave via a
 * pre-emptive run of EACCEPT on page to be added.
 */
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	/* Dynamic page addition (EAUG/EACCEPT) requires SGX2 hardware. */
	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Sum the sizes of all loaded segments to find the end of the
	 * pages added at enclave creation time.
	 */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() a page at end of existing enclave to be used for dynamic
	 * EPC page.
	 *
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */

	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again
	 * without a #PF). All should be transparent to userspace.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* A surviving #PF (vector 14, error code 4 = user-mode read of a
	 * non-present page) on the new page's address means the kernel's
	 * fault handler did not EAUG it — no SGX2 support in this kernel.
	 */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - attempt to
	 * write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}
1092
+
843
1093
TEST_HARNESS_MAIN
0 commit comments