@@ -33,24 +33,14 @@ static unsigned long mpu_get_region_base(mpid_t mpid)
     return PRBAR_BASE(prbar);
 }
 
-static unsigned long mpu_get_region_limit(mpid_t mpid)
-{
-    unsigned long prlar = 0;
-
-    sysreg_prselr_el2_write(mpid);
-    ISB();
-    prlar = sysreg_prlar_el2_read();
-
-    return PRLAR_LIMIT(prlar);
-}
-
-static mpid_t mpu_find_region_base(struct mp_region* mpr)
+static mpid_t mpu_find_region(struct mp_region* mpr, asid_t asid)
 {
     mpid_t mpid = INVALID_MPID;
 
     for (mpid_t i = 0; i < MPU_ARCH_MAX_NUM_ENTRIES; i++) {
         if (bitmap_get(cpu()->arch.profile.mpu.allocated_entries, i)) {
-            if (mpu_get_region_base(i) == mpr->base) {
+            if (mpu_get_region_base(i) == mpr->base &&
+                cpu()->arch.profile.mpu.entry_asid[i] == asid) {
                 mpid = i;
                 break;
             }
@@ -59,33 +49,22 @@ static mpid_t mpu_find_region_base(struct mp_region* mpr)
     return mpid;
 }
 
-static mpid_t mpu_find_region_exact(struct mp_region* mpr)
-{
-    mpid_t mpid = mpu_find_region_base(mpr);
-
-    if (mpid != INVALID_MPID) {
-        if (mpu_get_region_limit(mpid) == PRLAR_LIMIT(mpr->base + mpr->size - 1)) {
-            return mpid;
-        }
-    }
-
-    return INVALID_MPID;
-}
-
-static mpid_t mpu_entry_allocate(void)
+static mpid_t mpu_entry_allocate(asid_t asid)
 {
     mpid_t reg_num = INVALID_MPID;
     reg_num = (mpid_t)bitmap_find_nth(cpu()->arch.profile.mpu.allocated_entries,
         MPU_ARCH_MAX_NUM_ENTRIES, 1, 0, false);
 
     bitmap_set(cpu()->arch.profile.mpu.allocated_entries, reg_num);
+    cpu()->arch.profile.mpu.entry_asid[reg_num] = asid;
 
     return reg_num;
 }
 
 static inline void mpu_entry_deallocate(mpid_t mpid)
 {
     bitmap_clear(cpu()->arch.profile.mpu.allocated_entries, mpid);
+    cpu()->arch.profile.mpu.entry_asid[mpid] = INVALID_ASID;
 }
 
 static inline void mpu_entry_lock(mpid_t mpid)
@@ -145,7 +124,7 @@ bool mpu_map(struct addr_space* as, struct mp_region* mpr, bool locked)
      */
 
     else {
-        mpid = mpu_entry_allocate();
+        mpid = mpu_entry_allocate(as->id);
         if (mpid != INVALID_MPID) {
             if (locked) {
                 mpu_entry_lock(mpid);
@@ -164,7 +143,7 @@ bool mpu_map(struct addr_space* as, struct mp_region* mpr, bool locked)
 bool mpu_unmap(struct addr_space* as, struct mp_region* mpr)
 {
     UNUSED_ARG(as);
-    mpid_t mpid = mpu_find_region_exact(mpr);
+    mpid_t mpid = mpu_find_region(mpr, as->id);
 
     if (mpid != INVALID_MPID) {
         mpu_entry_deallocate(mpid);
180159
181160bool mpu_update (struct addr_space * as , struct mp_region * mpr )
182161{
183- UNUSED_ARG (as );
184-
185- mpid_t mpid = mpu_find_region_base (mpr );
162+ mpid_t mpid = mpu_find_region (mpr , as -> id );
186163
187164 if (mpid != INVALID_MPID ) {
188165 mpu_entry_update_limit (mpid , mpr );
@@ -213,5 +190,6 @@ void mpu_init()
     for (mpid_t mpid = 0; mpid < MPU_ARCH_MAX_NUM_ENTRIES; mpid++) {
         bitmap_clear(cpu()->arch.profile.mpu.allocated_entries, mpid);
         bitmap_clear(cpu()->arch.profile.mpu.locked_entries, mpid);
+        cpu()->arch.profile.mpu.entry_asid[mpid] = INVALID_ASID;
     }
 }
0 commit comments