 #include <linux/spinlock.h>
 #include <asm/unaccepted_memory.h>
 
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
 static DEFINE_SPINLOCK(unaccepted_memory_lock);
 
+struct accept_range {
+	struct list_head list;
+	unsigned long start;
+	unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
 /*
  * accept_memory() -- Consult bitmap and accept the memory if needed.
  *
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 {
 	struct efi_unaccepted_memory *unaccepted;
 	unsigned long range_start, range_end;
+	struct accept_range range, *entry;
 	unsigned long flags;
 	u64 unit_size;
 
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
 		end = unaccepted->size * unit_size * BITS_PER_BYTE;
 
-	range_start = start / unit_size;
-
+	range.start = start / unit_size;
+	range.end = DIV_ROUND_UP(end, unit_size);
+retry:
 	spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+	/*
+	 * Check if anybody else is working on accepting the same range of
+	 * memory.
+	 *
+	 * The check is done with unit_size granularity. It is crucial to
+	 * catch all accept requests to the same unit_size block, even if
+	 * they don't overlap at the physical address level.
+	 */
+	list_for_each_entry(entry, &accepting_list, list) {
+		if (entry->end < range.start)
+			continue;
+		if (entry->start >= range.end)
+			continue;
+
+		/*
+		 * Somebody else is accepting the range, or at least part
+		 * of it.
+		 *
+		 * Drop the lock and retry until it is complete.
+		 */
+		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+		goto retry;
+	}
+
+	/*
+	 * Register that the range is about to be accepted.
+	 * Make sure nobody else will accept it.
+	 */
+	list_add(&range.list, &accepting_list);
+
+	range_start = range.start;
 	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
-				   DIV_ROUND_UP(end, unit_size)) {
+				   range.end) {
 		unsigned long phys_start, phys_end;
 		unsigned long len = range_end - range_start;
 
 		phys_start = range_start * unit_size + unaccepted->phys_base;
 		phys_end = range_end * unit_size + unaccepted->phys_base;
 
+		/*
+		 * Keep interrupts disabled until the accept operation is
+		 * complete in order to prevent deadlocks.
+		 *
+		 * Enabling interrupts before calling arch_accept_memory()
+		 * creates an opportunity for an interrupt handler to request
+		 * acceptance of the same memory. The handler would then spin
+		 * continuously with interrupts disabled, preventing other
+		 * tasks from making progress with the acceptance process.
+		 */
+		spin_unlock(&unaccepted_memory_lock);
+
 		arch_accept_memory(phys_start, phys_end);
+
+		spin_lock(&unaccepted_memory_lock);
 		bitmap_clear(unaccepted->bitmap, range_start, len);
 	}
+
+	list_del(&range.list);
 	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
 }
 
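The crux of the fix is the interval test in the list walk above: a new request conflicts with an in-flight entry unless it lies entirely below or entirely above it. Both ranges are half-open bit-index ranges [start, end) at unit_size granularity, built with start / unit_size (rounded down) and DIV_ROUND_UP(end, unit_size) (rounded up), so partially covered units are included. The following is a standalone userspace sketch of that predicate, not kernel code; the struct and function names are hypothetical.

/*
 * Sketch of the conflict test from the list_for_each_entry() walk above.
 * Build with: cc -o conflict conflict.c
 */
#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long start;	/* first bit index of the range */
	unsigned long end;	/* one past the last bit index */
};

static bool ranges_conflict(const struct range *entry,
			    const struct range *range)
{
	if (entry->end < range->start)		/* entry entirely below */
		return false;
	if (entry->start >= range->end)		/* entry entirely above */
		return false;
	/*
	 * Note the '<' rather than '<=' above: a request starting exactly
	 * at entry->end shares no bits with entry but is still treated as
	 * busy. That is conservative and only costs a harmless extra retry.
	 */
	return true;
}

int main(void)
{
	struct range busy = { .start = 4, .end = 8 };
	struct range below = { .start = 0, .end = 3 };
	struct range overlap = { .start = 6, .end = 10 };

	printf("below:   %d\n", ranges_conflict(&busy, &below));	/* 0 */
	printf("overlap: %d\n", ranges_conflict(&busy, &overlap));	/* 1 */
	return 0;
}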
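Dropping the spinlock around arch_accept_memory() is safe only because the range was pinned on accepting_list first: any concurrent caller touching the same unit_size blocks keeps retrying until the entry is unlinked, and interrupts stay disabled so a handler on the same CPU cannot end up spinning on a range owned by the task it interrupted. Below is a minimal userspace sketch of this register-then-retry protocol, assuming POSIX threads; a pthread mutex stands in for unaccepted_memory_lock, usleep() stands in for the slow arch_accept_memory() call, the bitmap bookkeeping is omitted, and all names are hypothetical.

/*
 * Sketch of the register-then-retry protocol. Build with:
 * cc -pthread -o retry retry.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct accept_range {
	struct accept_range *next;	/* simplified singly linked list */
	unsigned long start, end;	/* half-open, unit granularity */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct accept_range *accepting_list;

static bool conflicts(const struct accept_range *e,
		      const struct accept_range *r)
{
	return !(e->end < r->start || e->start >= r->end);
}

static void accept_range(unsigned long start, unsigned long end)
{
	struct accept_range range = { .next = NULL, .start = start, .end = end };
	const struct accept_range *entry;
	struct accept_range **pp;

retry:
	pthread_mutex_lock(&lock);
	for (entry = accepting_list; entry; entry = entry->next) {
		if (conflicts(entry, &range)) {
			/* Somebody owns (part of) the range: retry. */
			pthread_mutex_unlock(&lock);
			goto retry;
		}
	}
	/* Claim the range so nobody else works on it concurrently. */
	range.next = accepting_list;
	accepting_list = &range;
	pthread_mutex_unlock(&lock);

	usleep(10000);		/* stand-in for arch_accept_memory() */

	pthread_mutex_lock(&lock);
	for (pp = &accepting_list; *pp != &range; pp = &(*pp)->next)
		;		/* find and unlink our own entry */
	*pp = range.next;
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	unsigned long base = *(unsigned long *)arg;

	accept_range(base, base + 16);
	return NULL;
}

int main(void)
{
	unsigned long base_a = 0, base_b = 8;	/* requests overlap on [8, 16) */
	pthread_t a, b;

	pthread_create(&a, NULL, worker, &base_a);
	pthread_create(&b, NULL, worker, &base_b);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("done: overlapping requests never ran concurrently");
	return 0;
}

With two overlapping requests, the usleep() sections never execute at the same time; whichever thread loses the race simply spins in the retry loop until the winner unlinks its entry. The kernel version additionally clears the accepted bits under the lock, so a retrying caller eventually finds no set bits left in its range and has nothing to accept.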