@@ -190,3 +190,166 @@ unsafe impl<T: ?Sized> Lock<DisabledInterrupts> for SpinLock<T> {
190
190
& self . data
191
191
}
192
192
}
193
+
194
/// Safely initialises a [`RawSpinLock`] with the given name, generating a new lock class.
///
/// This simply forwards to [`init_with_lockdep!`], which creates the lockdep class key for
/// the new lock.
#[macro_export]
macro_rules! rawspinlock_init {
    ($lock:expr, $name:literal) => {
        $crate::init_with_lockdep!($lock, $name)
    };
}
201
+
202
/// Exposes the kernel's [`raw_spinlock_t`].
///
/// It is very similar to [`SpinLock`], except that it is guaranteed not to sleep even on RT
/// variants of the kernel.
///
/// # Examples
///
/// ```
/// # use kernel::prelude::*;
/// # use kernel::sync::RawSpinLock;
/// # use core::pin::Pin;
///
/// struct Example {
///     a: u32,
///     b: u32,
/// }
///
/// // Function that acquires the raw spinlock without changing interrupt state.
/// fn lock_example(value: &RawSpinLock<Example>) {
///     let mut guard = value.lock();
///     guard.a = 10;
///     guard.b = 20;
/// }
///
/// // Function that acquires the raw spinlock and disables interrupts while holding it.
/// fn lock_irqdisable_example(value: &RawSpinLock<Example>) {
///     let mut guard = value.lock_irqdisable();
///     guard.a = 30;
///     guard.b = 40;
/// }
///
/// // Initialises a raw spinlock and calls the example functions.
/// pub fn spinlock_example() {
///     // SAFETY: `rawspinlock_init` is called below.
///     let mut value = unsafe { RawSpinLock::new(Example { a: 1, b: 2 }) };
///     // SAFETY: We don't move `value`.
///     kernel::rawspinlock_init!(unsafe { Pin::new_unchecked(&mut value) }, "value");
///     lock_example(&value);
///     lock_irqdisable_example(&value);
/// }
/// ```
///
/// [`raw_spinlock_t`]: ../../../include/linux/spinlock.h
pub struct RawSpinLock<T: ?Sized> {
    /// The kernel lock object. Left uninitialised by [`RawSpinLock::new`]; it is set up later
    /// via `init_lock` (see the `rawspinlock_init!` macro).
    spin_lock: Opaque<bindings::raw_spinlock>,

    // Spinlocks are architecture-defined. So we conservatively require them to be pinned in case
    // some architecture uses self-references now or in the future.
    _pin: PhantomPinned,

    /// The data protected by the lock; accessed through the guards returned by `lock` and
    /// `lock_irqdisable`.
    data: UnsafeCell<T>,
}
254
+
255
// SAFETY: `RawSpinLock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send> Send for RawSpinLock<T> {}
257
+
258
// SAFETY: `RawSpinLock` serialises the interior mutability it provides, so it is `Sync` as long as
// the data it protects is `Send`.
unsafe impl<T: ?Sized + Send> Sync for RawSpinLock<T> {}
261
+
262
+ impl < T > RawSpinLock < T > {
263
+ /// Constructs a new raw spinlock.
264
+ ///
265
+ /// # Safety
266
+ ///
267
+ /// The caller must call [`RawSpinLock::init_lock`] before using the raw spinlock.
268
+ pub const unsafe fn new ( t : T ) -> Self {
269
+ Self {
270
+ spin_lock : Opaque :: uninit ( ) ,
271
+ data : UnsafeCell :: new ( t) ,
272
+ _pin : PhantomPinned ,
273
+ }
274
+ }
275
+ }
276
+
277
impl<T: ?Sized> RawSpinLock<T> {
    /// Locks the raw spinlock and gives the caller access to the data protected by it. Only one
    /// thread at a time is allowed to access the protected data.
    pub fn lock(&self) -> Guard<'_, Self, WriteLock> {
        let ctx = <Self as Lock<WriteLock>>::lock_noguard(self);
        // SAFETY: The raw spinlock was just acquired.
        unsafe { Guard::new(self, ctx) }
    }

    /// Locks the raw spinlock and gives the caller access to the data protected by it.
    /// Additionally it disables interrupts (if they are enabled).
    ///
    /// When the lock is unlocked, the interrupt state (enabled/disabled) is restored.
    pub fn lock_irqdisable(&self) -> Guard<'_, Self, DisabledInterrupts> {
        let ctx = <Self as Lock<DisabledInterrupts>>::lock_noguard(self);
        // SAFETY: The raw spinlock was just acquired.
        unsafe { Guard::new(self, ctx) }
    }
}
296
+
297
impl<T> CreatableLock for RawSpinLock<T> {
    type CreateArgType = T;

    unsafe fn new_lock(data: Self::CreateArgType) -> Self {
        // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
        unsafe { Self::new(data) }
    }

    unsafe fn init_lock(
        self: Pin<&mut Self>,
        name: &'static CStr,
        key: *mut bindings::lock_class_key,
    ) {
        // SAFETY: `spin_lock` points to memory owned by `self`, so it is valid for the duration
        // of the call; `name` is a `'static` C string. NOTE(review): `key` is assumed valid per
        // the trait's safety contract — confirm against `CreatableLock::init_lock`'s docs.
        unsafe { bindings::_raw_spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key) };
    }
}
313
+
314
+ // SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
315
+ unsafe impl < T : ?Sized > Lock for RawSpinLock < T > {
316
+ type Inner = T ;
317
+ type GuardContext = EmptyGuardContext ;
318
+
319
+ fn lock_noguard ( & self ) -> EmptyGuardContext {
320
+ // SAFETY: `spin_lock` points to valid memory.
321
+ unsafe { bindings:: raw_spin_lock ( self . spin_lock . get ( ) ) } ;
322
+ EmptyGuardContext
323
+ }
324
+
325
+ unsafe fn unlock ( & self , _: & mut EmptyGuardContext ) {
326
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
327
+ // the caller.
328
+ unsafe { bindings:: raw_spin_unlock ( self . spin_lock . get ( ) ) } ;
329
+ }
330
+
331
+ fn locked_data ( & self ) -> & UnsafeCell < T > {
332
+ & self . data
333
+ }
334
+ }
335
+
336
+ // SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
337
+ unsafe impl < T : ?Sized > Lock < DisabledInterrupts > for RawSpinLock < T > {
338
+ type Inner = T ;
339
+ type GuardContext = c_types:: c_ulong ;
340
+
341
+ fn lock_noguard ( & self ) -> c_types:: c_ulong {
342
+ // SAFETY: `spin_lock` points to valid memory.
343
+ unsafe { bindings:: raw_spin_lock_irqsave ( self . spin_lock . get ( ) ) }
344
+ }
345
+
346
+ unsafe fn unlock ( & self , ctx : & mut c_types:: c_ulong ) {
347
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
348
+ // the caller.
349
+ unsafe { bindings:: raw_spin_unlock_irqrestore ( self . spin_lock . get ( ) , * ctx) } ;
350
+ }
351
+
352
+ fn locked_data ( & self ) -> & UnsafeCell < T > {
353
+ & self . data
354
+ }
355
+ }
0 commit comments