@@ -56,6 +56,7 @@ struct rt_sigframe_user_layout {
56
56
unsigned long fpsimd_offset ;
57
57
unsigned long esr_offset ;
58
58
unsigned long sve_offset ;
59
+ unsigned long za_offset ;
59
60
unsigned long extra_offset ;
60
61
unsigned long end_offset ;
61
62
};
@@ -218,6 +219,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
218
219
/*
 * Kernel-side record of where each optional register record lives in a
 * user signal frame.  Filled in by parse_user_sigframe(); each member
 * is NULL when the corresponding record is absent from the frame.
 */
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
	struct za_context __user *za;	/* SME ZA storage (ZA_MAGIC record) */
};
222
224
223
225
#ifdef CONFIG_ARM64_SVE
@@ -346,6 +348,101 @@ extern int restore_sve_fpsimd_context(struct user_ctxs *user);
346
348
347
349
#endif /* ! CONFIG_ARM64_SVE */
348
350
351
#ifdef CONFIG_ARM64_SME

/*
 * Write the current task's SME ZA state out to the za_context record at
 * @ctx in the user signal frame.
 *
 * The header (magic, size, vector length, zeroed reserved space) is
 * always written.  The ZA register payload is appended only when ZA is
 * enabled for the task (vq != 0); it is copied from thread.za_state,
 * which is assumed to have been synchronised beforehand by
 * fpsimd_signal_preserve_current_state().
 *
 * Returns 0 on success, -EFAULT if any user access faulted.
 */
static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	/* vq == 0 encodes "header only, no ZA payload" */
	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	/* Record size is padded to the 16-byte frame record alignment */
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.za_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
387
+
388
+ static int restore_za_context (struct user_ctxs __user * user )
389
+ {
390
+ int err ;
391
+ unsigned int vq ;
392
+ struct za_context za ;
393
+
394
+ if (__copy_from_user (& za , user -> za , sizeof (za )))
395
+ return - EFAULT ;
396
+
397
+ if (za .vl != task_get_sme_vl (current ))
398
+ return - EINVAL ;
399
+
400
+ if (za .head .size <= sizeof (* user -> za )) {
401
+ current -> thread .svcr &= ~SYS_SVCR_EL0_ZA_MASK ;
402
+ return 0 ;
403
+ }
404
+
405
+ vq = sve_vq_from_vl (za .vl );
406
+
407
+ if (za .head .size < ZA_SIG_CONTEXT_SIZE (vq ))
408
+ return - EINVAL ;
409
+
410
+ /*
411
+ * Careful: we are about __copy_from_user() directly into
412
+ * thread.za_state with preemption enabled, so protection is
413
+ * needed to prevent a racing context switch from writing stale
414
+ * registers back over the new data.
415
+ */
416
+
417
+ fpsimd_flush_task_state (current );
418
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
419
+
420
+ sme_alloc (current );
421
+ if (!current -> thread .za_state ) {
422
+ current -> thread .svcr &= ~SYS_SVCR_EL0_ZA_MASK ;
423
+ clear_thread_flag (TIF_SME );
424
+ return - ENOMEM ;
425
+ }
426
+
427
+ err = __copy_from_user (current -> thread .za_state ,
428
+ (char __user const * )user -> za +
429
+ ZA_SIG_REGS_OFFSET ,
430
+ ZA_SIG_REGS_SIZE (vq ));
431
+ if (err )
432
+ return - EFAULT ;
433
+
434
+ set_thread_flag (TIF_SME );
435
+ current -> thread .svcr |= SYS_SVCR_EL0_ZA_MASK ;
436
+
437
+ return 0 ;
438
+ }
439
#else /* ! CONFIG_ARM64_SME */

/*
 * Turn any non-optimised out attempts to use these into a link error:
 * no definitions exist anywhere when SME support is compiled out, so
 * any call that survives dead-code elimination fails at link time.
 */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
349
446
350
447
static int parse_user_sigframe (struct user_ctxs * user ,
351
448
struct rt_sigframe __user * sf )
@@ -360,6 +457,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
360
457
361
458
user -> fpsimd = NULL ;
362
459
user -> sve = NULL ;
460
+ user -> za = NULL ;
363
461
364
462
if (!IS_ALIGNED ((unsigned long )base , 16 ))
365
463
goto invalid ;
@@ -425,6 +523,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
425
523
user -> sve = (struct sve_context __user * )head ;
426
524
break ;
427
525
526
+ case ZA_MAGIC :
527
+ if (!system_supports_sme ())
528
+ goto invalid ;
529
+
530
+ if (user -> za )
531
+ goto invalid ;
532
+
533
+ if (size < sizeof (* user -> za ))
534
+ goto invalid ;
535
+
536
+ user -> za = (struct za_context __user * )head ;
537
+ break ;
538
+
428
539
case EXTRA_MAGIC :
429
540
if (have_extra_context )
430
541
goto invalid ;
@@ -548,6 +659,9 @@ static int restore_sigframe(struct pt_regs *regs,
548
659
}
549
660
}
550
661
662
+ if (err == 0 && system_supports_sme () && user .za )
663
+ err = restore_za_context (& user );
664
+
551
665
return err ;
552
666
}
553
667
@@ -630,6 +744,24 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
630
744
return err ;
631
745
}
632
746
747
+ if (system_supports_sme ()) {
748
+ unsigned int vl ;
749
+ unsigned int vq = 0 ;
750
+
751
+ if (add_all )
752
+ vl = sme_max_vl ();
753
+ else
754
+ vl = task_get_sme_vl (current );
755
+
756
+ if (thread_za_enabled (& current -> thread ))
757
+ vq = sve_vq_from_vl (vl );
758
+
759
+ err = sigframe_alloc (user , & user -> za_offset ,
760
+ ZA_SIG_CONTEXT_SIZE (vq ));
761
+ if (err )
762
+ return err ;
763
+ }
764
+
633
765
return sigframe_alloc_end (user );
634
766
}
635
767
@@ -678,6 +810,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
678
810
err |= preserve_sve_context (sve_ctx );
679
811
}
680
812
813
+ /* ZA state if present */
814
+ if (system_supports_sme () && err == 0 && user -> za_offset ) {
815
+ struct za_context __user * za_ctx =
816
+ apply_user_offset (user , user -> za_offset );
817
+ err |= preserve_za_context (za_ctx );
818
+ }
819
+
681
820
if (err == 0 && user -> extra_offset ) {
682
821
char __user * sfp = (char __user * )user -> sigframe ;
683
822
char __user * userp =
0 commit comments