@@ -41,8 +41,10 @@
 #include <linux/thread_info.h>
 #include <linux/prctl.h>
 #include <linux/stacktrace.h>
+#include <linux/memory_ordering_model.h>
 
 #include <asm/alternative.h>
+#include <asm/apple_cpufeature.h>
 #include <asm/compat.h>
 #include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
@@ -371,6 +373,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	if (system_supports_tpidr2())
 		p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	if (system_has_actlr_state())
+		p->thread.actlr = read_sysreg(actlr_el1);
+#endif
+
 	if (stack_start) {
 		if (is_compat_thread(task_thread_info(p)))
 			childregs->compat_sp = stack_start;
@@ -513,6 +520,65 @@ void update_sctlr_el1(u64 sctlr)
 	isb();
 }
 
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+int arch_prctl_mem_model_get(struct task_struct *t)
+{
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE) &&
+	    t->thread.actlr & ACTLR_APPLE_TSO)
+		return PR_SET_MEM_MODEL_TSO;
+
+	return PR_SET_MEM_MODEL_DEFAULT;
+}
+
+int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
+{
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_FIXED) &&
+	    val == PR_SET_MEM_MODEL_TSO)
+		return 0;
+
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE)) {
+		WARN_ON(!system_has_actlr_state());
+
+		switch (val) {
+		case PR_SET_MEM_MODEL_TSO:
+			t->thread.actlr |= ACTLR_APPLE_TSO;
+			break;
+		case PR_SET_MEM_MODEL_DEFAULT:
+			t->thread.actlr &= ~ACTLR_APPLE_TSO;
+			break;
+		default:
+			return -EINVAL;
+		}
+		write_sysreg(t->thread.actlr, actlr_el1);
+		return 0;
+	}
+
+	if (val == PR_SET_MEM_MODEL_DEFAULT)
+		return 0;
+
+	return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_ARM64_ACTLR_STATE
+/*
+ * IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
+ * expose features that can be controlled by userspace.
+ */
+static void actlr_thread_switch(struct task_struct *next)
+{
+	if (!system_has_actlr_state())
+		return;
+
+	current->thread.actlr = read_sysreg(actlr_el1);
+	write_sysreg(next->thread.actlr, actlr_el1);
+}
+#else
+static inline void actlr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
 /*
  * Thread switching.
  */
@@ -530,6 +596,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(next);
 	ptrauth_thread_switch_user(next);
+	actlr_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -651,6 +718,10 @@ void arch_setup_new_exec(void)
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
+
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+	arch_prctl_mem_model_set(current, PR_SET_MEM_MODEL_DEFAULT);
+#endif
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
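
For context, a minimal userspace sketch of the interface these hooks back. It assumes the PR_{GET,SET}_MEM_MODEL prctl definitions added elsewhere in this series are available via <linux/prctl.h>; they are not mainline ABI at the time of this commit, and PR_GET_MEM_MODEL in particular is inferred from arch_prctl_mem_model_get() above.

/*
 * Hypothetical example, not part of this commit: opt the calling
 * thread into the TSO memory model and read the setting back.
 * Build against kernel headers from this series; PR_GET_MEM_MODEL and
 * PR_SET_MEM_MODEL are assumed to be defined in <linux/prctl.h>.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Fails with EINVAL if neither TSO capability is present. */
	if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0) != 0) {
		perror("PR_SET_MEM_MODEL");
		return 1;
	}

	/* Read the current per-thread setting back. */
	printf("mem model: %d\n", (int)prctl(PR_GET_MEM_MODEL, 0, 0, 0, 0));
	return 0;
}

Note that the model is genuinely per-thread state: it lives in thread.actlr, is inherited by children in copy_thread(), swapped by actlr_thread_switch() on every context switch, and reset to PR_SET_MEM_MODEL_DEFAULT on exec by arch_setup_new_exec() above. On always-TSO (ARM64_HAS_TSO_FIXED) CPUs, requesting TSO succeeds as a no-op.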