@@ -538,6 +538,88 @@ pub fn clrex() void {
538538 asm volatile ("clrex" );
539539}
540540
/// Atomic operations with a fallback to critical sections for Cortex-M0/M0+.
///
/// ARMv6-M cores (Cortex-M0/M0+) lack the exclusive-access instructions
/// (LDREX/STREX) needed for native atomics, so on those targets atomicity is
/// emulated by briefly disabling interrupts around the access.
///
/// NOTE(review): the fallback paths call `interrupt.enable_interrupts()`
/// unconditionally on exit. If a caller invokes these with interrupts already
/// disabled, interrupts would be wrongly re-enabled — confirm whether the
/// `interrupt` API offers a save/restore (PRIMASK) critical section instead.
pub const atomic = struct {
    /// True when the core natively supports atomic read-modify-write
    /// instructions; false on Cortex-M0/M0+.
    pub const has_native_atomics = switch (cortex_m) {
        .cortex_m0, .cortex_m0plus => false,
        else => true,
    };

    /// Atomically adds `delta` to `ptr.*` with wrapping arithmetic and
    /// returns the value `ptr.*` held before the addition.
    pub fn add(comptime T: type, ptr: *T, delta: T) T {
        if (comptime has_native_atomics) {
            return @atomicRmw(T, ptr, .Add, delta, .monotonic);
        } else {
            interrupt.disable_interrupts();
            defer interrupt.enable_interrupts();
            const old_value = ptr.*;
            ptr.* = old_value +% delta;
            return old_value;
        }
    }

    /// Atomically loads and returns `ptr.*` with the given memory ordering.
    /// The ordering is ignored on the fallback path (the critical section
    /// already serializes the access).
    pub fn load(comptime T: type, ptr: *const T, comptime ordering: std.builtin.AtomicOrder) T {
        if (comptime has_native_atomics) {
            return @atomicLoad(T, ptr, ordering);
        } else {
            interrupt.disable_interrupts();
            defer interrupt.enable_interrupts();
            return ptr.*;
        }
    }

    /// Atomically stores `value` into `ptr.*` with the given memory ordering.
    /// The ordering is ignored on the fallback path.
    pub fn store(comptime T: type, ptr: *T, value: T, comptime ordering: std.builtin.AtomicOrder) void {
        if (comptime has_native_atomics) {
            @atomicStore(T, ptr, value, ordering);
        } else {
            interrupt.disable_interrupts();
            defer interrupt.enable_interrupts();
            ptr.* = value;
        }
    }

    /// Atomic compare-and-swap: if `ptr.*` equals `expected_value`, stores
    /// `new_value` and returns `null` (success); otherwise leaves `ptr.*`
    /// untouched and returns the current value.
    ///
    /// Uses `@cmpxchgStrong` so the native path cannot fail spuriously,
    /// matching the critical-section fallback, which also never fails
    /// spuriously. (A weak CAS would force callers to loop on the native
    /// path only, giving the two paths different contracts.)
    pub fn cmpxchg(comptime T: type, ptr: *T, expected_value: T, new_value: T, comptime success_ordering: std.builtin.AtomicOrder, comptime failure_ordering: std.builtin.AtomicOrder) ?T {
        if (comptime has_native_atomics) {
            return @cmpxchgStrong(T, ptr, expected_value, new_value, success_ordering, failure_ordering);
        } else {
            interrupt.disable_interrupts();
            defer interrupt.enable_interrupts();
            const current = ptr.*;
            if (current == expected_value) {
                ptr.* = new_value;
                return null;
            }
            return current;
        }
    }

    /// Atomic read-modify-write: applies `op` with `operand` to `ptr.*` and
    /// returns the previous value. Add/Sub use wrapping arithmetic, mirroring
    /// `@atomicRmw` semantics. The ordering is ignored on the fallback path.
    pub fn rmw(comptime T: type, ptr: *T, comptime op: std.builtin.AtomicRmwOp, operand: T, comptime ordering: std.builtin.AtomicOrder) T {
        if (comptime has_native_atomics) {
            return @atomicRmw(T, ptr, op, operand, ordering);
        } else {
            interrupt.disable_interrupts();
            defer interrupt.enable_interrupts();
            const old_value = ptr.*;
            ptr.* = switch (op) {
                .Xchg => operand,
                .Add => old_value +% operand,
                .Sub => old_value -% operand,
                .And => old_value & operand,
                .Nand => ~(old_value & operand),
                .Or => old_value | operand,
                .Xor => old_value ^ operand,
                .Max => @max(old_value, operand),
                .Min => @min(old_value, operand),
            };
            return old_value;
        }
    }
};

622+
541623/// The RAM vector table used. You can swap interrupt handlers at runtime here.
542624/// Available when using a RAM vector table or a RAM image.
543625pub var ram_vector_table : VectorTable align (256 ) = if (using_ram_vector_table or is_ram_image )
@@ -624,7 +706,7 @@ pub const startup_logic = struct {
624706 }
625707
626708 // Apply user-set interrupts
627- // TODO: We might want to fail compilation if any interruptt is already set, since that
709+ // TODO: We might want to fail compilation if any interrupt is already set, since that
628710 // could e.g. disable timekeeping
629711 for (@typeInfo (@TypeOf (microzig .options .interrupts )).@"struct" .fields ) | field | {
630712 const maybe_handler = @field (microzig .options .interrupts , field .name );
0 commit comments