@@ -68,9 +68,9 @@
 //! ```

 #![no_std]
-#![feature(asm_experimental_arch)]
-#![cfg_attr(not(target_arch = "msp430"), feature(core_intrinsics))]
+#![cfg_attr(target_arch = "msp430", feature(asm_experimental_arch))]

+#[cfg(target_arch = "msp430")]
 use core::arch::asm;
 use core::cell::UnsafeCell;
 use core::fmt;
@@ -681,42 +681,50 @@ macro_rules! atomic_int {
         impl AtomicOperations for $int_type {
             #[inline(always)]
             unsafe fn atomic_store(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_store(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .store(val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_load(dst: *const Self) -> Self {
-                ::core::intrinsics::atomic_load(dst)
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .load(::core::sync::atomic::Ordering::SeqCst)
             }

             #[inline(always)]
             unsafe fn atomic_add(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xadd(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_add(val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_sub(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xsub(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_sub(val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_and(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_and(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_and(val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_clear(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_and(dst, !val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_and(!val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_or(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_or(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_or(val, ::core::sync::atomic::Ordering::SeqCst);
             }

             #[inline(always)]
             unsafe fn atomic_xor(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xor(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_xor(val, ::core::sync::atomic::Ordering::SeqCst);
             }
         }
     }
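
For reference, this is roughly what the rewritten body expands to for a 16-bit integer, with `AtomicU16` standing in for `$atomic_type` and `u16` for `$int_type`; the free function name below is purely illustrative and not part of the crate:

    use core::sync::atomic::{AtomicU16, Ordering};

    // Sketch of the expanded `atomic_store` for `$int_type = u16`:
    // the destination pointer is reinterpreted as the layout-compatible
    // core atomic type and the write goes through its SeqCst `store`,
    // replacing the removed `core::intrinsics::atomic_store` call.
    unsafe fn atomic_store_u16(dst: *mut u16, val: u16) {
        (*(dst as *const AtomicU16)).store(val, Ordering::SeqCst);
    }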