path: root/libatomic/host-config.h
/* Copyright (C) 2012-2020 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Atomic Library (libatomic).

   Libatomic is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libatomic is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Included after all of the more target-specific host-config.h files.  */


/* The target may have some OS-specific way to implement compare-and-swap.  */
#if !defined(atomic_compare_exchange_n) && SIZE(HAVE_ATOMIC_CAS)
# define atomic_compare_exchange_n  __atomic_compare_exchange_n
#endif
#if !defined(atomic_compare_exchange_w) && WSIZE(HAVE_ATOMIC_CAS)
# define atomic_compare_exchange_w  __atomic_compare_exchange_n
#endif
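
/* Example (illustration only, compiled out): a minimal sketch of a
   word-sized fetch-and-or written as a compare-and-swap retry loop on
   top of atomic_compare_exchange_w, roughly in the style of this
   library's size-generic routines.  The name `example_fetch_or_w' is
   hypothetical, and UWORD here is a local stand-in for the library's
   word-sized unsigned type.  */
#if 0
typedef unsigned long UWORD;

static UWORD
example_fetch_or_w (UWORD *ptr, UWORD val, int smodel)
{
  UWORD old = __atomic_load_n (ptr, __ATOMIC_RELAXED);
  /* Loop until the CAS succeeds; the macro may expand to an OS-specific
     primitive rather than the compiler builtin.  */
  while (!atomic_compare_exchange_w (ptr, &old, old | val,
				     1, smodel, __ATOMIC_RELAXED))
    continue;
  return old;
}
#endif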

/* For some targets it may be significantly faster to avoid all barriers
   when the user only wants relaxed memory order.  When optimizing for
   size, though, the special case is not worth the extra code bloat.  In
   either configuration the macro uses its argument, to avoid unused-value
   warnings.  */
#if defined(WANT_SPECIALCASE_RELAXED) && !defined(__OPTIMIZE_SIZE__)
# define maybe_specialcase_relaxed(x)	((x) == __ATOMIC_RELAXED)
#else
# define maybe_specialcase_relaxed(x)	((x) & 0)
#endif
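
/* Example (illustration only, compiled out): skipping the fence entirely
   when the caller asked for relaxed order.  When the macro folds to
   ((x) & 0), the compiler deletes the branch, so the size-optimized
   build pays no code-bloat cost.  `example_store' is a hypothetical
   name.  */
#if 0
static void
example_store (int *ptr, int val, int smodel)
{
  if (maybe_specialcase_relaxed (smodel))
    __atomic_store_n (ptr, val, __ATOMIC_RELAXED);	/* Fast path.  */
  else
    __atomic_store_n (ptr, val, __ATOMIC_SEQ_CST);
}
#endif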

/* Similar, but for targets for which the seq_cst model is sufficiently
   more expensive than the acq_rel model.  */
#if defined(WANT_SPECIALCASE_ACQREL) && !defined(__OPTIMIZE_SIZE__)
# define maybe_specialcase_acqrel(x)	((x) != __ATOMIC_SEQ_CST)
#else
# define maybe_specialcase_acqrel(x)	((x) & 0)
#endif


/* The target may have some OS-specific way to emit barriers.  */
#ifndef pre_post_barrier
static inline void __attribute__((always_inline, artificial))
pre_barrier(int model)
{
  if (!maybe_specialcase_relaxed(model))
    {
      if (maybe_specialcase_acqrel(model))
        __atomic_thread_fence (__ATOMIC_ACQ_REL);
      else
        __atomic_thread_fence (__ATOMIC_SEQ_CST);
    }
}
static inline void __attribute__((always_inline, artificial))
post_barrier(int model)
{
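  /* The same fence is required after the operation as before it.  */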
  pre_barrier(model);
}
#define pre_post_barrier 1
#endif /* pre_post_barrier */
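
/* Example (illustration only, compiled out): the usual pattern in which
   pre_barrier/post_barrier bracket a primitive that provides no ordering
   of its own, supplying whatever fencing the requested model needs.
   `example_exchange' is a hypothetical name.  */
#if 0
static int
example_exchange (int *ptr, int val, int smodel)
{
  int ret;
  pre_barrier (smodel);
  ret = __atomic_exchange_n (ptr, val, __ATOMIC_RELAXED);
  post_barrier (smodel);
  return ret;
}
#endif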

/* Similar, but assume that acq_rel ordering is already provided by the
   locks protecting the operation, so only seq_cst needs extra fencing.  */
#ifndef pre_post_seq_barrier
static inline void __attribute__((always_inline, artificial))
pre_seq_barrier(int model)
{
}
static inline void __attribute__((always_inline, artificial))
post_seq_barrier(int model)
{
}
#define pre_post_seq_barrier 1
#endif
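
/* Example (illustration only, compiled out): pre_seq_barrier and
   post_seq_barrier wrap a region whose protecting lock already supplies
   acquire/release ordering, so a target's override only needs to
   strengthen seq_cst.  `example_lock'/`example_unlock' are hypothetical
   helpers standing in for the library's locking scheme.  */
#if 0
extern void example_lock (void *);
extern void example_unlock (void *);

static void
example_locked_store (long long *ptr, long long val, int smodel)
{
  pre_seq_barrier (smodel);
  example_lock (ptr);
  *ptr = val;			/* The lock orders this store.  */
  example_unlock (ptr);
  post_seq_barrier (smodel);
}
#endif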