/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations appear to enforce ordering between memory   */
/* operations, EXCEPT that a later read can pass earlier writes,        */
/* presumably due to the visible presence of store buffers.             */
/* We ignore the fact that the official specs seem to be much weaker    */
/* (and arguably too weak to be usable).                                */

#include "../ordered_except_wr.h"

#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"

AO_INLINE void
AO_nop_full(void)
{
  /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips.      */
  __asm__ __volatile__("mfence" : : : "memory");
}
#define AO_HAVE_nop_full
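
/* Illustrative sketch (not part of this header): the store-buffer      */
/* reordering described above is exactly what breaks a naive            */
/* Dekker-style handshake, and a full barrier after the store restores  */
/* it.  "flag0", "flag1" and enter_critical_section are hypothetical    */
/* names.                                                               */
/*
 *   volatile AO_t flag0 = 0, flag1 = 0;
 *
 *   // Thread 0:                        // Thread 1:
 *   AO_store(&flag0, 1);                AO_store(&flag1, 1);
 *   AO_nop_full();                      AO_nop_full();
 *   if (AO_load(&flag1) == 0)           if (AO_load(&flag0) == 0)
 *     enter_critical_section();           enter_critical_section();
 *
 * Without AO_nop_full, each thread's load may be satisfied before its
 * own earlier store becomes visible, so both threads can read 0 and
 * enter the critical section together.
 */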

/* As far as we can tell, the lfence and sfence instructions are not    */
/* currently needed or useful for cached memory accesses.               */

AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xadd %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add_full
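
/* Illustrative sketch (not part of this header): a shared event        */
/* counter.  The return value is the counter's value just before the    */
/* increment, so it can also serve as a unique per-caller ticket.       */
/*
 *   static volatile AO_t event_count = 0;
 *
 *   AO_t ticket = AO_fetch_and_add_full(&event_count, 1);
 */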

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_int_fetch_and_add_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; or %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
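
/* Illustrative sketch (not part of this header): publishing status     */
/* bits in a shared word.  Concurrent callers cannot lose each other's  */
/* bits, unlike a plain read-modify-write.  The flag names are          */
/* hypothetical.                                                        */
/*
 *   #define FLAG_READY ((AO_t)1 << 0)
 *   #define FLAG_ERROR ((AO_t)1 << 1)
 *   static volatile AO_t status = 0;
 *
 *   AO_or_full(&status, FLAG_READY);
 */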

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix: it is  */
  /* implicitly locked when one operand is in memory.                   */
  __asm__ __volatile__("xchgb %0, %1"
                : "=q"(oldval), "=m"(*addr)
                : "0"((unsigned char)0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
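
/* Illustrative sketch (not part of this header): a minimal spinlock,   */
/* the canonical use of test-and-set.  AO_TS_t, AO_TS_INITIALIZER,      */
/* AO_TS_SET and AO_CLEAR are provided elsewhere in libatomic_ops.      */
/*
 *   static volatile AO_TS_t lock = AO_TS_INITIALIZER;
 *
 *   void acquire(void) {
 *     while (AO_test_and_set_full(&lock) == AO_TS_SET)
 *       ;                  // spin until the holder clears the flag
 *   }
 *   void release(void) { AO_CLEAR(&lock); }
 */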

/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
    char result;
    __asm__ __volatile__("lock; cmpxchg %3, %0; setz %1"
                         : "=m" (*addr), "=a" (result)
                         : "m" (*addr), "r" (new_val), "a" (old) : "memory");
    return (int) result;
# endif
}
#define AO_HAVE_compare_and_swap_full
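
/* Illustrative sketch (not part of this header): the usual CAS retry   */
/* loop, here computing an atomic maximum.  On failure the value is     */
/* simply re-read and the update retried.  "atomic_max" is a            */
/* hypothetical name.                                                   */
/*
 *   void atomic_max(volatile AO_t *p, AO_t v) {
 *     AO_t old;
 *     do {
 *       old = AO_load(p);
 *       if (old >= v) return;          // already large enough
 *     } while (!AO_compare_and_swap_full(p, old, v));
 *   }
 */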

#ifdef AO_CMPXCHG16B_AVAILABLE

/* NEC LE-IT: older AMD Opterons are missing this instruction;
 * on those machines SIGILL will be raised.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock-based) version available. */
/* HB: Changed this to not define either by default.  There are
 * enough machines and toolchains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However, both are clearly useful in certain cases.
 */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m"(*addr), "=a"(result)
                       : "m"(*addr), "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
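
/* Illustrative sketch (not part of this header): the classic use of a  */
/* double-width CAS is pairing a value with a version counter so that   */
/* ABA reuse of the same value is still detected.  AO_val1 and AO_val2  */
/* come from standard_ao_double_t.h; "slot" and "slot_cas" are          */
/* hypothetical names.                                                  */
/*
 *   static volatile AO_double_t slot;  // AO_val1 = value, AO_val2 = version
 *
 *   int slot_cas(AO_t expected, AO_t expected_ver, AO_t new_val) {
 *     return AO_compare_double_and_swap_double_full(&slot,
 *                expected, expected_ver,
 *                new_val, expected_ver + 1);
 *   }
 */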

#else
/* This provides a spinlock-based emulation of double-width CAS,        */
/* implemented in atomic_ops.c.  We probably do not want to do this     */
/* here, since it is not atomic with respect to other kinds of updates  */
/* of *addr.  On the other hand, this may be a useful facility on       */
/* occasion.                                                            */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr, old_val1, old_val2,
                                                     new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */

#endif /* AO_CMPXCHG16B_AVAILABLE */
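
/* With the ILP32 data model on x86-64 (the x32 ABI), pointers, and     */
/* hence AO_t, have the same width as int; AO_T_IS_INT records this     */
/* for the generalization headers.                                      */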
#ifdef __ILP32__
# define AO_T_IS_INT
#endif
