/* { dg-do compile } */
/* { dg-skip-if "incompatible options" { arm*-*-* } { "-march=*" } { "-march=armv7-a" } } */
/* { dg-skip-if "-mpure-code supports M-profile only" { *-*-* } { "-mpure-code" } } */
/* { dg-options "-O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3" } */

/* This is here to ensure that the offset of perf_event_id below
   relative to the LANCHOR symbol exceeds the allowed displacement.  */
/* Padding array: its only purpose is to push perf_event_id far enough
   past the section anchor that the anchor-relative offset exceeds the
   allowed displacement.  */
static int __warned[300];

/* Allocator interface modelled on the Linux slab allocator; these are
   deliberately left undefined -- the test only needs to compile.  */
extern void *kmem_cache_alloc_trace (void *cachep);
extern void *cs_cachep;
extern int nr_cpu_ids;

/* 64-bit counter updated by the exclusive-access asm sequence below;
   8-byte alignment matches the doubleword-exclusive (ldrexd/strexd)
   access requirement.  */
static unsigned long long __attribute__((aligned(8))) perf_event_id;
/* NOTE(review): extraction fragment.  The enclosing function's header,
   its return statement(s), part of the asm template, the asm clobber
   list, and the closing brace are in lines elided from this chunk, and
   the stray leading numbers are original-file line numbers fused into
   the text by extraction.  Confirm against the full file before
   changing anything here.  `cpu', `event' and `tmp' are presumably
   parameters/locals declared in the elided lines -- TODO confirm.  */
/* Receives the 64-bit value produced by the atomic add below.  */
28 unsigned long long result
;
/* Range check against the external cpu count; the statement this
   guards is not visible in this chunk.  */
31 if (cpu
>= nr_cpu_ids
)
/* Allocate from the external cache.  */
34 event
= kmem_cache_alloc_trace (cs_cachep
);
/* Full memory barrier before the atomic update.  */
36 __asm__
__volatile__ ("dmb" : : : "memory");
/* ldrexd/strexd exclusive-access sequence adding 1LL to perf_event_id
   ("@ atomic64_add_return").  NOTE(review): the low-word add, the
   retry branch and the clobber list appear to be in elided lines.  */
38 __asm__
__volatile__("@ atomic64_add_return\n"
39 "1: ldrexd %0, %H0, [%3]\n"
41 " adc %H0, %H0, %H4\n"
42 " strexd %1, %0, %H0, [%3]\n"
45 : "=&r" (result
), "=&r" (tmp
), "+Qo" (perf_event_id
)
46 : "r" (&perf_event_id
), "r" (1LL)
/* Full memory barrier after the atomic update.  */
49 __asm__
__volatile__ ("dmb" : : : "memory");
/* Touch the allocator once per possible cpu.  */
56 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++)
57 kmem_cache_alloc_trace (cs_cachep
);