# 1 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" # 1 "/home/chr1s/src/oxide/master/src/objdir-arm64/chromium//" # 1 "" # 1 "" # 1 "/usr/aarch64-linux-gnu/include/stdc-predef.h" 1 3 # 1 "" 2 # 1 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" # 11 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" # 1 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 1 3 4 # 30 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 #pragma GCC push_options #pragma GCC target ("+nothing+simd") # 1 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stdint.h" 1 3 4 # 9 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stdint.h" 3 4 # 1 "/usr/aarch64-linux-gnu/include/stdint.h" 1 3 4 # 25 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 # 1 "/usr/aarch64-linux-gnu/include/features.h" 1 3 4 # 367 "/usr/aarch64-linux-gnu/include/features.h" 3 4 # 1 "/usr/aarch64-linux-gnu/include/sys/cdefs.h" 1 3 4 # 410 "/usr/aarch64-linux-gnu/include/sys/cdefs.h" 3 4 # 1 "/usr/aarch64-linux-gnu/include/bits/wordsize.h" 1 3 4 # 411 "/usr/aarch64-linux-gnu/include/sys/cdefs.h" 2 3 4 # 368 "/usr/aarch64-linux-gnu/include/features.h" 2 3 4 # 391 "/usr/aarch64-linux-gnu/include/features.h" 3 4 # 1 "/usr/aarch64-linux-gnu/include/gnu/stubs.h" 1 3 4 # 1 "/usr/aarch64-linux-gnu/include/bits/wordsize.h" 1 3 4 # 6 "/usr/aarch64-linux-gnu/include/gnu/stubs.h" 2 3 4 # 1 "/usr/aarch64-linux-gnu/include/gnu/stubs-lp64.h" 1 3 4 # 9 "/usr/aarch64-linux-gnu/include/gnu/stubs.h" 2 3 4 # 392 "/usr/aarch64-linux-gnu/include/features.h" 2 3 4 # 26 "/usr/aarch64-linux-gnu/include/stdint.h" 2 3 4 # 1 "/usr/aarch64-linux-gnu/include/bits/wchar.h" 1 3 4 # 27 "/usr/aarch64-linux-gnu/include/stdint.h" 2 3 4 # 1 "/usr/aarch64-linux-gnu/include/bits/wordsize.h" 1 3 4 # 28 "/usr/aarch64-linux-gnu/include/stdint.h" 2 3 4 # 36 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 # 36 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef signed char int8_t; typedef short int int16_t; typedef int int32_t; typedef long int int64_t; typedef unsigned char uint8_t; typedef unsigned short int uint16_t; typedef unsigned int uint32_t; typedef unsigned long int uint64_t; # 65 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef signed char int_least8_t; typedef short int int_least16_t; typedef int int_least32_t; typedef long int int_least64_t; typedef unsigned char uint_least8_t; typedef unsigned short int uint_least16_t; typedef unsigned int uint_least32_t; typedef unsigned long int uint_least64_t; # 90 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef signed char int_fast8_t; typedef long int int_fast16_t; typedef long int int_fast32_t; typedef long int int_fast64_t; # 103 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef unsigned char uint_fast8_t; typedef unsigned long int uint_fast16_t; typedef unsigned long int uint_fast32_t; typedef unsigned long int uint_fast64_t; # 119 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef long int intptr_t; typedef unsigned long int uintptr_t; # 134 "/usr/aarch64-linux-gnu/include/stdint.h" 3 4 typedef long int intmax_t; typedef unsigned long int uintmax_t; # 10 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stdint.h" 2 3 4 # 34 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 2 3 4 typedef __Int8x8_t int8x8_t; typedef __Int16x4_t int16x4_t; typedef __Int32x2_t int32x2_t; typedef __Int64x1_t int64x1_t; typedef __Float16x4_t float16x4_t; typedef __Float32x2_t float32x2_t; typedef __Poly8x8_t poly8x8_t; typedef 
__Poly16x4_t poly16x4_t; typedef __Uint8x8_t uint8x8_t; typedef __Uint16x4_t uint16x4_t; typedef __Uint32x2_t uint32x2_t; typedef __Float64x1_t float64x1_t; typedef __Uint64x1_t uint64x1_t; typedef __Int8x16_t int8x16_t; typedef __Int16x8_t int16x8_t; typedef __Int32x4_t int32x4_t; typedef __Int64x2_t int64x2_t; typedef __Float16x8_t float16x8_t; typedef __Float32x4_t float32x4_t; typedef __Float64x2_t float64x2_t; typedef __Poly8x16_t poly8x16_t; typedef __Poly16x8_t poly16x8_t; typedef __Poly64x2_t poly64x2_t; typedef __Uint8x16_t uint8x16_t; typedef __Uint16x8_t uint16x8_t; typedef __Uint32x4_t uint32x4_t; typedef __Uint64x2_t uint64x2_t; typedef __Poly8_t poly8_t; typedef __Poly16_t poly16_t; typedef __Poly64_t poly64_t; typedef __Poly128_t poly128_t; typedef __fp16 float16_t; typedef float float32_t; typedef double float64_t; typedef struct int8x8x2_t { int8x8_t val[2]; } int8x8x2_t; typedef struct int8x16x2_t { int8x16_t val[2]; } int8x16x2_t; typedef struct int16x4x2_t { int16x4_t val[2]; } int16x4x2_t; typedef struct int16x8x2_t { int16x8_t val[2]; } int16x8x2_t; typedef struct int32x2x2_t { int32x2_t val[2]; } int32x2x2_t; typedef struct int32x4x2_t { int32x4_t val[2]; } int32x4x2_t; typedef struct int64x1x2_t { int64x1_t val[2]; } int64x1x2_t; typedef struct int64x2x2_t { int64x2_t val[2]; } int64x2x2_t; typedef struct uint8x8x2_t { uint8x8_t val[2]; } uint8x8x2_t; typedef struct uint8x16x2_t { uint8x16_t val[2]; } uint8x16x2_t; typedef struct uint16x4x2_t { uint16x4_t val[2]; } uint16x4x2_t; typedef struct uint16x8x2_t { uint16x8_t val[2]; } uint16x8x2_t; typedef struct uint32x2x2_t { uint32x2_t val[2]; } uint32x2x2_t; typedef struct uint32x4x2_t { uint32x4_t val[2]; } uint32x4x2_t; typedef struct uint64x1x2_t { uint64x1_t val[2]; } uint64x1x2_t; typedef struct uint64x2x2_t { uint64x2_t val[2]; } uint64x2x2_t; typedef struct float16x4x2_t { float16x4_t val[2]; } float16x4x2_t; typedef struct float16x8x2_t { float16x8_t val[2]; } float16x8x2_t; typedef struct float32x2x2_t { float32x2_t val[2]; } float32x2x2_t; typedef struct float32x4x2_t { float32x4_t val[2]; } float32x4x2_t; typedef struct float64x2x2_t { float64x2_t val[2]; } float64x2x2_t; typedef struct float64x1x2_t { float64x1_t val[2]; } float64x1x2_t; typedef struct poly8x8x2_t { poly8x8_t val[2]; } poly8x8x2_t; typedef struct poly8x16x2_t { poly8x16_t val[2]; } poly8x16x2_t; typedef struct poly16x4x2_t { poly16x4_t val[2]; } poly16x4x2_t; typedef struct poly16x8x2_t { poly16x8_t val[2]; } poly16x8x2_t; typedef struct int8x8x3_t { int8x8_t val[3]; } int8x8x3_t; typedef struct int8x16x3_t { int8x16_t val[3]; } int8x16x3_t; typedef struct int16x4x3_t { int16x4_t val[3]; } int16x4x3_t; typedef struct int16x8x3_t { int16x8_t val[3]; } int16x8x3_t; typedef struct int32x2x3_t { int32x2_t val[3]; } int32x2x3_t; typedef struct int32x4x3_t { int32x4_t val[3]; } int32x4x3_t; typedef struct int64x1x3_t { int64x1_t val[3]; } int64x1x3_t; typedef struct int64x2x3_t { int64x2_t val[3]; } int64x2x3_t; typedef struct uint8x8x3_t { uint8x8_t val[3]; } uint8x8x3_t; typedef struct uint8x16x3_t { uint8x16_t val[3]; } uint8x16x3_t; typedef struct uint16x4x3_t { uint16x4_t val[3]; } uint16x4x3_t; typedef struct uint16x8x3_t { uint16x8_t val[3]; } uint16x8x3_t; typedef struct uint32x2x3_t { uint32x2_t val[3]; } uint32x2x3_t; typedef struct uint32x4x3_t { uint32x4_t val[3]; } uint32x4x3_t; typedef struct uint64x1x3_t { uint64x1_t val[3]; } uint64x1x3_t; typedef struct uint64x2x3_t { uint64x2_t val[3]; } uint64x2x3_t; typedef struct 
float16x4x3_t { float16x4_t val[3]; } float16x4x3_t; typedef struct float16x8x3_t { float16x8_t val[3]; } float16x8x3_t; typedef struct float32x2x3_t { float32x2_t val[3]; } float32x2x3_t; typedef struct float32x4x3_t { float32x4_t val[3]; } float32x4x3_t; typedef struct float64x2x3_t { float64x2_t val[3]; } float64x2x3_t; typedef struct float64x1x3_t { float64x1_t val[3]; } float64x1x3_t; typedef struct poly8x8x3_t { poly8x8_t val[3]; } poly8x8x3_t; typedef struct poly8x16x3_t { poly8x16_t val[3]; } poly8x16x3_t; typedef struct poly16x4x3_t { poly16x4_t val[3]; } poly16x4x3_t; typedef struct poly16x8x3_t { poly16x8_t val[3]; } poly16x8x3_t; typedef struct int8x8x4_t { int8x8_t val[4]; } int8x8x4_t; typedef struct int8x16x4_t { int8x16_t val[4]; } int8x16x4_t; typedef struct int16x4x4_t { int16x4_t val[4]; } int16x4x4_t; typedef struct int16x8x4_t { int16x8_t val[4]; } int16x8x4_t; typedef struct int32x2x4_t { int32x2_t val[4]; } int32x2x4_t; typedef struct int32x4x4_t { int32x4_t val[4]; } int32x4x4_t; typedef struct int64x1x4_t { int64x1_t val[4]; } int64x1x4_t; typedef struct int64x2x4_t { int64x2_t val[4]; } int64x2x4_t; typedef struct uint8x8x4_t { uint8x8_t val[4]; } uint8x8x4_t; typedef struct uint8x16x4_t { uint8x16_t val[4]; } uint8x16x4_t; typedef struct uint16x4x4_t { uint16x4_t val[4]; } uint16x4x4_t; typedef struct uint16x8x4_t { uint16x8_t val[4]; } uint16x8x4_t; typedef struct uint32x2x4_t { uint32x2_t val[4]; } uint32x2x4_t; typedef struct uint32x4x4_t { uint32x4_t val[4]; } uint32x4x4_t; typedef struct uint64x1x4_t { uint64x1_t val[4]; } uint64x1x4_t; typedef struct uint64x2x4_t { uint64x2_t val[4]; } uint64x2x4_t; typedef struct float16x4x4_t { float16x4_t val[4]; } float16x4x4_t; typedef struct float16x8x4_t { float16x8_t val[4]; } float16x8x4_t; typedef struct float32x2x4_t { float32x2_t val[4]; } float32x2x4_t; typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t; typedef struct float64x2x4_t { float64x2_t val[4]; } float64x2x4_t; typedef struct float64x1x4_t { float64x1_t val[4]; } float64x1x4_t; typedef struct poly8x8x4_t { poly8x8_t val[4]; } poly8x8x4_t; typedef struct poly8x16x4_t { poly8x16_t val[4]; } poly8x16x4_t; typedef struct poly16x4x4_t { poly16x4_t val[4]; } poly16x4x4_t; typedef struct poly16x8x4_t { poly16x8_t val[4]; } poly16x8x4_t; # 604 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vadd_s8 (int8x8_t __a, int8x8_t __b) { return __a + __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vadd_s16 (int16x4_t __a, int16x4_t __b) { return __a + __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vadd_s32 (int32x2_t __a, int32x2_t __b) { return __a + __b; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vadd_f32 (float32x2_t __a, float32x2_t __b) { return __a + __b; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vadd_f64 (float64x1_t __a, float64x1_t __b) { return __a + __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vadd_u8 (uint8x8_t __a, uint8x8_t __b) { return __a + __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vadd_u16 (uint16x4_t __a, uint16x4_t __b) { return __a + __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vadd_u32 (uint32x2_t __a, uint32x2_t __b) { return __a + __b; } __extension__ static __inline int64x1_t 
__attribute__ ((__always_inline__)) vadd_s64 (int64x1_t __a, int64x1_t __b) { return __a + __b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vadd_u64 (uint64x1_t __a, uint64x1_t __b) { return __a + __b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vaddq_s8 (int8x16_t __a, int8x16_t __b) { return __a + __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddq_s16 (int16x8_t __a, int16x8_t __b) { return __a + __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddq_s32 (int32x4_t __a, int32x4_t __b) { return __a + __b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vaddq_s64 (int64x2_t __a, int64x2_t __b) { return __a + __b; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vaddq_f32 (float32x4_t __a, float32x4_t __b) { return __a + __b; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vaddq_f64 (float64x2_t __a, float64x2_t __b) { return __a + __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaddq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a + __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a + __b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a + __b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vaddq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a + __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddl_s8 (int8x8_t __a, int8x8_t __b) { return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddl_s16 (int16x4_t __a, int16x4_t __b) { return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vaddl_s32 (int32x2_t __a, int32x2_t __b) { return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddl_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddl_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vaddl_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddl_high_s8 (int8x16_t __a, int8x16_t __b) { return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddl_high_s16 (int16x8_t __a, int16x8_t __b) { return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vaddl_high_s32 (int32x4_t __a, int32x4_t __b) { return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) 
__a, (int8x16_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddw_s8 (int16x8_t __a, int8x8_t __b) { return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddw_s16 (int32x4_t __a, int16x4_t __b) { return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vaddw_s32 (int64x2_t __a, int32x2_t __b) { return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddw_u8 (uint16x8_t __a, uint8x8_t __b) { return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddw_u16 (uint32x4_t __a, uint16x4_t __b) { return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vaddw_u32 (uint64x2_t __a, uint32x2_t __b) { return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddw_high_s8 (int16x8_t __a, int8x16_t __b) { return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddw_high_s16 (int32x4_t __a, int16x8_t __b) { return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vaddw_high_s32 (int64x2_t __a, int32x4_t __b) { return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b) { return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a, (int8x16_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b) { return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a, (int16x8_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b) { return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vhadd_s8 (int8x8_t __a, int8x8_t __b) { return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vhadd_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vhadd_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vhadd_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a, (int8x8_t) __b); } 
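/* Editorial sketch (not part of arm_neon.h or libvpx): the widening adds
   above (vaddl_*, vaddw_*) are the building blocks that the including
   file, idct32x32_34_add_neon.c, needs for its reconstruction step --
   widen the 8-bit destination pixels, add the 16-bit inverse-transform
   residual, and saturate back to 8 bits.  A minimal illustration,
   assuming the rest of <arm_neon.h> (vld1_u8, vqmovun_s16, ...) is in
   scope; the function name and parameters are hypothetical.  */
static __inline void
example_add_residual_8 (uint8_t *dst, const int16_t *residual)
{
  uint8x8_t pixels = vld1_u8 (dst);        /* eight 8-bit pixels       */
  int16x8_t res = vld1q_s16 (residual);    /* eight 16-bit residuals   */
  /* vaddw_u8 (defined above) widens the pixels and adds in one step;
     reinterpreting the residual as unsigned is harmless for modular
     16-bit addition.  */
  uint16x8_t sum = vaddw_u8 (vreinterpretq_u16_s16 (res), pixels);
  /* Saturating narrow clamps the sums back into the 0..255 range.  */
  vst1_u8 (dst, vqmovun_s16 (vreinterpretq_s16_u16 (sum)));
}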
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vhadd_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vhadd_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vhaddq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vhaddq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vhaddq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vhaddq_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vhaddq_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vhaddq_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrhadd_s8 (int8x8_t __a, int8x8_t __b) { return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrhadd_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrhadd_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrhadd_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrhadd_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrhadd_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrhaddq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrhaddq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrhaddq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint16x8_t 
__attribute__ ((__always_inline__)) vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vaddhn_s16 (int16x8_t __a, int16x8_t __b) { return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vaddhn_s32 (int32x4_t __a, int32x4_t __b) { return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vaddhn_s64 (int64x2_t __a, int64x2_t __b) { return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vaddhn_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vaddhn_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vaddhn_u64 (uint64x2_t __a, uint64x2_t __b) { return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a, (int64x2_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vraddhn_s16 (int16x8_t __a, int16x8_t __b) { return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vraddhn_s32 (int32x4_t __a, int32x4_t __b) { return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vraddhn_s64 (int64x2_t __a, int64x2_t __b) { return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vraddhn_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vraddhn_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vraddhn_u64 (uint64x2_t __a, uint64x2_t __b) { return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a, (int64x2_t) __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c) { return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c) { return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c) { return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) { return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a, 
(int16x8_t) __b, (int16x8_t) __c); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) { return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) { return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a, (int64x2_t) __b, (int64x2_t) __c); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c) { return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c) { return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c) { return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) { return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) { return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) { return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a, (int64x2_t) __b, (int64x2_t) __c); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vdiv_f32 (float32x2_t __a, float32x2_t __b) { return __a / __b; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vdiv_f64 (float64x1_t __a, float64x1_t __b) { return __a / __b; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vdivq_f32 (float32x4_t __a, float32x4_t __b) { return __a / __b; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vdivq_f64 (float64x2_t __a, float64x2_t __b) { return __a / __b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmul_s8 (int8x8_t __a, int8x8_t __b) { return __a * __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmul_s16 (int16x4_t __a, int16x4_t __b) { return __a * __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_s32 (int32x2_t __a, int32x2_t __b) { return __a * __b; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_f32 (float32x2_t __a, float32x2_t __b) { return __a * __b; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmul_f64 (float64x1_t __a, float64x1_t __b) { return __a * __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmul_u8 (uint8x8_t __a, uint8x8_t __b) { return __a * __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_u16 (uint16x4_t __a, uint16x4_t __b) { return __a * __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_u32 
(uint32x2_t __a, uint32x2_t __b) { return __a * __b; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vmul_p8 (poly8x8_t __a, poly8x8_t __b) { return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmulq_s8 (int8x16_t __a, int8x16_t __b) { return __a * __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_s16 (int16x8_t __a, int16x8_t __b) { return __a * __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_s32 (int32x4_t __a, int32x4_t __b) { return __a * __b; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_f32 (float32x4_t __a, float32x4_t __b) { return __a * __b; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulq_f64 (float64x2_t __a, float64x2_t __b) { return __a * __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmulq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a * __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a * __b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a * __b; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vmulq_p8 (poly8x16_t __a, poly8x16_t __b) { return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vand_s8 (int8x8_t __a, int8x8_t __b) { return __a & __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vand_s16 (int16x4_t __a, int16x4_t __b) { return __a & __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vand_s32 (int32x2_t __a, int32x2_t __b) { return __a & __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vand_u8 (uint8x8_t __a, uint8x8_t __b) { return __a & __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vand_u16 (uint16x4_t __a, uint16x4_t __b) { return __a & __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vand_u32 (uint32x2_t __a, uint32x2_t __b) { return __a & __b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vand_s64 (int64x1_t __a, int64x1_t __b) { return __a & __b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vand_u64 (uint64x1_t __a, uint64x1_t __b) { return __a & __b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vandq_s8 (int8x16_t __a, int8x16_t __b) { return __a & __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vandq_s16 (int16x8_t __a, int16x8_t __b) { return __a & __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vandq_s32 (int32x4_t __a, int32x4_t __b) { return __a & __b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vandq_s64 (int64x2_t __a, int64x2_t __b) { return __a & __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vandq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a & __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vandq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a & __b; } __extension__ 
static __inline uint32x4_t __attribute__ ((__always_inline__)) vandq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a & __b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vandq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a & __b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vorr_s8 (int8x8_t __a, int8x8_t __b) { return __a | __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vorr_s16 (int16x4_t __a, int16x4_t __b) { return __a | __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vorr_s32 (int32x2_t __a, int32x2_t __b) { return __a | __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vorr_u8 (uint8x8_t __a, uint8x8_t __b) { return __a | __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vorr_u16 (uint16x4_t __a, uint16x4_t __b) { return __a | __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vorr_u32 (uint32x2_t __a, uint32x2_t __b) { return __a | __b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vorr_s64 (int64x1_t __a, int64x1_t __b) { return __a | __b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vorr_u64 (uint64x1_t __a, uint64x1_t __b) { return __a | __b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vorrq_s8 (int8x16_t __a, int8x16_t __b) { return __a | __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vorrq_s16 (int16x8_t __a, int16x8_t __b) { return __a | __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vorrq_s32 (int32x4_t __a, int32x4_t __b) { return __a | __b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vorrq_s64 (int64x2_t __a, int64x2_t __b) { return __a | __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vorrq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a | __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vorrq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a | __b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vorrq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a | __b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vorrq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a | __b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) veor_s8 (int8x8_t __a, int8x8_t __b) { return __a ^ __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) veor_s16 (int16x4_t __a, int16x4_t __b) { return __a ^ __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) veor_s32 (int32x2_t __a, int32x2_t __b) { return __a ^ __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) veor_u8 (uint8x8_t __a, uint8x8_t __b) { return __a ^ __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) veor_u16 (uint16x4_t __a, uint16x4_t __b) { return __a ^ __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) veor_u32 (uint32x2_t __a, uint32x2_t __b) { return __a ^ __b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) veor_s64 (int64x1_t __a, int64x1_t __b) { return __a ^ __b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) veor_u64 
(uint64x1_t __a, uint64x1_t __b) { return __a ^ __b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) veorq_s8 (int8x16_t __a, int8x16_t __b) { return __a ^ __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) veorq_s16 (int16x8_t __a, int16x8_t __b) { return __a ^ __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) veorq_s32 (int32x4_t __a, int32x4_t __b) { return __a ^ __b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) veorq_s64 (int64x2_t __a, int64x2_t __b) { return __a ^ __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) veorq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a ^ __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) veorq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a ^ __b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) veorq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a ^ __b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) veorq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a ^ __b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vbic_s8 (int8x8_t __a, int8x8_t __b) { return __a & ~__b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vbic_s16 (int16x4_t __a, int16x4_t __b) { return __a & ~__b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vbic_s32 (int32x2_t __a, int32x2_t __b) { return __a & ~__b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vbic_u8 (uint8x8_t __a, uint8x8_t __b) { return __a & ~__b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vbic_u16 (uint16x4_t __a, uint16x4_t __b) { return __a & ~__b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vbic_u32 (uint32x2_t __a, uint32x2_t __b) { return __a & ~__b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vbic_s64 (int64x1_t __a, int64x1_t __b) { return __a & ~__b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vbic_u64 (uint64x1_t __a, uint64x1_t __b) { return __a & ~__b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vbicq_s8 (int8x16_t __a, int8x16_t __b) { return __a & ~__b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vbicq_s16 (int16x8_t __a, int16x8_t __b) { return __a & ~__b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vbicq_s32 (int32x4_t __a, int32x4_t __b) { return __a & ~__b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vbicq_s64 (int64x2_t __a, int64x2_t __b) { return __a & ~__b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vbicq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a & ~__b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vbicq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a & ~__b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vbicq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a & ~__b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vbicq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a & ~__b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vorn_s8 (int8x8_t __a, int8x8_t __b) { return __a | 
~__b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vorn_s16 (int16x4_t __a, int16x4_t __b) { return __a | ~__b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vorn_s32 (int32x2_t __a, int32x2_t __b) { return __a | ~__b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vorn_u8 (uint8x8_t __a, uint8x8_t __b) { return __a | ~__b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vorn_u16 (uint16x4_t __a, uint16x4_t __b) { return __a | ~__b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vorn_u32 (uint32x2_t __a, uint32x2_t __b) { return __a | ~__b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vorn_s64 (int64x1_t __a, int64x1_t __b) { return __a | ~__b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vorn_u64 (uint64x1_t __a, uint64x1_t __b) { return __a | ~__b; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vornq_s8 (int8x16_t __a, int8x16_t __b) { return __a | ~__b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vornq_s16 (int16x8_t __a, int16x8_t __b) { return __a | ~__b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vornq_s32 (int32x4_t __a, int32x4_t __b) { return __a | ~__b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vornq_s64 (int64x2_t __a, int64x2_t __b) { return __a | ~__b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vornq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a | ~__b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vornq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a | ~__b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vornq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a | ~__b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vornq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a | ~__b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vsub_s8 (int8x8_t __a, int8x8_t __b) { return __a - __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vsub_s16 (int16x4_t __a, int16x4_t __b) { return __a - __b; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vsub_s32 (int32x2_t __a, int32x2_t __b) { return __a - __b; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vsub_f32 (float32x2_t __a, float32x2_t __b) { return __a - __b; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vsub_f64 (float64x1_t __a, float64x1_t __b) { return __a - __b; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsub_u8 (uint8x8_t __a, uint8x8_t __b) { return __a - __b; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsub_u16 (uint16x4_t __a, uint16x4_t __b) { return __a - __b; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsub_u32 (uint32x2_t __a, uint32x2_t __b) { return __a - __b; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vsub_s64 (int64x1_t __a, int64x1_t __b) { return __a - __b; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsub_u64 (uint64x1_t __a, uint64x1_t __b) { return __a - __b; } __extension__ static __inline int8x16_t 
__attribute__ ((__always_inline__)) vsubq_s8 (int8x16_t __a, int8x16_t __b) { return __a - __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubq_s16 (int16x8_t __a, int16x8_t __b) { return __a - __b; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubq_s32 (int32x4_t __a, int32x4_t __b) { return __a - __b; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsubq_s64 (int64x2_t __a, int64x2_t __b) { return __a - __b; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vsubq_f32 (float32x4_t __a, float32x4_t __b) { return __a - __b; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vsubq_f64 (float64x2_t __a, float64x2_t __b) { return __a - __b; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsubq_u8 (uint8x16_t __a, uint8x16_t __b) { return __a - __b; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubq_u16 (uint16x8_t __a, uint16x8_t __b) { return __a - __b; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsubq_u32 (uint32x4_t __a, uint32x4_t __b) { return __a - __b; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsubq_u64 (uint64x2_t __a, uint64x2_t __b) { return __a - __b; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubl_s8 (int8x8_t __a, int8x8_t __b) { return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubl_s16 (int16x4_t __a, int16x4_t __b) { return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsubl_s32 (int32x2_t __a, int32x2_t __b) { return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubl_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsubl_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsubl_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubl_high_s8 (int8x16_t __a, int8x16_t __b) { return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubl_high_s16 (int16x8_t __a, int16x8_t __b) { return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsubl_high_s32 (int32x4_t __a, int32x4_t __b) { return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint64x2_t 
__attribute__ ((__always_inline__)) vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubw_s8 (int16x8_t __a, int8x8_t __b) { return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubw_s16 (int32x4_t __a, int16x4_t __b) { return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsubw_s32 (int64x2_t __a, int32x2_t __b) { return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubw_u8 (uint16x8_t __a, uint8x8_t __b) { return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsubw_u16 (uint32x4_t __a, uint16x4_t __b) { return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsubw_u32 (uint64x2_t __a, uint32x2_t __b) { return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubw_high_s8 (int16x8_t __a, int8x16_t __b) { return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubw_high_s16 (int32x4_t __a, int16x8_t __b) { return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsubw_high_s32 (int64x2_t __a, int32x4_t __b) { return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b) { return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a, (int8x16_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b) { return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a, (int16x8_t) __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b) { return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqadd_s8 (int8x8_t __a, int8x8_t __b) { return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqadd_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqadd_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqadd_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_sqadddi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqadd_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_aarch64_uqaddv8qi_uuu (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vhsub_s8 (int8x8_t __a, int8x8_t __b) { return 
(int8x8_t)__builtin_aarch64_shsubv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vhsub_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_shsubv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vhsub_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_shsubv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vhsub_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_uhsubv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vhsub_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_uhsubv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vhsub_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_uhsubv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vhsubq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_shsubv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vhsubq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_shsubv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vhsubq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_shsubv4si (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vhsubq_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint8x16_t) __builtin_aarch64_uhsubv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vhsubq_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint16x8_t) __builtin_aarch64_uhsubv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vhsubq_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint32x4_t) __builtin_aarch64_uhsubv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vsubhn_s16 (int16x8_t __a, int16x8_t __b) { return (int8x8_t) __builtin_aarch64_subhnv8hi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vsubhn_s32 (int32x4_t __a, int32x4_t __b) { return (int16x4_t) __builtin_aarch64_subhnv4si (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vsubhn_s64 (int64x2_t __a, int64x2_t __b) { return (int32x2_t) __builtin_aarch64_subhnv2di (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsubhn_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint8x8_t) __builtin_aarch64_subhnv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsubhn_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint16x4_t) __builtin_aarch64_subhnv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsubhn_u64 (uint64x2_t __a, uint64x2_t __b) { return (uint32x2_t) __builtin_aarch64_subhnv2di ((int64x2_t) __a, (int64x2_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrsubhn_s16 (int16x8_t __a, int16x8_t __b) { return (int8x8_t) __builtin_aarch64_rsubhnv8hi (__a, __b); } 
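/* Editorial sketch (not from the header): the narrowing subtracts above
   come in truncating (vsubhn_*) and rounding (vrsubhn_*) flavours.  Both
   keep only the high half of the wide difference; the "r" forms add a
   rounding constant first.  Per 16-bit lane, narrowed to 8 bits:
       truncating:  (int8_t) ((__a - __b) >> 8)
       rounding:    (int8_t) ((__a - __b + (1 << 7)) >> 8)
   The function below is a hypothetical re-derivation of vrsubhn_s16 in
   terms of intrinsics defined elsewhere in this header (vdupq_n_s16,
   vshrn_n_s16); it is illustrative, not a replacement.  */
static __inline int8x8_t
example_rounded_narrowing_sub (int16x8_t __a, int16x8_t __b)
{
  int16x8_t diff = vsubq_s16 (__a, __b);                      /* wide difference  */
  int16x8_t rounded = vaddq_s16 (diff, vdupq_n_s16 (1 << 7)); /* add rounding bit */
  return vshrn_n_s16 (rounded, 8);                            /* keep bits 15..8  */
}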
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrsubhn_s32 (int32x4_t __a, int32x4_t __b) { return (int16x4_t) __builtin_aarch64_rsubhnv4si (__a, __b); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrsubhn_s64 (int64x2_t __a, int64x2_t __b) { return (int32x2_t) __builtin_aarch64_rsubhnv2di (__a, __b); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint8x8_t) __builtin_aarch64_rsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint16x4_t) __builtin_aarch64_rsubhnv4si ((int32x4_t) __a, (int32x4_t) __b); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b) { return (uint32x2_t) __builtin_aarch64_rsubhnv2di ((int64x2_t) __a, (int64x2_t) __b); }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c) { return (int8x16_t) __builtin_aarch64_rsubhn2v8hi (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c) { return (int16x8_t) __builtin_aarch64_rsubhn2v4si (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c) { return (int32x4_t) __builtin_aarch64_rsubhn2v2di (__a, __b, __c); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) { return (uint8x16_t) __builtin_aarch64_rsubhn2v8hi ((int8x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) { return (uint16x8_t) __builtin_aarch64_rsubhn2v4si ((int16x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) { return (uint32x4_t) __builtin_aarch64_rsubhn2v2di ((int32x2_t) __a, (int64x2_t) __b, (int64x2_t) __c); }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c) { return (int8x16_t) __builtin_aarch64_subhn2v8hi (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c) { return (int16x8_t) __builtin_aarch64_subhn2v4si (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c) { return (int32x4_t) __builtin_aarch64_subhn2v2di (__a, __b, __c); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) { return (uint8x16_t) __builtin_aarch64_subhn2v8hi ((int8x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) { return (uint16x8_t) __builtin_aarch64_subhn2v4si ((int16x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); }
__extension__ static __inline uint32x4_t __attribute__
((__always_inline__)) vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) { return (uint32x4_t) __builtin_aarch64_subhn2v2di ((int32x2_t) __a, (int64x2_t) __b, (int64x2_t) __c); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqadd_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_aarch64_uqaddv4hi_uuu (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqadd_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_aarch64_uqaddv2si_uuu (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqadd_u64 (uint64x1_t __a, uint64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_uqadddi_uuu (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqaddq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqaddq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqaddq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqaddq_s64 (int64x2_t __a, int64x2_t __b) { return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqaddq_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_aarch64_uqaddv16qi_uuu (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqaddq_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_aarch64_uqaddv8hi_uuu (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqaddq_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_aarch64_uqaddv4si_uuu (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vqaddq_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_aarch64_uqaddv2di_uuu (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqsub_s8 (int8x8_t __a, int8x8_t __b) { return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqsub_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqsub_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqsub_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_sqsubdi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqsub_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_aarch64_uqsubv8qi_uuu (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqsub_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_aarch64_uqsubv4hi_uuu (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqsub_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_aarch64_uqsubv2si_uuu (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqsub_u64 (uint64x1_t __a, uint64x1_t __b) { return (uint64x1_t) 
{__builtin_aarch64_uqsubdi_uuu (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqsubq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqsubq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqsubq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqsubq_s64 (int64x2_t __a, int64x2_t __b) { return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqsubq_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_aarch64_uqsubv16qi_uuu (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqsubq_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_aarch64_uqsubv8hi_uuu (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqsubq_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_aarch64_uqsubv4si_uuu (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vqsubq_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_aarch64_uqsubv2di_uuu (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqneg_s8 (int8x8_t __a) { return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqneg_s16 (int16x4_t __a) { return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqneg_s32 (int32x2_t __a) { return (int32x2_t) __builtin_aarch64_sqnegv2si (__a); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqneg_s64 (int64x1_t __a) { return (int64x1_t) {__builtin_aarch64_sqnegdi (__a[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqnegq_s8 (int8x16_t __a) { return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqnegq_s16 (int16x8_t __a) { return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqnegq_s32 (int32x4_t __a) { return (int32x4_t) __builtin_aarch64_sqnegv4si (__a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqabs_s8 (int8x8_t __a) { return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqabs_s16 (int16x4_t __a) { return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqabs_s32 (int32x2_t __a) { return (int32x2_t) __builtin_aarch64_sqabsv2si (__a); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqabs_s64 (int64x1_t __a) { return (int64x1_t) {__builtin_aarch64_sqabsdi (__a[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqabsq_s8 (int8x16_t __a) { return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqabsq_s16 (int16x8_t __a) { return (int16x8_t) __builtin_aarch64_sqabsv8hi 
(__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqabsq_s32 (int32x4_t __a) { return (int32x4_t) __builtin_aarch64_sqabsv4si (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqdmulh_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqdmulh_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqdmulhq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmulhq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmulh_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmulh_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vcreate_s8 (uint64_t __a) { return (int8x8_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vcreate_s16 (uint64_t __a) { return (int16x4_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcreate_s32 (uint64_t __a) { return (int32x2_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vcreate_s64 (uint64_t __a) { return (int64x1_t) {__a}; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vcreate_f16 (uint64_t __a) { return (float16x4_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vcreate_f32 (uint64_t __a) { return (float32x2_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcreate_u8 (uint64_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcreate_u16 (uint64_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcreate_u32 (uint64_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcreate_u64 (uint64_t __a) { return (uint64x1_t) {__a}; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vcreate_f64 (uint64_t __a) { return (float64x1_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vcreate_p8 (uint64_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vcreate_p16 (uint64_t __a) { return (poly16x4_t) __a; } __extension__ static __inline float16_t __attribute__ ((__always_inline__)) vget_lane_f16 (float16x4_t __a, const int __b) { return __extension__ ({ 
__builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vget_lane_f32 (float32x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vget_lane_f64 (float64x1_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vget_lane_p8 (poly8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vget_lane_p16 (poly16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vget_lane_s8 (int8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vget_lane_s16 (int16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vget_lane_s32 (int32x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vget_lane_s64 (int64x1_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vget_lane_u8 (uint8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vget_lane_u16 (uint16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vget_lane_u32 (uint32x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vget_lane_u64 (uint64x1_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float16_t __attribute__ ((__always_inline__)) vgetq_lane_f16 (float16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vgetq_lane_f32 (float32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vgetq_lane_f64 (float64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), 
__b); __a[__b]; }); } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vgetq_lane_p8 (poly8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vgetq_lane_p16 (poly16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vgetq_lane_s8 (int8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vgetq_lane_s16 (int16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vgetq_lane_s32 (int32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vgetq_lane_s64 (int64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vgetq_lane_u8 (uint8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vgetq_lane_u16 (uint16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vgetq_lane_u32 (uint32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vgetq_lane_u64 (uint64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_f16 (float16x4_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_f64 (float64x1_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_s8 (int8x8_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_s16 (int16x4_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_s32 (int32x2_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_s64 (int64x1_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_f32 (float32x2_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_u8 (uint8x8_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) 
vreinterpret_p8_u16 (uint16x4_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_u32 (uint32x2_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_u64 (uint64x1_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vreinterpret_p8_p16 (poly16x4_t __a) { return (poly8x8_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_f64 (float64x2_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_s8 (int8x16_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_s16 (int16x8_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_s32 (int32x4_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_s64 (int64x2_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_f16 (float16x8_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_f32 (float32x4_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_u8 (uint8x16_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_u16 (uint16x8_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_u32 (uint32x4_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_u64 (uint64x2_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vreinterpretq_p8_p16 (poly16x8_t __a) { return (poly8x16_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_f16 (float16x4_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_f64 (float64x1_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_s8 (int8x8_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_s16 (int16x4_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_s32 (int32x2_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_s64 (int64x1_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_f32 (float32x2_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_u8 (uint8x8_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_u16 (uint16x4_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t 
__attribute__ ((__always_inline__)) vreinterpret_p16_u32 (uint32x2_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_u64 (uint64x1_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vreinterpret_p16_p8 (poly8x8_t __a) { return (poly16x4_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_f64 (float64x2_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_s8 (int8x16_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_s16 (int16x8_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_s32 (int32x4_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_s64 (int64x2_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_f16 (float16x8_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_f32 (float32x4_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_u8 (uint8x16_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_u16 (uint16x8_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_u32 (uint32x4_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_u64 (uint64x2_t __a) { return (poly16x8_t) __a; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vreinterpretq_p16_p8 (poly8x16_t __a) { return (poly16x8_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_f64 (float64x1_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_s8 (int8x8_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_s16 (int16x4_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_s32 (int32x2_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_s64 (int64x1_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_f32 (float32x2_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_u8 (uint8x8_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_u16 (uint16x4_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_u32 (uint32x2_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_u64 (uint64x1_t __a) { 
return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_p8 (poly8x8_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vreinterpret_f16_p16 (poly16x4_t __a) { return (float16x4_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_f64 (float64x2_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_s8 (int8x16_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_s16 (int16x8_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_s32 (int32x4_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_s64 (int64x2_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_f32 (float32x4_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_u8 (uint8x16_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_u16 (uint16x8_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_u32 (uint32x4_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_u64 (uint64x2_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_p8 (poly8x16_t __a) { return (float16x8_t) __a; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vreinterpretq_f16_p16 (poly16x8_t __a) { return (float16x8_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_f16 (float16x4_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_f64 (float64x1_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_s8 (int8x8_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_s16 (int16x4_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_s32 (int32x2_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_s64 (int64x1_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_u8 (uint8x8_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_u16 (uint16x4_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_u32 (uint32x2_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_u64 (uint64x1_t __a) { return (float32x2_t) __a; } __extension__ static 
__inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_p8 (poly8x8_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vreinterpret_f32_p16 (poly16x4_t __a) { return (float32x2_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_f16 (float16x8_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_f64 (float64x2_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_s8 (int8x16_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_s16 (int16x8_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_s32 (int32x4_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_s64 (int64x2_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_u8 (uint8x16_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_u16 (uint16x8_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_u32 (uint32x4_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_u64 (uint64x2_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_p8 (poly8x16_t __a) { return (float32x4_t) __a; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vreinterpretq_f32_p16 (poly16x8_t __a) { return (float32x4_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_f16 (float16x4_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_f32 (float32x2_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_p8 (poly8x8_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_p16 (poly16x4_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_s8 (int8x8_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_s16 (int16x4_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_s32 (int32x2_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_s64 (int64x1_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_u8 (uint8x8_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_u16 (uint16x4_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) 
vreinterpret_f64_u32 (uint32x2_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x1_t __attribute__((__always_inline__)) vreinterpret_f64_u64 (uint64x1_t __a) { return (float64x1_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_f16 (float16x8_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_f32 (float32x4_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_p8 (poly8x16_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_p16 (poly16x8_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_s8 (int8x16_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_s16 (int16x8_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_s32 (int32x4_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_s64 (int64x2_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_u8 (uint8x16_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_u16 (uint16x8_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_u32 (uint32x4_t __a) { return (float64x2_t) __a; } __extension__ static __inline float64x2_t __attribute__((__always_inline__)) vreinterpretq_f64_u64 (uint64x2_t __a) { return (float64x2_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_f16 (float16x4_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_f64 (float64x1_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_s8 (int8x8_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_s16 (int16x4_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_s32 (int32x2_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_f32 (float32x2_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_u8 (uint8x8_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_u16 (uint16x4_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_u32 (uint32x2_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_u64 (uint64x1_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_p8 (poly8x8_t __a) { return (int64x1_t) __a; } __extension__ static __inline 
int64x1_t __attribute__ ((__always_inline__)) vreinterpret_s64_p16 (poly16x4_t __a) { return (int64x1_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_f64 (float64x2_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_s8 (int8x16_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_s16 (int16x8_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_s32 (int32x4_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_f16 (float16x8_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_f32 (float32x4_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_u8 (uint8x16_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_u16 (uint16x8_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_u32 (uint32x4_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_u64 (uint64x2_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_p8 (poly8x16_t __a) { return (int64x2_t) __a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vreinterpretq_s64_p16 (poly16x8_t __a) { return (int64x2_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_f16 (float16x4_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_f64 (float64x1_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_s8 (int8x8_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_s16 (int16x4_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_s32 (int32x2_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_s64 (int64x1_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_f32 (float32x2_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_u8 (uint8x8_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_u16 (uint16x4_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_u32 (uint32x2_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_p8 (poly8x8_t __a) { return (uint64x1_t) __a; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vreinterpret_u64_p16 (poly16x4_t __a) { return (uint64x1_t) __a; } 
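/* Editor's note: illustrative sketch, not part of arm_neon.h. The
   vreinterpret_* family above compiles to no instructions at all: each one
   merely retypes the same 64-bit register, so the lane bits are reused
   unchanged. The helper name below is hypothetical. */
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
example_bits_of_s64 (int64x1_t __v)
{
  uint64x1_t __u = vreinterpret_u64_s64 (__v); /* same bits, new element type */
  return vget_lane_u64 (__u, 0);               /* read the single 64-bit lane */
}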
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_f64 (float64x2_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_s8 (int8x16_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_s16 (int16x8_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_s32 (int32x4_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_s64 (int64x2_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_f16 (float16x8_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_f32 (float32x4_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_u8 (uint8x16_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_u16 (uint16x8_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_u32 (uint32x4_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_p8 (poly8x16_t __a) { return (uint64x2_t) __a; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vreinterpretq_u64_p16 (poly16x8_t __a) { return (uint64x2_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_f16 (float16x4_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_f64 (float64x1_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_s16 (int16x4_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_s32 (int32x2_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_s64 (int64x1_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_f32 (float32x2_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_u8 (uint8x8_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_u16 (uint16x4_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_u32 (uint32x2_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_u64 (uint64x1_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_p8 (poly8x8_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vreinterpret_s8_p16 (poly16x4_t __a) { return (int8x8_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_f64 (float64x2_t __a) { return (int8x16_t) __a; } 
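/* Editor's note: hypothetical usage sketch. Because vreinterpretq_* is a
   free bitcast, a 128-bit byte vector can be viewed as two 64-bit lanes and
   each half inspected with one scalar extraction instead of eight. */
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
example_low_half_u8 (uint8x16_t __v)
{
  return vgetq_lane_u64 (vreinterpretq_u64_u8 (__v), 0); /* bytes 0..7 */
}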
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_s16 (int16x8_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_s32 (int32x4_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_s64 (int64x2_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_f16 (float16x8_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_f32 (float32x4_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_u8 (uint8x16_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_u16 (uint16x8_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_u32 (uint32x4_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_u64 (uint64x2_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_p8 (poly8x16_t __a) { return (int8x16_t) __a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vreinterpretq_s8_p16 (poly16x8_t __a) { return (int8x16_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_f16 (float16x4_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_f64 (float64x1_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_s8 (int8x8_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_s32 (int32x2_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_s64 (int64x1_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_f32 (float32x2_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_u8 (uint8x8_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_u16 (uint16x4_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_u32 (uint32x2_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_u64 (uint64x1_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_p8 (poly8x8_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vreinterpret_s16_p16 (poly16x4_t __a) { return (int16x4_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_f64 (float64x2_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_s8 (int8x16_t __a) { return (int16x8_t) __a; } 
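/* Editor's note: illustrative sketch using the saturating arithmetic
   defined earlier. vqaddq_s16 clamps each lane to [-32768, 32767] instead
   of wrapping: adding 30000 + 30000 per lane yields 32767, not -5536.
   The accumulator helper name is hypothetical. */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
example_sat_accumulate_s16 (int16x8_t __acc, int16x8_t __x)
{
  return vqaddq_s16 (__acc, __x); /* per-lane saturating add */
}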
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_s32 (int32x4_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_s64 (int64x2_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_f16 (float16x8_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_f32 (float32x4_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_u8 (uint8x16_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_u16 (uint16x8_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_u32 (uint32x4_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_u64 (uint64x2_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_p8 (poly8x16_t __a) { return (int16x8_t) __a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vreinterpretq_s16_p16 (poly16x8_t __a) { return (int16x8_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_f16 (float16x4_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_f64 (float64x1_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_s8 (int8x8_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_s16 (int16x4_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_s64 (int64x1_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_f32 (float32x2_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_u8 (uint8x8_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_u16 (uint16x4_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_u32 (uint32x2_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_u64 (uint64x1_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_p8 (poly8x8_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vreinterpret_s32_p16 (poly16x4_t __a) { return (int32x2_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_f64 (float64x2_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_s8 (int8x16_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_s16 (int16x8_t __a) { return (int32x4_t) __a; } 
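/* Editor's note: hypothetical Q15 fixed-point sketch. vqdmulhq_s16
   computes the saturated high half of the doubled product, (2*a*b) >> 16,
   and vqrdmulhq_s16 does the same with a rounding bias, i.e.
   round(a*b / 32768) per lane -- a common building block in fixed-point
   DCT/IDCT kernels like the file this header is included from. */
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
example_q15_round_mul (int16x8_t __a, int16x8_t __b)
{
  return vqrdmulhq_s16 (__a, __b); /* rounded Q15 multiply, saturated */
}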
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_s64 (int64x2_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_f16 (float16x8_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_f32 (float32x4_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_u8 (uint8x16_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_u16 (uint16x8_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_u32 (uint32x4_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_u64 (uint64x2_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_p8 (poly8x16_t __a) { return (int32x4_t) __a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vreinterpretq_s32_p16 (poly16x8_t __a) { return (int32x4_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_f16 (float16x4_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_f64 (float64x1_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_s8 (int8x8_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_s16 (int16x4_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_s32 (int32x2_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_s64 (int64x1_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_f32 (float32x2_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_u16 (uint16x4_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_u32 (uint32x2_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_u64 (uint64x1_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_p8 (poly8x8_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vreinterpret_u8_p16 (poly16x4_t __a) { return (uint8x8_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_f64 (float64x2_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_s8 (int8x16_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_s16 (int16x8_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_s32 (int32x4_t __a) { return (uint8x16_t) __a; } 
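/* Editor's note: illustrative sketch. Every lane accessor above first
   calls __builtin_aarch64_im_lane_boundsi, so the lane index must be an
   integer constant expression within the lane count; a variable or
   out-of-range index is rejected at compile time, not at run time. */
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
example_first_byte_u8 (uint8x8_t __v)
{
  return vget_lane_u8 (__v, 0); /* constant index, checked against 8 lanes */
}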
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_s64 (int64x2_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_f16 (float16x8_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_f32 (float32x4_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_u16 (uint16x8_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_u32 (uint32x4_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_u64 (uint64x2_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_p8 (poly8x16_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vreinterpretq_u8_p16 (poly16x8_t __a) { return (uint8x16_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_f16 (float16x4_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_f64 (float64x1_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_s8 (int8x8_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_s16 (int16x4_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_s32 (int32x2_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_s64 (int64x1_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_f32 (float32x2_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_u8 (uint8x8_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_u32 (uint32x2_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_u64 (uint64x1_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_p8 (poly8x8_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vreinterpret_u16_p16 (poly16x4_t __a) { return (uint16x4_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_f64 (float64x2_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_s8 (int8x16_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_s16 (int16x8_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_s32 (int32x4_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_s64 
(int64x2_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_f16 (float16x8_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_f32 (float32x4_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_u8 (uint8x16_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_u32 (uint32x4_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_u64 (uint64x2_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_p8 (poly8x16_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vreinterpretq_u16_p16 (poly16x8_t __a) { return (uint16x8_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_f16 (float16x4_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_f64 (float64x1_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_s8 (int8x8_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_s16 (int16x4_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_s32 (int32x2_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_s64 (int64x1_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_f32 (float32x2_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_u8 (uint8x8_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_u16 (uint16x4_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_u64 (uint64x1_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_p8 (poly8x8_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vreinterpret_u32_p16 (poly16x4_t __a) { return (uint32x2_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_f64 (float64x2_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_s8 (int8x16_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_s16 (int16x8_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_s32 (int32x4_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_s64 (int64x2_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t 
__attribute__ ((__always_inline__)) vreinterpretq_u32_f16 (float16x8_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_f32 (float32x4_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_u8 (uint8x16_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_u16 (uint16x8_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_u64 (uint64x2_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_p8 (poly8x16_t __a) { return (uint32x4_t) __a; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vreinterpretq_u32_p16 (poly16x8_t __a) { return (uint32x4_t) __a; } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vset_lane_f16 (float16_t __elem, float16x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } 
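/* Editor's note: hypothetical sketch. vset_lane_* is a functional update:
   it returns a copy of the vector with one lane replaced and leaves the
   argument value itself untouched. */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
example_replace_lane0_f32 (float32_t __x, float32x2_t __v)
{
  return vset_lane_f32 (__x, __v, 0); /* lane 1 of __v is preserved */
}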
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vsetq_lane_f16 (float16_t __elem, float16x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsetq_lane_s64 
(int64_t __elem, int64x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __index); __vec[__index] = __elem; __vec; }); }
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vget_low_f16 (float16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_f16 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_f16_u64 (lo); }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vget_low_f32 (float32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_f32 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_f32_u64 (lo); }
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vget_low_f64 (float64x2_t __a) { return (float64x1_t) {vgetq_lane_f64 (__a, 0)}; }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vget_low_p8 (poly8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_p8 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_p8_u64 (lo); }
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vget_low_p16 (poly16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_p16 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_p16_u64 (lo); }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vget_low_s8 (int8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_s8 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_s8_u64 (lo); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vget_low_s16 (int16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_s16 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_s16_u64 (lo); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vget_low_s32 (int32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_s32 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_s32_u64 (lo); }
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vget_low_s64 (int64x2_t __a) { uint64x2_t tmp = vreinterpretq_u64_s64 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_s64_u64 (lo); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vget_low_u8 (uint8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_u8 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_u8_u64 (lo); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vget_low_u16 (uint16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_u16 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_u16_u64 (lo); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vget_low_u32 (uint32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_u32 (__a); uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); return vreinterpret_u32_u64 (lo); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vget_low_u64 (uint64x2_t __a) { return vcreate_u64 (vgetq_lane_u64 (__a, 0)); }
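/* A minimal usage sketch of the lane-insert intrinsics defined above
   (vset_lane_* for 64-bit D registers, vsetq_lane_* for 128-bit Q
   registers): the lane index must be a constant expression, which
   __builtin_aarch64_im_lane_boundsi range-checks at compile time.  The
   helper name below is illustrative only, not part of arm_neon.h. */
static __inline int16x8_t __attribute__ ((__unused__))
example_set_lane3 (int16x8_t __v, int16_t __x)
{
  /* Replace lane 3 of the eight 16-bit lanes; all other lanes are kept. */
  return vsetq_lane_s16 (__x, __v, 3);
}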
# 5014 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vget_high_f16 (float16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_f16 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_f16_u64 (hi); }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vget_high_f32 (float32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_f32 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_f32_u64 (hi); }
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vget_high_f64 (float64x2_t __a) { uint64x2_t tmp = vreinterpretq_u64_f64 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_f64_u64 (hi); }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vget_high_p8 (poly8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_p8 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_p8_u64 (hi); }
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vget_high_p16 (poly16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_p16 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_p16_u64 (hi); }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vget_high_s8 (int8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_s8 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_s8_u64 (hi); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vget_high_s16 (int16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_s16 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_s16_u64 (hi); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vget_high_s32 (int32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_s32 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_s32_u64 (hi); }
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vget_high_s64 (int64x2_t __a) { uint64x2_t tmp = vreinterpretq_u64_s64 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_s64_u64 (hi); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vget_high_u8 (uint8x16_t __a) { uint64x2_t tmp = vreinterpretq_u64_u8 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_u8_u64 (hi); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vget_high_u16 (uint16x8_t __a) { uint64x2_t tmp = vreinterpretq_u64_u16 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_u16_u64 (hi); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vget_high_u32 (uint32x4_t __a) { uint64x2_t tmp = vreinterpretq_u64_u32 (__a); uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); return vreinterpret_u32_u64 (hi); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vget_high_u64 (uint64x2_t __a) { return vcreate_u64 (vgetq_lane_u64 (__a, 1)); }
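/* Usage sketch for the half-extraction intrinsics above: vget_low_* and
   vget_high_* reinterpret a Q register as uint64x2_t and pull out one
   64-bit lane, so splitting a vector into halves costs no real work.
   Illustrative helper only; int16x4x2_t is the pair type declared earlier
   in this header. */
static __inline int16x4x2_t __attribute__ ((__unused__))
example_split_s16 (int16x8_t __v)
{
  int16x4x2_t __halves;
  __halves.val[0] = vget_low_s16 (__v);  /* lanes 0..3 */
  __halves.val[1] = vget_high_s16 (__v); /* lanes 4..7 */
  return __halves;
}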
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vcombine_s8 (int8x8_t __a, int8x8_t __b) { return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vcombine_s16 (int16x4_t __a, int16x4_t __b) { return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcombine_s32 (int32x2_t __a, int32x2_t __b) { return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b); }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcombine_s64 (int64x1_t __a, int64x1_t __b) { return __builtin_aarch64_combinedi (__a[0], __b[0]); }
__extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vcombine_f16 (float16x4_t __a, float16x4_t __b) { return __builtin_aarch64_combinev4hf (__a, __b); }
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcombine_f32 (float32x2_t __a, float32x2_t __b) { return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcombine_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a, (int8x8_t) __b); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcombine_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a, (int16x4_t) __b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcombine_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a, (int32x2_t) __b); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcombine_u64 (uint64x1_t __a, uint64x1_t __b) { return (uint64x2_t) __builtin_aarch64_combinedi (__a[0], __b[0]); }
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vcombine_f64 (float64x1_t __a, float64x1_t __b) { return __builtin_aarch64_combinedf (__a[0], __b[0]); }
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vcombine_p8 (poly8x8_t __a, poly8x8_t __b) { return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a, (int8x8_t) __b); }
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vcombine_p16 (poly16x4_t __a, poly16x4_t __b) { return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a, (int16x4_t) __b); }
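/* Usage sketch: vcombine_* concatenates two D registers into one Q
   register, so pairing it with vget_low_*/vget_high_* gives a cheap way to
   reshuffle 64-bit halves.  Illustrative helper only. */
static __inline uint8x16_t __attribute__ ((__unused__))
example_swap_halves_u8 (uint8x16_t __v)
{
  /* Return the input with its two 64-bit halves exchanged. */
  return vcombine_u8 (vget_high_u8 (__v), vget_low_u8 (__v));
}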
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c) { int8x8_t result; __asm__ ("saba %0.8b,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c) { int16x4_t result; __asm__ ("saba %0.4h,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c) { int32x2_t result; __asm__ ("saba %0.2s,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) { uint8x8_t result; __asm__ ("uaba %0.8b,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) { uint16x4_t result; __asm__ ("uaba %0.4h,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) { uint32x2_t result; __asm__ ("uaba %0.2s,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) { int16x8_t result; __asm__ ("sabal2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) { int32x4_t result; __asm__ ("sabal2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) { int64x2_t result; __asm__ ("sabal2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) { uint16x8_t result; __asm__ ("uabal2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) { uint32x4_t result; __asm__ ("uabal2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) { uint64x2_t result; __asm__ ("uabal2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) { int16x8_t result; __asm__ ("sabal %0.8h,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) { int32x4_t result; __asm__ ("sabal %0.4s,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) { int64x2_t result; __asm__ ("sabal %0.2d,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) { uint16x8_t result; __asm__ ("uabal %0.8h,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) { uint32x4_t result; __asm__ ("uabal 
%0.4s,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) { uint64x2_t result; __asm__ ("uabal %0.2d,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) { int8x16_t result; __asm__ ("saba %0.16b,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) { int16x8_t result; __asm__ ("saba %0.8h,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) { int32x4_t result; __asm__ ("saba %0.4s,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) { uint8x16_t result; __asm__ ("uaba %0.16b,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) { uint16x8_t result; __asm__ ("uaba %0.8h,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) { uint32x4_t result; __asm__ ("uaba %0.4s,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vabd_f32 (float32x2_t a, float32x2_t b) { float32x2_t result; __asm__ ("fabd %0.2s, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vabd_s8 (int8x8_t a, int8x8_t b) { int8x8_t result; __asm__ ("sabd %0.8b, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vabd_s16 (int16x4_t a, int16x4_t b) { int16x4_t result; __asm__ ("sabd %0.4h, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vabd_s32 (int32x2_t a, int32x2_t b) { int32x2_t result; __asm__ ("sabd %0.2s, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vabd_u8 (uint8x8_t a, uint8x8_t b) { uint8x8_t result; __asm__ ("uabd %0.8b, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vabd_u16 (uint16x4_t a, uint16x4_t b) { uint16x4_t result; __asm__ ("uabd %0.4h, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vabd_u32 (uint32x2_t a, uint32x2_t b) { uint32x2_t result; __asm__ ("uabd %0.2s, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vabdd_f64 (float64_t a, float64_t b) { float64_t result; __asm__ ("fabd %d0, %d1, %d2" : "=w"(result) : "w"(a), 
"w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabdl_high_s8 (int8x16_t a, int8x16_t b) { int16x8_t result; __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabdl_high_s16 (int16x8_t a, int16x8_t b) { int32x4_t result; __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vabdl_high_s32 (int32x4_t a, int32x4_t b) { int64x2_t result; __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabdl_high_u8 (uint8x16_t a, uint8x16_t b) { uint16x8_t result; __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabdl_high_u16 (uint16x8_t a, uint16x8_t b) { uint32x4_t result; __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vabdl_high_u32 (uint32x4_t a, uint32x4_t b) { uint64x2_t result; __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabdl_s8 (int8x8_t a, int8x8_t b) { int16x8_t result; __asm__ ("sabdl %0.8h, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabdl_s16 (int16x4_t a, int16x4_t b) { int32x4_t result; __asm__ ("sabdl %0.4s, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vabdl_s32 (int32x2_t a, int32x2_t b) { int64x2_t result; __asm__ ("sabdl %0.2d, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabdl_u8 (uint8x8_t a, uint8x8_t b) { uint16x8_t result; __asm__ ("uabdl %0.8h, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabdl_u16 (uint16x4_t a, uint16x4_t b) { uint32x4_t result; __asm__ ("uabdl %0.4s, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vabdl_u32 (uint32x2_t a, uint32x2_t b) { uint64x2_t result; __asm__ ("uabdl %0.2d, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vabdq_f32 (float32x4_t a, float32x4_t b) { float32x4_t result; __asm__ ("fabd %0.4s, %1.4s, %2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vabdq_f64 (float64x2_t a, float64x2_t b) { float64x2_t result; __asm__ ("fabd %0.2d, %1.2d, %2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vabdq_s8 (int8x16_t a, int8x16_t b) { int8x16_t result; __asm__ ("sabd %0.16b, %1.16b, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabdq_s16 (int16x8_t a, 
int16x8_t b) { int16x8_t result; __asm__ ("sabd %0.8h, %1.8h, %2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabdq_s32 (int32x4_t a, int32x4_t b) { int32x4_t result; __asm__ ("sabd %0.4s, %1.4s, %2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vabdq_u8 (uint8x16_t a, uint8x16_t b) { uint8x16_t result; __asm__ ("uabd %0.16b, %1.16b, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vabdq_u16 (uint16x8_t a, uint16x8_t b) { uint16x8_t result; __asm__ ("uabd %0.8h, %1.8h, %2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vabdq_u32 (uint32x4_t a, uint32x4_t b) { uint32x4_t result; __asm__ ("uabd %0.4s, %1.4s, %2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline float32_t __attribute__ ((__always_inline__)) vabds_f32 (float32_t a, float32_t b) { float32_t result; __asm__ ("fabd %s0, %s1, %s2" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
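/* Usage sketch for the absolute-difference family above: vabal_u8 computes
   |b[i] - c[i]|, widens it to 16 bits and accumulates, which is the usual
   inner step of a sum-of-absolute-differences (SAD) kernel.  Illustrative
   helper only. */
static __inline uint16x8_t __attribute__ ((__unused__))
example_sad_step (uint16x8_t __acc, uint8x8_t __a, uint8x8_t __b)
{
  /* __acc[i] += (uint16_t) |__a[i] - __b[i]| for each of the eight lanes. */
  return vabal_u8 (__acc, __a, __b);
}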
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vaddlv_s8 (int8x8_t a) { int16_t result; __asm__ ("saddlv %h0,%1.8b" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vaddlv_s16 (int16x4_t a) { int32_t result; __asm__ ("saddlv %s0,%1.4h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vaddlv_u8 (uint8x8_t a) { uint16_t result; __asm__ ("uaddlv %h0,%1.8b" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vaddlv_u16 (uint16x4_t a) { uint32_t result; __asm__ ("uaddlv %s0,%1.4h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vaddlvq_s8 (int8x16_t a) { int16_t result; __asm__ ("saddlv %h0,%1.16b" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vaddlvq_s16 (int16x8_t a) { int32_t result; __asm__ ("saddlv %s0,%1.8h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vaddlvq_s32 (int32x4_t a) { int64_t result; __asm__ ("saddlv %d0,%1.4s" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vaddlvq_u8 (uint8x16_t a) { uint16_t result; __asm__ ("uaddlv %h0,%1.16b" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vaddlvq_u16 (uint16x8_t a) { uint32_t result; __asm__ ("uaddlv %s0,%1.8h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vaddlvq_u32 (uint32x4_t a) { uint64_t result; __asm__ ("uaddlv %d0,%1.4s" : "=w"(result) : "w"(a) : ); return result; }
# 6268 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vcvtx_f32_f64 (float64x2_t a) { float32x2_t result; __asm__ ("fcvtxn %0.2s,%1.2d" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvtx_high_f32_f64 (float32x2_t a, float64x2_t b) { float32x4_t result; __asm__ ("fcvtxn2 %0.4s,%1.2d" : "=w"(result) : "w" (b), "0"(a) : ); return result; }
__extension__ static __inline float32_t __attribute__ ((__always_inline__)) vcvtxd_f32_f64 (float64_t a) { float32_t result; __asm__ ("fcvtxn %s0,%d1" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c) { float32x2_t result; float32x2_t t1; __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s" : "=w"(result), "=w"(t1) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c) { int16x4_t result; __asm__ ("mla %0.4h,%2.4h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c) { int32x2_t result; __asm__ ("mla %0.2s,%2.2s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) { uint16x4_t result; __asm__ ("mla %0.4h,%2.4h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) { uint32x2_t result; __asm__ ("mla %0.2s,%2.2s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c) { int8x8_t result; __asm__ ("mla %0.8b, %2.8b, %3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c) { int16x4_t result; __asm__ ("mla %0.4h, %2.4h, %3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c) { int32x2_t result; __asm__ ("mla %0.2s, %2.2s, %3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) { uint8x8_t result; __asm__ ("mla %0.8b, %2.8b, %3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) { uint16x4_t result; __asm__ ("mla %0.4h, %2.4h, %3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) { uint32x2_t result; __asm__ ("mla %0.2s, %2.2s, %3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; }
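/* Usage sketch for the multiply-accumulate intrinsics above: the _n_ forms
   broadcast a scalar into every lane of the multiplier, so vmla_n_s32
   computes a[i] + b[i] * c with a single MLA-by-element instruction.
   Illustrative helper only. */
static __inline int32x2_t __attribute__ ((__unused__))
example_axpy_s32 (int32x2_t __acc, int32x2_t __x, int32_t __scale)
{
  return vmla_n_s32 (__acc, __x, __scale); /* __acc[i] + __x[i] * __scale */
}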
__asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c) { uint32x4_t result; __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c) { uint64x2_t result; __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) { int16x8_t result; __asm__ ("smlal2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) { int32x4_t result; __asm__ ("smlal2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) { int64x2_t result; __asm__ ("smlal2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) { uint16x8_t result; __asm__ ("umlal2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) { uint32x4_t result; __asm__ ("umlal2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) { uint64x2_t result; __asm__ ("umlal2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } # 6757 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c) { int32x4_t result; __asm__ ("smlal %0.4s,%2.4h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c) { int64x2_t result; __asm__ ("smlal %0.2d,%2.2s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c) { uint32x4_t result; __asm__ ("umlal %0.4s,%2.4h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c) { uint64x2_t result; __asm__ ("umlal %0.2d,%2.2s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) { int16x8_t result; __asm__ ("smlal %0.8h,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) { 
int32x4_t result; __asm__ ("smlal %0.4s,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) { int64x2_t result; __asm__ ("smlal %0.2d,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) { uint16x8_t result; __asm__ ("umlal %0.8h,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) { uint32x4_t result; __asm__ ("umlal %0.4s,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) { uint64x2_t result; __asm__ ("umlal %0.2d,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c) { float32x4_t result; float32x4_t t1; __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s" : "=w"(result), "=w"(t1) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) { int16x8_t result; __asm__ ("mla %0.8h,%2.8h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) { int32x4_t result; __asm__ ("mla %0.4s,%2.4s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) { uint16x8_t result; __asm__ ("mla %0.8h,%2.8h,%3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) { uint32x4_t result; __asm__ ("mla %0.4s,%2.4s,%3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) { int8x16_t result; __asm__ ("mla %0.16b, %2.16b, %3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) { int16x8_t result; __asm__ ("mla %0.8h, %2.8h, %3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) { int32x4_t result; __asm__ ("mla %0.4s, %2.4s, %3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) { uint8x16_t result; __asm__ ("mla %0.16b, %2.16b, %3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) { uint16x8_t result; __asm__ ("mla %0.8h, %2.8h, 
%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) { uint32x4_t result; __asm__ ("mla %0.4s, %2.4s, %3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c) { float32x2_t result; float32x2_t t1; __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s" : "=w"(result), "=w"(t1) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c) { int16x4_t result; __asm__ ("mls %0.4h, %2.4h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c) { int32x2_t result; __asm__ ("mls %0.2s, %2.2s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) { uint16x4_t result; __asm__ ("mls %0.4h, %2.4h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) { uint32x2_t result; __asm__ ("mls %0.2s, %2.2s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c) { int8x8_t result; __asm__ ("mls %0.8b,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c) { int16x4_t result; __asm__ ("mls %0.4h,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c) { int32x2_t result; __asm__ ("mls %0.2s,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) { uint8x8_t result; __asm__ ("mls %0.8b,%2.8b,%3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) { uint16x4_t result; __asm__ ("mls %0.4h,%2.4h,%3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) { uint32x2_t result; __asm__ ("mls %0.2s,%2.2s,%3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } # 7223 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) { int32x4_t result; __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) { int64x2_t result; __asm__ ("smlsl2 %0.2d, 
%2.4s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c) { uint32x4_t result; __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c) { uint64x2_t result; __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) { int16x8_t result; __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) { int32x4_t result; __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) { int64x2_t result; __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) { uint16x8_t result; __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) { uint32x4_t result; __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) { uint64x2_t result; __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } # 7445 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c) { int32x4_t result; __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c) { int64x2_t result; __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c) { uint32x4_t result; __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c) { uint64x2_t result; __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c) { int16x8_t result; __asm__ ("smlsl %0.8h, %2.8b, %3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c) { int32x4_t 
result; __asm__ ("smlsl %0.4s, %2.4h, %3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c) { int64x2_t result; __asm__ ("smlsl %0.2d, %2.2s, %3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) { uint16x8_t result; __asm__ ("umlsl %0.8h, %2.8b, %3.8b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) { uint32x4_t result; __asm__ ("umlsl %0.4s, %2.4h, %3.4h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) { uint64x2_t result; __asm__ ("umlsl %0.2d, %2.2s, %3.2s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c) { float32x4_t result; float32x4_t t1; __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s" : "=w"(result), "=w"(t1) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) { int16x8_t result; __asm__ ("mls %0.8h, %2.8h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) { int32x4_t result; __asm__ ("mls %0.4s, %2.4s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) { uint16x8_t result; __asm__ ("mls %0.8h, %2.8h, %3.h[0]" : "=w"(result) : "0"(a), "w"(b), "x"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) { uint32x4_t result; __asm__ ("mls %0.4s, %2.4s, %3.s[0]" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) { int8x16_t result; __asm__ ("mls %0.16b,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) { int16x8_t result; __asm__ ("mls %0.8h,%2.8h,%3.8h" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) { int32x4_t result; __asm__ ("mls %0.4s,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) { uint8x16_t result; __asm__ ("mls %0.16b,%2.16b,%3.16b" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) { uint16x8_t result; __asm__ ("mls %0.8h,%2.8h,%3.8h" 
: "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) { uint32x4_t result; __asm__ ("mls %0.4s,%2.4s,%3.4s" : "=w"(result) : "0"(a), "w"(b), "w"(c) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmovl_high_s8 (int8x16_t a) { int16x8_t result; __asm__ ("sshll2 %0.8h,%1.16b,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmovl_high_s16 (int16x8_t a) { int32x4_t result; __asm__ ("sshll2 %0.4s,%1.8h,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmovl_high_s32 (int32x4_t a) { int64x2_t result; __asm__ ("sshll2 %0.2d,%1.4s,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmovl_high_u8 (uint8x16_t a) { uint16x8_t result; __asm__ ("ushll2 %0.8h,%1.16b,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmovl_high_u16 (uint16x8_t a) { uint32x4_t result; __asm__ ("ushll2 %0.4s,%1.8h,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmovl_high_u32 (uint32x4_t a) { uint64x2_t result; __asm__ ("ushll2 %0.2d,%1.4s,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmovl_s8 (int8x8_t a) { int16x8_t result; __asm__ ("sshll %0.8h,%1.8b,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmovl_s16 (int16x4_t a) { int32x4_t result; __asm__ ("sshll %0.4s,%1.4h,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmovl_s32 (int32x2_t a) { int64x2_t result; __asm__ ("sshll %0.2d,%1.2s,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmovl_u8 (uint8x8_t a) { uint16x8_t result; __asm__ ("ushll %0.8h,%1.8b,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmovl_u16 (uint16x4_t a) { uint32x4_t result; __asm__ ("ushll %0.4s,%1.4h,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmovl_u32 (uint32x2_t a) { uint64x2_t result; __asm__ ("ushll %0.2d,%1.2s,#0" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmovn_high_s16 (int8x8_t a, int16x8_t b) { int8x16_t result = vcombine_s8 (a, vcreate_s8 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.16b,%1.8h" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmovn_high_s32 (int16x4_t a, int32x4_t b) { int16x8_t result = vcombine_s16 (a, vcreate_s16 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.8h,%1.4s" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmovn_high_s64 (int32x2_t a, int64x2_t b) { int32x4_t result = vcombine_s32 (a, vcreate_s32 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.4s,%1.2d" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline 
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmovn_high_s16 (int8x8_t a, int16x8_t b) { int8x16_t result = vcombine_s8 (a, vcreate_s8 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.16b,%1.8h" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmovn_high_s32 (int16x4_t a, int32x4_t b) { int16x8_t result = vcombine_s16 (a, vcreate_s16 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.8h,%1.4s" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmovn_high_s64 (int32x2_t a, int64x2_t b) { int32x4_t result = vcombine_s32 (a, vcreate_s32 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.4s,%1.2d" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmovn_high_u16 (uint8x8_t a, uint16x8_t b) { uint8x16_t result = vcombine_u8 (a, vcreate_u8 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.16b,%1.8h" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmovn_high_u32 (uint16x4_t a, uint32x4_t b) { uint16x8_t result = vcombine_u16 (a, vcreate_u16 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.8h,%1.4s" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmovn_high_u64 (uint32x2_t a, uint64x2_t b) { uint32x4_t result = vcombine_u32 (a, vcreate_u32 (((uint64_t) 0x0))); __asm__ ("xtn2 %0.4s,%1.2d" : "+w"(result) : "w"(b) : ); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmovn_s16 (int16x8_t a) { int8x8_t result; __asm__ ("xtn %0.8b,%1.8h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmovn_s32 (int32x4_t a) { int16x4_t result; __asm__ ("xtn %0.4h,%1.4s" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmovn_s64 (int64x2_t a) { int32x2_t result; __asm__ ("xtn %0.2s,%1.2d" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmovn_u16 (uint16x8_t a) { uint8x8_t result; __asm__ ("xtn %0.8b,%1.8h" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmovn_u32 (uint32x4_t a) { uint16x4_t result; __asm__ ("xtn %0.4h,%1.4s" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmovn_u64 (uint64x2_t a) { uint32x2_t result; __asm__ ("xtn %0.2s,%1.2d" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_n_f32 (float32x2_t a, float32_t b) { float32x2_t result; __asm__ ("fmul %0.2s,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmul_n_s16 (int16x4_t a, int16_t b) { int16x4_t result; __asm__ ("mul %0.4h,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_n_s32 (int32x2_t a, int32_t b) { int32x2_t result; __asm__ ("mul %0.2s,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_n_u16 (uint16x4_t a, uint16_t b) { uint16x4_t result; __asm__ ("mul %0.4h,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_n_u32 (uint32x2_t a, uint32_t b) { uint32x2_t result; __asm__ ("mul %0.2s,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
# 8100 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmull_high_n_s16 (int16x8_t a, int16_t b) { int32x4_t result; __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmull_high_n_s32 (int32x4_t a, int32_t b) { int64x2_t result; __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]" : "=w"(result) : 
"w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmull_high_n_u16 (uint16x8_t a, uint16_t b) { uint32x4_t result; __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmull_high_n_u32 (uint32x4_t a, uint32_t b) { uint64x2_t result; __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vmull_high_p8 (poly8x16_t a, poly8x16_t b) { poly16x8_t result; __asm__ ("pmull2 %0.8h,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmull_high_s8 (int8x16_t a, int8x16_t b) { int16x8_t result; __asm__ ("smull2 %0.8h,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmull_high_s16 (int16x8_t a, int16x8_t b) { int32x4_t result; __asm__ ("smull2 %0.4s,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmull_high_s32 (int32x4_t a, int32x4_t b) { int64x2_t result; __asm__ ("smull2 %0.2d,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmull_high_u8 (uint8x16_t a, uint8x16_t b) { uint16x8_t result; __asm__ ("umull2 %0.8h,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmull_high_u16 (uint16x8_t a, uint16x8_t b) { uint32x4_t result; __asm__ ("umull2 %0.4s,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmull_high_u32 (uint32x4_t a, uint32x4_t b) { uint64x2_t result; __asm__ ("umull2 %0.2d,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 8325 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmull_n_s16 (int16x4_t a, int16_t b) { int32x4_t result; __asm__ ("smull %0.4s,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmull_n_s32 (int32x2_t a, int32_t b) { int64x2_t result; __asm__ ("smull %0.2d,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmull_n_u16 (uint16x4_t a, uint16_t b) { uint32x4_t result; __asm__ ("umull %0.4s,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmull_n_u32 (uint32x2_t a, uint32_t b) { uint64x2_t result; __asm__ ("umull %0.2d,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vmull_p8 (poly8x8_t a, poly8x8_t b) { poly16x8_t result; __asm__ ("pmull %0.8h, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmull_s8 (int8x8_t a, int8x8_t b) { int16x8_t result; __asm__ ("smull %0.8h, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ 
static __inline int32x4_t __attribute__ ((__always_inline__)) vmull_s16 (int16x4_t a, int16x4_t b) { int32x4_t result; __asm__ ("smull %0.4s, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmull_s32 (int32x2_t a, int32x2_t b) { int64x2_t result; __asm__ ("smull %0.2d, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmull_u8 (uint8x8_t a, uint8x8_t b) { uint16x8_t result; __asm__ ("umull %0.8h, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmull_u16 (uint16x4_t a, uint16x4_t b) { uint32x4_t result; __asm__ ("umull %0.4s, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmull_u32 (uint32x2_t a, uint32x2_t b) { uint64x2_t result; __asm__ ("umull %0.2d, %1.2s, %2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_n_f32 (float32x4_t a, float32_t b) { float32x4_t result; __asm__ ("fmul %0.4s,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulq_n_f64 (float64x2_t a, float64_t b) { float64x2_t result; __asm__ ("fmul %0.2d,%1.2d,%2.d[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_n_s16 (int16x8_t a, int16_t b) { int16x8_t result; __asm__ ("mul %0.8h,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_n_s32 (int32x4_t a, int32_t b) { int32x4_t result; __asm__ ("mul %0.4s,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_n_u16 (uint16x8_t a, uint16_t b) { uint16x8_t result; __asm__ ("mul %0.8h,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_n_u32 (uint32x4_t a, uint32_t b) { uint32x4_t result; __asm__ ("mul %0.4s,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmulx_f32 (float32x2_t a, float32x2_t b) { float32x2_t result; __asm__ ("fmulx %0.2s,%1.2s,%2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 8536 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmulxd_f64 (float64_t a, float64_t b) { float64_t result; __asm__ ("fmulx %d0, %d1, %d2" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulxq_f32 (float32x4_t a, float32x4_t b) { float32x4_t result; __asm__ ("fmulx %0.4s,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulxq_f64 (float64x2_t a, float64x2_t b) { float64x2_t result; __asm__ ("fmulx %0.2d,%1.2d,%2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 8595 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static 
__inline float32_t __attribute__ ((__always_inline__)) vmulxs_f32 (float32_t a, float32_t b) { float32_t result; __asm__ ("fmulx %s0, %s1, %s2" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vmvn_p8 (poly8x8_t a) { poly8x8_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmvn_s8 (int8x8_t a) { int8x8_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmvn_s16 (int16x4_t a) { int16x4_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmvn_s32 (int32x2_t a) { int32x2_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmvn_u8 (uint8x8_t a) { uint8x8_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmvn_u16 (uint16x4_t a) { uint16x4_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmvn_u32 (uint32x2_t a) { uint32x2_t result; __asm__ ("mvn %0.8b,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vmvnq_p8 (poly8x16_t a) { poly8x16_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmvnq_s8 (int8x16_t a) { int8x16_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmvnq_s16 (int16x8_t a) { int16x8_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmvnq_s32 (int32x4_t a) { int32x4_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmvnq_u8 (uint8x16_t a) { uint8x16_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmvnq_u16 (uint16x8_t a) { uint16x8_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmvnq_u32 (uint32x4_t a) { uint32x4_t result; __asm__ ("mvn %0.16b,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vpadal_s8 (int16x4_t a, int8x8_t b) { int16x4_t result; __asm__ ("sadalp %0.4h,%2.8b" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vpadal_s16 (int32x2_t a, int16x4_t b) { int32x2_t result; __asm__ ("sadalp %0.2s,%2.4h" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vpadal_s32 (int64x1_t a, int32x2_t b) { int64x1_t result; __asm__ ("sadalp %0.1d,%2.2s" : "=w"(result) : "0"(a), "w"(b) : ); 
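/* Annotation (not in the original header): throughout the vpadal_* family the
   "0"(a) constraint ties input a to output operand 0, matching the in-place
   accumulate semantics of the sadalp/uadalp instructions. */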
return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vpadal_u8 (uint16x4_t a, uint8x8_t b) { uint16x4_t result; __asm__ ("uadalp %0.4h,%2.8b" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vpadal_u16 (uint32x2_t a, uint16x4_t b) { uint32x2_t result; __asm__ ("uadalp %0.2s,%2.4h" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vpadal_u32 (uint64x1_t a, uint32x2_t b) { uint64x1_t result; __asm__ ("uadalp %0.1d,%2.2s" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vpadalq_s8 (int16x8_t a, int8x16_t b) { int16x8_t result; __asm__ ("sadalp %0.8h,%2.16b" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vpadalq_s16 (int32x4_t a, int16x8_t b) { int32x4_t result; __asm__ ("sadalp %0.4s,%2.8h" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vpadalq_s32 (int64x2_t a, int32x4_t b) { int64x2_t result; __asm__ ("sadalp %0.2d,%2.4s" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vpadalq_u8 (uint16x8_t a, uint8x16_t b) { uint16x8_t result; __asm__ ("uadalp %0.8h,%2.16b" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vpadalq_u16 (uint32x4_t a, uint16x8_t b) { uint32x4_t result; __asm__ ("uadalp %0.4s,%2.8h" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vpadalq_u32 (uint64x2_t a, uint32x4_t b) { uint64x2_t result; __asm__ ("uadalp %0.2d,%2.4s" : "=w"(result) : "0"(a), "w"(b) : ); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vpadd_f32 (float32x2_t a, float32x2_t b) { float32x2_t result; __asm__ ("faddp %0.2s,%1.2s,%2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vpaddl_s8 (int8x8_t a) { int16x4_t result; __asm__ ("saddlp %0.4h,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vpaddl_s16 (int16x4_t a) { int32x2_t result; __asm__ ("saddlp %0.2s,%1.4h" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vpaddl_s32 (int32x2_t a) { int64x1_t result; __asm__ ("saddlp %0.1d,%1.2s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vpaddl_u8 (uint8x8_t a) { uint16x4_t result; __asm__ ("uaddlp %0.4h,%1.8b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vpaddl_u16 (uint16x4_t a) { uint32x2_t result; __asm__ ("uaddlp %0.2s,%1.4h" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vpaddl_u32 (uint32x2_t a) { uint64x1_t result; __asm__ ("uaddlp %0.1d,%1.2s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vpaddlq_s8 (int8x16_t a) { int16x8_t result; __asm__ 
("saddlp %0.8h,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vpaddlq_s16 (int16x8_t a) { int32x4_t result; __asm__ ("saddlp %0.4s,%1.8h" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vpaddlq_s32 (int32x4_t a) { int64x2_t result; __asm__ ("saddlp %0.2d,%1.4s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vpaddlq_u8 (uint8x16_t a) { uint16x8_t result; __asm__ ("uaddlp %0.8h,%1.16b" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vpaddlq_u16 (uint16x8_t a) { uint32x4_t result; __asm__ ("uaddlp %0.4s,%1.8h" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vpaddlq_u32 (uint32x4_t a) { uint64x2_t result; __asm__ ("uaddlp %0.2d,%1.4s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vpaddq_f32 (float32x4_t a, float32x4_t b) { float32x4_t result; __asm__ ("faddp %0.4s,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vpaddq_f64 (float64x2_t a, float64x2_t b) { float64x2_t result; __asm__ ("faddp %0.2d,%1.2d,%2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vpaddq_s8 (int8x16_t a, int8x16_t b) { int8x16_t result; __asm__ ("addp %0.16b,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vpaddq_s16 (int16x8_t a, int16x8_t b) { int16x8_t result; __asm__ ("addp %0.8h,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vpaddq_s32 (int32x4_t a, int32x4_t b) { int32x4_t result; __asm__ ("addp %0.4s,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vpaddq_s64 (int64x2_t a, int64x2_t b) { int64x2_t result; __asm__ ("addp %0.2d,%1.2d,%2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vpaddq_u8 (uint8x16_t a, uint8x16_t b) { uint8x16_t result; __asm__ ("addp %0.16b,%1.16b,%2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vpaddq_u16 (uint16x8_t a, uint16x8_t b) { uint16x8_t result; __asm__ ("addp %0.8h,%1.8h,%2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vpaddq_u32 (uint32x4_t a, uint32x4_t b) { uint32x4_t result; __asm__ ("addp %0.4s,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vpaddq_u64 (uint64x2_t a, uint64x2_t b) { uint64x2_t result; __asm__ ("addp %0.2d,%1.2d,%2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vpadds_f32 (float32x2_t a) { float32_t result; __asm__ ("faddp %s0,%1.2s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline 
int16x4_t __attribute__ ((__always_inline__)) vqdmulh_n_s16 (int16x4_t a, int16_t b) { int16x4_t result; __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqdmulh_n_s32 (int32x2_t a, int32_t b) { int32x2_t result; __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqdmulhq_n_s16 (int16x8_t a, int16_t b) { int16x8_t result; __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmulhq_n_s32 (int32x4_t a, int32_t b) { int32x4_t result; __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqmovn_high_s16 (int8x8_t a, int16x8_t b) { int8x16_t result = vcombine_s8 (a, vcreate_s8 (((uint64_t) 0x0))); __asm__ ("sqxtn2 %0.16b, %1.8h" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqmovn_high_s32 (int16x4_t a, int32x4_t b) { int16x8_t result = vcombine_s16 (a, vcreate_s16 (((uint64_t) 0x0))); __asm__ ("sqxtn2 %0.8h, %1.4s" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqmovn_high_s64 (int32x2_t a, int64x2_t b) { int32x4_t result = vcombine_s32 (a, vcreate_s32 (((uint64_t) 0x0))); __asm__ ("sqxtn2 %0.4s, %1.2d" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqmovn_high_u16 (uint8x8_t a, uint16x8_t b) { uint8x16_t result = vcombine_u8 (a, vcreate_u8 (((uint64_t) 0x0))); __asm__ ("uqxtn2 %0.16b, %1.8h" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqmovn_high_u32 (uint16x4_t a, uint32x4_t b) { uint16x8_t result = vcombine_u16 (a, vcreate_u16 (((uint64_t) 0x0))); __asm__ ("uqxtn2 %0.8h, %1.4s" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqmovn_high_u64 (uint32x2_t a, uint64x2_t b) { uint32x4_t result = vcombine_u32 (a, vcreate_u32 (((uint64_t) 0x0))); __asm__ ("uqxtn2 %0.4s, %1.2d" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqmovun_high_s16 (uint8x8_t a, int16x8_t b) { uint8x16_t result = vcombine_u8 (a, vcreate_u8 (((uint64_t) 0x0))); __asm__ ("sqxtun2 %0.16b, %1.8h" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqmovun_high_s32 (uint16x4_t a, int32x4_t b) { uint16x8_t result = vcombine_u16 (a, vcreate_u16 (((uint64_t) 0x0))); __asm__ ("sqxtun2 %0.8h, %1.4s" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqmovun_high_s64 (uint32x2_t a, int64x2_t b) { uint32x4_t result = vcombine_u32 (a, vcreate_u32 (((uint64_t) 0x0))); __asm__ ("sqxtun2 %0.4s, %1.2d" : "+w"(result) : "w"(b) : ); return result; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmulh_n_s16 (int16x4_t a, int16_t b) { int16x4_t result; __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ 
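/* Illustrative usage sketch (annotation, not part of arm_neon.h): scaling a
   vector of Q15 fixed-point samples by a Q15 constant with vqdmulhq_n_s16,
   which computes saturate((2 * a[i] * b) >> 16) per lane, i.e. the Q15
   product.  The helper name q15_scale_s16 is hypothetical. */
static __inline int16x8_t
q15_scale_s16 (int16x8_t v, int16_t c)   /* hypothetical helper */
{
  return vqdmulhq_n_s16 (v, c);
}
__extension__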
static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmulh_n_s32 (int32x2_t a, int32_t b) { int32x2_t result; __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmulhq_n_s16 (int16x8_t a, int16_t b) { int16x8_t result; __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]" : "=w"(result) : "w"(a), "x"(b) : ); return result; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmulhq_n_s32 (int32x4_t a, int32_t b) { int32x4_t result; __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 9776 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrsqrte_f32 (float32x2_t a) { float32x2_t result; __asm__ ("frsqrte %0.2s,%1.2s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrsqrte_f64 (float64x1_t a) { float64x1_t result; __asm__ ("frsqrte %d0,%d1" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrsqrte_u32 (uint32x2_t a) { uint32x2_t result; __asm__ ("ursqrte %0.2s,%1.2s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vrsqrted_f64 (float64_t a) { float64_t result; __asm__ ("frsqrte %d0,%d1" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrsqrteq_f32 (float32x4_t a) { float32x4_t result; __asm__ ("frsqrte %0.4s,%1.4s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrsqrteq_f64 (float64x2_t a) { float64x2_t result; __asm__ ("frsqrte %0.2d,%1.2d" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrsqrteq_u32 (uint32x4_t a) { uint32x4_t result; __asm__ ("ursqrte %0.4s,%1.4s" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vrsqrtes_f32 (float32_t a) { float32_t result; __asm__ ("frsqrte %s0,%s1" : "=w"(result) : "w"(a) : ); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrsqrts_f32 (float32x2_t a, float32x2_t b) { float32x2_t result; __asm__ ("frsqrts %0.2s,%1.2s,%2.2s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vrsqrtsd_f64 (float64_t a, float64_t b) { float64_t result; __asm__ ("frsqrts %d0,%d1,%d2" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrsqrtsq_f32 (float32x4_t a, float32x4_t b) { float32x4_t result; __asm__ ("frsqrts %0.4s,%1.4s,%2.4s" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrsqrtsq_f64 (float64x2_t a, float64x2_t b) { float64x2_t result; __asm__ ("frsqrts %0.2d,%1.2d,%2.2d" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vrsqrtss_f32 (float32_t a, float32_t b) { float32_t result; __asm__ ("frsqrts %s0,%s1,%s2" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 10185 
"/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtst_p8 (poly8x8_t a, poly8x8_t b) { uint8x8_t result; __asm__ ("cmtst %0.8b, %1.8b, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtst_p16 (poly16x4_t a, poly16x4_t b) { uint16x4_t result; __asm__ ("cmtst %0.4h, %1.4h, %2.4h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtstq_p8 (poly8x16_t a, poly8x16_t b) { uint8x16_t result; __asm__ ("cmtst %0.16b, %1.16b, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtstq_p16 (poly16x8_t a, poly16x8_t b) { uint16x8_t result; __asm__ ("cmtst %0.8h, %1.8h, %2.8h" : "=w"(result) : "w"(a), "w"(b) : ); return result; } # 10284 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 typedef struct int8x2_t { int8_t val[2]; } int8x2_t; typedef struct int16x2_t { int16_t val[2]; } int16x2_t; typedef struct uint8x2_t { uint8_t val[2]; } uint8x2_t; typedef struct uint16x2_t { uint16_t val[2]; } uint16x2_t; typedef struct float16x2_t { float16_t val[2]; } float16x2_t; typedef struct poly8x2_t { poly8_t val[2]; } poly8x2_t; typedef struct poly16x2_t { poly16_t val[2]; } poly16x2_t; typedef struct int8x3_t { int8_t val[3]; } int8x3_t; typedef struct int16x3_t { int16_t val[3]; } int16x3_t; typedef struct int32x3_t { int32_t val[3]; } int32x3_t; typedef struct int64x3_t { int64_t val[3]; } int64x3_t; typedef struct uint8x3_t { uint8_t val[3]; } uint8x3_t; typedef struct uint16x3_t { uint16_t val[3]; } uint16x3_t; typedef struct uint32x3_t { uint32_t val[3]; } uint32x3_t; typedef struct uint64x3_t { uint64_t val[3]; } uint64x3_t; typedef struct float16x3_t { float16_t val[3]; } float16x3_t; typedef struct float32x3_t { float32_t val[3]; } float32x3_t; typedef struct float64x3_t { float64_t val[3]; } float64x3_t; typedef struct poly8x3_t { poly8_t val[3]; } poly8x3_t; typedef struct poly16x3_t { poly16_t val[3]; } poly16x3_t; typedef struct int8x4_t { int8_t val[4]; } int8x4_t; typedef struct int64x4_t { int64_t val[4]; } int64x4_t; typedef struct uint8x4_t { uint8_t val[4]; } uint8x4_t; typedef struct uint64x4_t { uint64_t val[4]; } uint64x4_t; typedef struct poly8x4_t { poly8_t val[4]; } poly8x4_t; typedef struct float64x4_t { float64_t val[4]; } float64x4_t; # 10338 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_f16 (float16_t *__ptr, float16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float16x8x2_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (((uint64_t) 0))); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_f32 (float32_t *__ptr, float32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float32x4x2_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (((uint64_t) 0))); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (((uint64_t) 0))); __o = 
__builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_f64 (float64_t *__ptr, float64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float64x2x2_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (((uint64_t) 0))); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[1], 1); __builtin_aarch64_st2_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_p8 (poly8_t *__ptr, poly8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly8x16x2_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (((uint64_t) 0))); __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_p16 (poly16_t *__ptr, poly16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly16x8x2_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (((uint64_t) 0))); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_s8 (int8_t *__ptr, int8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int8x16x2_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (((uint64_t) 0))); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_s16 (int16_t *__ptr, int16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int16x8x2_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (((uint64_t) 0))); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_s32 (int32_t *__ptr, int32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int32x4x2_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (((uint64_t) 0))); __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 
1); __builtin_aarch64_st2_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_s64 (int64_t *__ptr, int64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int64x2x2_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (((uint64_t) 0))); __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1); __builtin_aarch64_st2_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_u8 (uint8_t *__ptr, uint8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint8x16x2_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (((uint64_t) 0))); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_u16 (uint16_t *__ptr, uint16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint16x8x2_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (((uint64_t) 0))); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_u32 (uint32_t *__ptr, uint32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint32x4x2_t __temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (((uint64_t) 0))); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1); __builtin_aarch64_st2_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2_lane_u64 (uint64_t *__ptr, uint64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint64x2x2_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (((uint64_t) 0))); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1); __builtin_aarch64_st2_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } # 10378 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_f16 (float16_t *__ptr, float16x8x2_t __b, const int __c) { union { float16x8x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_f32 (float32_t *__ptr, float32x4x2_t __b, const int __c) { union { float32x4x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev4sf 
((__builtin_aarch64_simd_sf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_f64 (float64_t *__ptr, float64x2x2_t __b, const int __c) { union { float64x2x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_p8 (poly8_t *__ptr, poly8x16x2_t __b, const int __c) { union { poly8x16x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_p16 (poly16_t *__ptr, poly16x8x2_t __b, const int __c) { union { poly16x8x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_s8 (int8_t *__ptr, int8x16x2_t __b, const int __c) { union { int8x16x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_s16 (int16_t *__ptr, int16x8x2_t __b, const int __c) { union { int16x8x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_s32 (int32_t *__ptr, int32x4x2_t __b, const int __c) { union { int32x4x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_s64 (int64_t *__ptr, int64x2x2_t __b, const int __c) { union { int64x2x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_u8 (uint8_t *__ptr, uint8x16x2_t __b, const int __c) { union { uint8x16x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_u16 (uint16_t *__ptr, uint16x8x2_t __b, const int __c) { union { uint16x8x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_u32 (uint32_t *__ptr, uint32x4x2_t __b, const int __c) { union { uint32x4x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_lane_u64 (uint64_t *__ptr, uint64x2x2_t __b, const int __c) { union { uint64x2x2_t __i; __builtin_aarch64_simd_oi __o; } __temp = { __b }; __builtin_aarch64_st2_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); } # 10420 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) 
vst3_lane_f16 (float16_t *__ptr, float16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float16x8x3_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (((uint64_t) 0))); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (((uint64_t) 0))); __temp.val[2] = vcombine_f16 (__b.val[2], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_f32 (float32_t *__ptr, float32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float32x4x3_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (((uint64_t) 0))); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (((uint64_t) 0))); __temp.val[2] = vcombine_f32 (__b.val[2], vcreate_f32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_f64 (float64_t *__ptr, float64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float64x2x3_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (((uint64_t) 0))); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (((uint64_t) 0))); __temp.val[2] = vcombine_f64 (__b.val[2], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[2], 2); __builtin_aarch64_st3_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_p8 (poly8_t *__ptr, poly8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly8x16x3_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (((uint64_t) 0))); __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (((uint64_t) 0))); __temp.val[2] = vcombine_p8 (__b.val[2], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_p16 (poly16_t *__ptr, poly16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly16x8x3_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (((uint64_t) 0))); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (((uint64_t) 0))); __temp.val[2] = vcombine_p16 (__b.val[2], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } 
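/* Illustrative usage sketch (annotation, not part of arm_neon.h): storing one
   interleaved {x, y, z} triple from lane 1 of three float32x2_t vectors with
   vst3_lane_f32 defined above.  The helper name store_xyz_lane1 is
   hypothetical. */
static __inline void
store_xyz_lane1 (float32_t *p, float32x2_t x, float32x2_t y, float32x2_t z)
{
  float32x2x3_t t;
  t.val[0] = x;
  t.val[1] = y;
  t.val[2] = z;
  vst3_lane_f32 (p, t, 1);   /* writes x[1], y[1], z[1] to p[0], p[1], p[2] */
}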
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_s8 (int8_t *__ptr, int8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int8x16x3_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (((uint64_t) 0))); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (((uint64_t) 0))); __temp.val[2] = vcombine_s8 (__b.val[2], vcreate_s8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_s16 (int16_t *__ptr, int16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int16x8x3_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (((uint64_t) 0))); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (((uint64_t) 0))); __temp.val[2] = vcombine_s16 (__b.val[2], vcreate_s16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_s32 (int32_t *__ptr, int32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int32x4x3_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (((uint64_t) 0))); __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (((uint64_t) 0))); __temp.val[2] = vcombine_s32 (__b.val[2], vcreate_s32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_s64 (int64_t *__ptr, int64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int64x2x3_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (((uint64_t) 0))); __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (((uint64_t) 0))); __temp.val[2] = vcombine_s64 (__b.val[2], vcreate_s64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2); __builtin_aarch64_st3_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_u8 (uint8_t *__ptr, uint8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint8x16x3_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (((uint64_t) 0))); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (((uint64_t) 0))); __temp.val[2] = vcombine_u8 (__b.val[2], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) 
__ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_u16 (uint16_t *__ptr, uint16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint16x8x3_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (((uint64_t) 0))); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (((uint64_t) 0))); __temp.val[2] = vcombine_u16 (__b.val[2], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_u32 (uint32_t *__ptr, uint32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint32x4x3_t __temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (((uint64_t) 0))); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (((uint64_t) 0))); __temp.val[2] = vcombine_u32 (__b.val[2], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2); __builtin_aarch64_st3_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3_lane_u64 (uint64_t *__ptr, uint64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint64x2x3_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (((uint64_t) 0))); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (((uint64_t) 0))); __temp.val[2] = vcombine_u64 (__b.val[2], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2); __builtin_aarch64_st3_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } # 10460 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_f16 (float16_t *__ptr, float16x8x3_t __b, const int __c) { union { float16x8x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_f32 (float32_t *__ptr, float32x4x3_t __b, const int __c) { union { float32x4x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev4sf ((__builtin_aarch64_simd_sf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_f64 (float64_t *__ptr, float64x2x3_t __b, const int __c) { union { float64x2x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_p8 (poly8_t *__ptr, poly8x16x3_t __b, const int __c) { union { poly8x16x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) 
vst3q_lane_p16 (poly16_t *__ptr, poly16x8x3_t __b, const int __c) { union { poly16x8x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_s8 (int8_t *__ptr, int8x16x3_t __b, const int __c) { union { int8x16x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_s16 (int16_t *__ptr, int16x8x3_t __b, const int __c) { union { int16x8x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_s32 (int32_t *__ptr, int32x4x3_t __b, const int __c) { union { int32x4x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_s64 (int64_t *__ptr, int64x2x3_t __b, const int __c) { union { int64x2x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_u8 (uint8_t *__ptr, uint8x16x3_t __b, const int __c) { union { uint8x16x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_u16 (uint16_t *__ptr, uint16x8x3_t __b, const int __c) { union { uint16x8x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_u32 (uint32_t *__ptr, uint32x4x3_t __b, const int __c) { union { uint32x4x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_lane_u64 (uint64_t *__ptr, uint64x2x3_t __b, const int __c) { union { uint64x2x3_t __i; __builtin_aarch64_simd_ci __o; } __temp = { __b }; __builtin_aarch64_st3_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); } # 10507 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_f16 (float16_t *__ptr, float16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float16x8x4_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (((uint64_t) 0))); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (((uint64_t) 0))); __temp.val[2] = vcombine_f16 (__b.val[2], vcreate_f16 (((uint64_t) 0))); __temp.val[3] = vcombine_f16 (__b.val[3], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[3], 3); 
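/* Annotation (not in the original header): the vst*_lane forms taking 64-bit
   input vectors all follow the pattern seen here -- each D-register half is
   zero-extended into a Q register via vcombine/vcreate, the Q registers are
   packed into an opaque register tuple (__o), and the builtin that follows
   stores element __c of the interleaved structure. */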
__builtin_aarch64_st4_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_f32 (float32_t *__ptr, float32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float32x4x4_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (((uint64_t) 0))); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (((uint64_t) 0))); __temp.val[2] = vcombine_f32 (__b.val[2], vcreate_f32 (((uint64_t) 0))); __temp.val[3] = vcombine_f32 (__b.val[3], vcreate_f32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_f64 (float64_t *__ptr, float64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float64x2x4_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (((uint64_t) 0))); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (((uint64_t) 0))); __temp.val[2] = vcombine_f64 (__b.val[2], vcreate_f64 (((uint64_t) 0))); __temp.val[3] = vcombine_f64 (__b.val[3], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[3], 3); __builtin_aarch64_st4_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_p8 (poly8_t *__ptr, poly8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly8x16x4_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (((uint64_t) 0))); __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (((uint64_t) 0))); __temp.val[2] = vcombine_p8 (__b.val[2], vcreate_p8 (((uint64_t) 0))); __temp.val[3] = vcombine_p8 (__b.val[3], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_p16 (poly16_t *__ptr, poly16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly16x8x4_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (((uint64_t) 0))); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (((uint64_t) 0))); __temp.val[2] = vcombine_p16 (__b.val[2], vcreate_p16 (((uint64_t) 0))); __temp.val[3] = vcombine_p16 (__b.val[3], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, 
__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_s8 (int8_t *__ptr, int8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int8x16x4_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (((uint64_t) 0))); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (((uint64_t) 0))); __temp.val[2] = vcombine_s8 (__b.val[2], vcreate_s8 (((uint64_t) 0))); __temp.val[3] = vcombine_s8 (__b.val[3], vcreate_s8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_s16 (int16_t *__ptr, int16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int16x8x4_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (((uint64_t) 0))); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (((uint64_t) 0))); __temp.val[2] = vcombine_s16 (__b.val[2], vcreate_s16 (((uint64_t) 0))); __temp.val[3] = vcombine_s16 (__b.val[3], vcreate_s16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_s32 (int32_t *__ptr, int32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int32x4x4_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (((uint64_t) 0))); __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (((uint64_t) 0))); __temp.val[2] = vcombine_s32 (__b.val[2], vcreate_s32 (((uint64_t) 0))); __temp.val[3] = vcombine_s32 (__b.val[3], vcreate_s32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_s64 (int64_t *__ptr, int64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int64x2x4_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (((uint64_t) 0))); __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (((uint64_t) 0))); __temp.val[2] = vcombine_s64 (__b.val[2], vcreate_s64 (((uint64_t) 0))); __temp.val[3] = vcombine_s64 (__b.val[3], vcreate_s64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3); __builtin_aarch64_st4_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_u8 
(uint8_t *__ptr, uint8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint8x16x4_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (((uint64_t) 0))); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (((uint64_t) 0))); __temp.val[2] = vcombine_u8 (__b.val[2], vcreate_u8 (((uint64_t) 0))); __temp.val[3] = vcombine_u8 (__b.val[3], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_u16 (uint16_t *__ptr, uint16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint16x8x4_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (((uint64_t) 0))); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (((uint64_t) 0))); __temp.val[2] = vcombine_u16 (__b.val[2], vcreate_u16 (((uint64_t) 0))); __temp.val[3] = vcombine_u16 (__b.val[3], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_u32 (uint32_t *__ptr, uint32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint32x4x4_t __temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (((uint64_t) 0))); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (((uint64_t) 0))); __temp.val[2] = vcombine_u32 (__b.val[2], vcreate_u32 (((uint64_t) 0))); __temp.val[3] = vcombine_u32 (__b.val[3], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3); __builtin_aarch64_st4_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4_lane_u64 (uint64_t *__ptr, uint64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint64x2x4_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (((uint64_t) 0))); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (((uint64_t) 0))); __temp.val[2] = vcombine_u64 (__b.val[2], vcreate_u64 (((uint64_t) 0))); __temp.val[3] = vcombine_u64 (__b.val[3], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3); __builtin_aarch64_st4_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, __c); } # 10547 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_f16 (float16_t 
*__ptr, float16x8x4_t __b, const int __c) { union { float16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_f32 (float32_t *__ptr, float32x4x4_t __b, const int __c) { union { float32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev4sf ((__builtin_aarch64_simd_sf *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_f64 (float64_t *__ptr, float64x2x4_t __b, const int __c) { union { float64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_p8 (poly8_t *__ptr, poly8x16x4_t __b, const int __c) { union { poly8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_p16 (poly16_t *__ptr, poly16x8x4_t __b, const int __c) { union { poly16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_s8 (int8_t *__ptr, int8x16x4_t __b, const int __c) { union { int8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_s16 (int16_t *__ptr, int16x8x4_t __b, const int __c) { union { int16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_s32 (int32_t *__ptr, int32x4x4_t __b, const int __c) { union { int32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_s64 (int64_t *__ptr, int64x2x4_t __b, const int __c) { union { int64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_u8 (uint8_t *__ptr, uint8x16x4_t __b, const int __c) { union { uint8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_u16 (uint16_t *__ptr, uint16x8x4_t __b, const int __c) { union { uint16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __temp.__o, __c); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_u32 (uint32_t *__ptr, uint32x4x4_t __b, const int __c) { union { uint32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __temp.__o, __c); 
}
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_lane_u64 (uint64_t *__ptr, uint64x2x4_t __b, const int __c) { union { uint64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __temp = { __b }; __builtin_aarch64_st4_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __temp.__o, __c); }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vaddlv_s32 (int32x2_t a) { int64_t result; __asm__ ("saddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vaddlv_u32 (uint32x2_t a) { uint64_t result; __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : ); return result; }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c); }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbl1_p8 (poly8x16_t a, uint8x8_t b) { poly8x8_t result; __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbl1_s8 (int8x16_t a, uint8x8_t b) { int8x8_t result; __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbl1_u8 (uint8x16_t a, uint8x8_t b) { uint8x8_t result; __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbl1q_p8 (poly8x16_t a, uint8x16_t b) { poly8x16_t result; __asm__ ("tbl %0.16b, {%1.16b}, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbl1q_s8 (int8x16_t a, uint8x16_t b) { int8x16_t result; __asm__ ("tbl %0.16b, {%1.16b}, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
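/* [Editorial illustration -- not part of the original arm_neon.h text.]
   A minimal usage sketch for the vqtbl1 lookup intrinsics above: TBL treats
   the first (q-register) operand as a 16-entry byte table and the second as
   per-lane indices; an index outside 0..15 yields a zero byte. The helper
   name below is hypothetical. */
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
example_vqtbl1_reverse_low8 (uint8x16_t __table)
{
  /* vcreate_u8 packs its argument little-endian, so lane 0 gets index 0x07
     and lane 7 gets index 0x00: the result is the low eight table bytes in
     reverse order. */
  uint8x8_t __idx = vcreate_u8 ((uint64_t) 0x0001020304050607);
  return vqtbl1_u8 (__table, __idx);
}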
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbl1q_u8 (uint8x16_t a, uint8x16_t b) { uint8x16_t result; __asm__ ("tbl %0.16b, {%1.16b}, %2.16b" : "=w"(result) : "w"(a), "w"(b) : ); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbl2_s8 (int8x16x2_t tab, uint8x8_t idx) { int8x8_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx) { uint8x8_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx) { poly8x8_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbl2q_s8 (int8x16x2_t tab, uint8x16_t idx) { int8x16_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx) { uint8x16_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx) { poly8x16_t result; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbl3_s8 (int8x16x3_t tab, uint8x8_t idx) { int8x8_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx) { uint8x8_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx) { poly8x8_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbl3q_s8 (int8x16x3_t tab, uint8x16_t idx) { int8x16_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx) { uint8x16_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; }
__extension__ static
__inline poly8x16_t __attribute__ ((__always_inline__)) vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx) { poly8x16_t result; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbl4_s8 (int8x16x4_t tab, uint8x8_t idx) { int8x8_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx) { uint8x8_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx) { poly8x8_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbl4q_s8 (int8x16x4_t tab, uint8x16_t idx) { int8x16_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx) { uint8x16_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx) { poly8x16_t result; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"=w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbx1_s8 (int8x8_t r, int8x16_t tab, uint8x8_t idx) { int8x8_t result = r; __asm__ ("tbx %0.8b,{%1.16b},%2.8b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx) { uint8x8_t result = r; __asm__ ("tbx %0.8b,{%1.16b},%2.8b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx) { poly8x8_t result = r; __asm__ ("tbx %0.8b,{%1.16b},%2.8b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbx1q_s8 (int8x16_t r, int8x16_t tab, uint8x16_t idx) { int8x16_t result = r; __asm__ ("tbx %0.16b,{%1.16b},%2.16b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx) { uint8x16_t result = r; __asm__ ("tbx %0.16b,{%1.16b},%2.16b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbx1q_p8 
(poly8x16_t r, poly8x16_t tab, uint8x16_t idx) { poly8x16_t result = r; __asm__ ("tbx %0.16b,{%1.16b},%2.16b" : "+w"(result) : "w"(tab), "w"(idx) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, uint8x8_t idx) { int8x8_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx) { uint8x8_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx) { poly8x8_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, uint8x16_t idx) { int8x16_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx) { uint8x16_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx) { poly8x16_t result = r; __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t" "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17"); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, uint8x8_t idx) { int8x8_t result = r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx) { uint8x8_t result = r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx) { poly8x8_t result = r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, uint8x16_t idx) { int8x16_t result = r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx) { uint8x16_t result = 
r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx) { poly8x16_t result = r; __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18"); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, uint8x8_t idx) { int8x8_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx) { uint8x8_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx) { poly8x8_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, uint8x16_t idx) { int8x16_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx) { uint8x16_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx) { poly8x16_t result = r; __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t" "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t" :"+w"(result) :"Q"(tab),"w"(idx) :"memory", "v16", "v17", "v18", "v19"); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbl1_s8 (int8x8_t tab, int8x8_t idx) { int8x8_t result; int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (((uint64_t) 0x0))); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbl1_u8 (uint8x8_t tab, uint8x8_t idx) { uint8x8_t result; uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (((uint64_t) 0x0))); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbl1_p8 (poly8x8_t tab, uint8x8_t idx) { poly8x8_t result; poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (((uint64_t) 0x0))); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbl2_s8 (int8x8x2_t tab, int8x8_t idx) { int8x8_t 
result; int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx) { uint8x8_t result; uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx) { poly8x8_t result; poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]); __asm__ ("tbl %0.8b, {%1.16b}, %2.8b" : "=w"(result) : "w"(temp), "w"(idx) : ); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbl3_s8 (int8x8x3_t tab, int8x8_t idx) { int8x8_t result; int8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (((uint64_t) 0x0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = __builtin_aarch64_tbl3v8qi (__o, idx); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx) { uint8x8_t result; uint8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (((uint64_t) 0x0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx) { poly8x8_t result; poly8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (((uint64_t) 0x0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx); return result; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbl4_s8 (int8x8x4_t tab, int8x8_t idx) { int8x8_t result; int8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = __builtin_aarch64_tbl3v8qi (__o, idx); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx) { uint8x8_t result; uint8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx) { poly8x8_t result; poly8x16x2_t temp; 
__builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]); temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx); return result; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx) { int8x8_t result = r; int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]); __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" : "+w"(result) : "w"(temp), "w"(idx) : ); return result; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx) { uint8x8_t result = r; uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]); __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" : "+w"(result) : "w"(temp), "w"(idx) : ); return result; }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx) { poly8x8_t result = r; poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]); __asm__ ("tbx %0.8b, {%1.16b}, %2.8b" : "+w"(result) : "w"(temp), "w"(idx) : ); return result; }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vabs_f32 (float32x2_t __a) { return __builtin_aarch64_absv2sf (__a); }
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vabs_f64 (float64x1_t __a) { return (float64x1_t) {__builtin_fabs (__a[0])}; }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vabs_s8 (int8x8_t __a) { return __builtin_aarch64_absv8qi (__a); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vabs_s16 (int16x4_t __a) { return __builtin_aarch64_absv4hi (__a); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vabs_s32 (int32x2_t __a) { return __builtin_aarch64_absv2si (__a); }
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vabs_s64 (int64x1_t __a) { return (int64x1_t) {__builtin_aarch64_absdi (__a[0])}; }
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vabsq_f32 (float32x4_t __a) { return __builtin_aarch64_absv4sf (__a); }
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vabsq_f64 (float64x2_t __a) { return __builtin_aarch64_absv2df (__a); }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vabsq_s8 (int8x16_t __a) { return __builtin_aarch64_absv16qi (__a); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vabsq_s16 (int16x8_t __a) { return __builtin_aarch64_absv8hi (__a); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vabsq_s32 (int32x4_t __a) { return __builtin_aarch64_absv4si (__a); }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vabsq_s64 (int64x2_t __a) { return __builtin_aarch64_absv2di (__a); }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vaddd_s64 (int64_t __a, int64_t __b) { return __a + __b; }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vaddd_u64 (uint64_t __a, uint64_t __b) { return __a + __b; }
__extension__ static __inline int8_t __attribute__ ((__always_inline__)) vaddv_s8 (int8x8_t __a) { return __builtin_aarch64_reduc_plus_scal_v8qi (__a); }
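/* [Editorial illustration -- not part of the original arm_neon.h text.]
   A small sketch combining the lane-wise vabs_s8 with the across-lanes
   reduction vaddv_s8 defined just above. Note that vaddv_s8 returns the sum
   narrowed to int8_t, so both vabs_s8 (-128) and the reduction itself wrap
   modulo 2^8. The helper name is hypothetical. */
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
example_vaddv_abs_s8 (int8x8_t __v)
{
  return vaddv_s8 (vabs_s8 (__v));
}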
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vaddv_s16 (int16x4_t __a) { return __builtin_aarch64_reduc_plus_scal_v4hi (__a); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vaddv_s32 (int32x2_t __a) { return __builtin_aarch64_reduc_plus_scal_v2si (__a); }
__extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vaddv_u8 (uint8x8_t __a) { return (uint8_t) __builtin_aarch64_reduc_plus_scal_v8qi ((int8x8_t) __a); }
__extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vaddv_u16 (uint16x4_t __a) { return (uint16_t) __builtin_aarch64_reduc_plus_scal_v4hi ((int16x4_t) __a); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vaddv_u32 (uint32x2_t __a) { return (uint32_t) __builtin_aarch64_reduc_plus_scal_v2si ((int32x2_t) __a); }
__extension__ static __inline int8_t __attribute__ ((__always_inline__)) vaddvq_s8 (int8x16_t __a) { return __builtin_aarch64_reduc_plus_scal_v16qi (__a); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vaddvq_s16 (int16x8_t __a) { return __builtin_aarch64_reduc_plus_scal_v8hi (__a); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vaddvq_s32 (int32x4_t __a) { return __builtin_aarch64_reduc_plus_scal_v4si (__a); }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vaddvq_s64 (int64x2_t __a) { return __builtin_aarch64_reduc_plus_scal_v2di (__a); }
__extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vaddvq_u8 (uint8x16_t __a) { return (uint8_t) __builtin_aarch64_reduc_plus_scal_v16qi ((int8x16_t) __a); }
__extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vaddvq_u16 (uint16x8_t __a) { return (uint16_t) __builtin_aarch64_reduc_plus_scal_v8hi ((int16x8_t) __a); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vaddvq_u32 (uint32x4_t __a) { return (uint32_t) __builtin_aarch64_reduc_plus_scal_v4si ((int32x4_t) __a); }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vaddvq_u64 (uint64x2_t __a) { return (uint64_t) __builtin_aarch64_reduc_plus_scal_v2di ((int64x2_t) __a); }
__extension__ static __inline float32_t __attribute__ ((__always_inline__)) vaddv_f32 (float32x2_t __a) { return __builtin_aarch64_reduc_plus_scal_v2sf (__a); }
__extension__ static __inline float32_t __attribute__ ((__always_inline__)) vaddvq_f32 (float32x4_t __a) { return __builtin_aarch64_reduc_plus_scal_v4sf (__a); }
__extension__ static __inline float64_t __attribute__ ((__always_inline__)) vaddvq_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_plus_scal_v2df (__a); }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c) { return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c); }
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vbsl_f64 (uint64x1_t __a, float64x1_t __b, float64x1_t __c) { return (float64x1_t) { __builtin_aarch64_simd_bsldf_suss (__a[0], __b[0], __c[0]) }; }
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c) { return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c); }
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c) { return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c); }
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vbsl_s8
(uint8x8_t __a, int8x8_t __b, int8x8_t __c) { return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c) { return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c) { return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c) { return (int64x1_t) {__builtin_aarch64_simd_bsldi_suss (__a[0], __b[0], __c[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) { return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) { return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) { return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c) { return (uint64x1_t) {__builtin_aarch64_simd_bsldi_uuuu (__a[0], __b[0], __c[0])}; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c) { return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c) { return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c) { return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c) { return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c) { return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c) { return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c) { return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c) { return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) { return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) { return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c); } __extension__ static __inline 
uint32x4_t __attribute__ ((__always_inline__)) vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) { return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c) { return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c); }
#pragma GCC push_options
#pragma GCC target ("arch=armv8.1-a")
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) { return __builtin_aarch64_sqrdmlahv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) { return __builtin_aarch64_sqrdmlahv2si (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) { return __builtin_aarch64_sqrdmlahv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) { return __builtin_aarch64_sqrdmlahv4si (__a, __b, __c); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) { return __builtin_aarch64_sqrdmlshv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) { return __builtin_aarch64_sqrdmlshv2si (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) { return __builtin_aarch64_sqrdmlshv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) { return __builtin_aarch64_sqrdmlshv4si (__a, __b, __c); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlah_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqv4hi (__a, __b, __c, __d); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlah_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqv2si (__a, __b, __c, __d); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlahq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqv8hi (__a, __b, __c, __d); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlahq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqv4si (__a, __b, __c, __d); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlsh_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqv4hi (__a, __b, __c, __d); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlsh_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqv2si (__a, __b, __c, __d); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlshq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqv8hi (__a, __b, __c, __d); }
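/* [Editorial illustration -- not part of the original arm_neon.h text.]
   The intrinsics in this push_options/pop_options region require ARMv8.1-A
   (SQRDMLAH/SQRDMLSH): a rounding doubling multiply returning the high half,
   fused with a saturating accumulate. Below is a minimal Q15 fixed-point
   multiply-accumulate sketch using vqrdmlah_s16 from above; the helper name
   is hypothetical. */
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
example_q15_mla (int16x4_t __acc, int16x4_t __a, int16x4_t __b)
{
  /* Per lane, roughly: saturate (__acc + round ((2 * __a * __b) >> 16)). */
  return vqrdmlah_s16 (__acc, __a, __b);
}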
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlshq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqv4si (__a, __b, __c, __d); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanev4hi (__a, __b, __c, __d); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanev2si (__a, __b, __c, __d); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanev8hi (__a, __b, __c, __d); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanev4si (__a, __b, __c, __d); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlahh_s16 (int16_t __a, int16_t __b, int16_t __c) { return (int16_t) __builtin_aarch64_sqrdmlahhi (__a, __b, __c); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlahh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanehi (__a, __b, __c, __d); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlahh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqhi (__a, __b, __c, __d); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlahs_s32 (int32_t __a, int32_t __b, int32_t __c) { return (int32_t) __builtin_aarch64_sqrdmlahsi (__a, __b, __c); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlahs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_lanesi (__a, __b, __c, __d); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlahs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlah_laneqsi (__a, __b, __c, __d); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanev4hi (__a, __b, __c, __d); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanev2si (__a, __b, __c, __d); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanev8hi (__a, __b, __c, __d); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanev4si (__a, __b, __c, __d); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlshh_s16 (int16_t __a, int16_t __b, int16_t __c) { return (int16_t) __builtin_aarch64_sqrdmlshhi (__a, __b, __c); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlshh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanehi (__a, __b, __c, __d); }
__extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmlshh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqhi (__a, __b, __c, __d); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlshs_s32 (int32_t __a, int32_t __b, int32_t __c) { return (int32_t) __builtin_aarch64_sqrdmlshsi (__a, __b, __c); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlshs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_lanesi (__a, __b, __c, __d); }
__extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqrdmlsh_laneqsi (__a, __b, __c, __d); }
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target ("+nothing+crypto")
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaeseq_u8 (uint8x16_t data, uint8x16_t key) { return __builtin_aarch64_crypto_aesev16qi_uuu (data, key); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaesdq_u8 (uint8x16_t data, uint8x16_t key) { return __builtin_aarch64_crypto_aesdv16qi_uuu (data, key); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaesmcq_u8 (uint8x16_t data) { return __builtin_aarch64_crypto_aesmcv16qi_uu (data); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vaesimcq_u8 (uint8x16_t data) { return __builtin_aarch64_crypto_aesimcv16qi_uu (data); }
#pragma GCC pop_options
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcage_f64 (float64x1_t __a, float64x1_t __b) { return vabs_f64 (__a) >= vabs_f64 (__b); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcages_f32 (float32_t __a, float32_t __b) { return __builtin_fabsf (__a) >= __builtin_fabsf (__b) ? -1 : 0; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcage_f32 (float32x2_t __a, float32x2_t __b) { return vabs_f32 (__a) >= vabs_f32 (__b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcageq_f32 (float32x4_t __a, float32x4_t __b) { return vabsq_f32 (__a) >= vabsq_f32 (__b); }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcaged_f64 (float64_t __a, float64_t __b) { return __builtin_fabs (__a) >= __builtin_fabs (__b) ? -1 : 0; }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcageq_f64 (float64x2_t __a, float64x2_t __b) { return vabsq_f64 (__a) >= vabsq_f64 (__b); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcagts_f32 (float32_t __a, float32_t __b) { return __builtin_fabsf (__a) > __builtin_fabsf (__b) ?
-1 : 0; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcagt_f32 (float32x2_t __a, float32x2_t __b) { return vabs_f32 (__a) > vabs_f32 (__b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcagt_f64 (float64x1_t __a, float64x1_t __b) { return vabs_f64 (__a) > vabs_f64 (__b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcagtq_f32 (float32x4_t __a, float32x4_t __b) { return vabsq_f32 (__a) > vabsq_f32 (__b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcagtd_f64 (float64_t __a, float64_t __b) { return __builtin_fabs (__a) > __builtin_fabs (__b) ? -1 : 0; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcagtq_f64 (float64x2_t __a, float64x2_t __b) { return vabsq_f64 (__a) > vabsq_f64 (__b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcale_f32 (float32x2_t __a, float32x2_t __b) { return vabs_f32 (__a) <= vabs_f32 (__b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcale_f64 (float64x1_t __a, float64x1_t __b) { return vabs_f64 (__a) <= vabs_f64 (__b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcaled_f64 (float64_t __a, float64_t __b) { return __builtin_fabs (__a) <= __builtin_fabs (__b) ? -1 : 0; } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcales_f32 (float32_t __a, float32_t __b) { return __builtin_fabsf (__a) <= __builtin_fabsf (__b) ? -1 : 0; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcaleq_f32 (float32x4_t __a, float32x4_t __b) { return vabsq_f32 (__a) <= vabsq_f32 (__b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcaleq_f64 (float64x2_t __a, float64x2_t __b) { return vabsq_f64 (__a) <= vabsq_f64 (__b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcalt_f32 (float32x2_t __a, float32x2_t __b) { return vabs_f32 (__a) < vabs_f32 (__b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcalt_f64 (float64x1_t __a, float64x1_t __b) { return vabs_f64 (__a) < vabs_f64 (__b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcaltd_f64 (float64_t __a, float64_t __b) { return __builtin_fabs (__a) < __builtin_fabs (__b) ? -1 : 0; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcaltq_f32 (float32x4_t __a, float32x4_t __b) { return vabsq_f32 (__a) < vabsq_f32 (__b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcaltq_f64 (float64x2_t __a, float64x2_t __b) { return vabsq_f64 (__a) < vabsq_f64 (__b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcalts_f32 (float32_t __a, float32_t __b) { return __builtin_fabsf (__a) < __builtin_fabsf (__b) ? 
-1 : 0; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_f32 (float32x2_t __a, float32x2_t __b) { return (uint32x2_t) (__a == __b); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceq_f64 (float64x1_t __a, float64x1_t __b) { return (uint64x1_t) (__a == __b); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_p8 (poly8x8_t __a, poly8x8_t __b) { return (uint8x8_t) (__a == __b); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) (__a == __b); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceq_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) (__a == __b); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) (__a == __b); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceq_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) (__a == __b); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_u8 (uint8x8_t __a, uint8x8_t __b) { return (__a == __b); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceq_u16 (uint16x4_t __a, uint16x4_t __b) { return (__a == __b); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_u32 (uint32x2_t __a, uint32x2_t __b) { return (__a == __b); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceq_u64 (uint64x1_t __a, uint64x1_t __b) { return (__a == __b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_f32 (float32x4_t __a, float32x4_t __b) { return (uint32x4_t) (__a == __b); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_f64 (float64x2_t __a, float64x2_t __b) { return (uint64x2_t) (__a == __b); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_p8 (poly8x16_t __a, poly8x16_t __b) { return (uint8x16_t) (__a == __b); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) (__a == __b); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) (__a == __b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) (__a == __b); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) (__a == __b); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_u8 (uint8x16_t __a, uint8x16_t __b) { return (__a == __b); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqq_u16 (uint16x8_t __a, uint16x8_t __b) { return (__a == __b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_u32 (uint32x4_t __a, uint32x4_t __b) { return (__a == __b); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_u64 (uint64x2_t __a, uint64x2_t __b) { return (__a == __b); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vceqs_f32 (float32_t __a, float32_t __b) { return __a == __b ? -1 : 0; }
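/* [Editorial illustration -- not part of the original arm_neon.h text.]
   The vceq family above returns all-ones lanes (e.g. 0xff) where the
   operands compare equal and all-zeros lanes otherwise, which is exactly
   the mask shape the vbsl bitwise-select intrinsics expect. A minimal
   per-lane select sketch; the helper name is hypothetical. */
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
example_select_where_equal (uint8x8_t __a, uint8x8_t __b, uint8x8_t __x, uint8x8_t __y)
{
  uint8x8_t __mask = vceq_u8 (__a, __b);
  /* Bits of __x where __mask is set, bits of __y elsewhere. */
  return vbsl_u8 (__mask, __x, __y);
}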
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqd_s64 (int64_t __a, int64_t __b) { return __a == __b ? -1ll : 0ll; }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqd_u64 (uint64_t __a, uint64_t __b) { return __a == __b ? -1ll : 0ll; }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqd_f64 (float64_t __a, float64_t __b) { return __a == __b ? -1ll : 0ll; }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_f32 (float32x2_t __a) { return (uint32x2_t) (__a == 0.0f); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceqz_f64 (float64x1_t __a) { return (uint64x1_t) (__a == (float64x1_t) {0.0}); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_p8 (poly8x8_t __a) { return (uint8x8_t) (__a == 0); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_s8 (int8x8_t __a) { return (uint8x8_t) (__a == 0); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceqz_s16 (int16x4_t __a) { return (uint16x4_t) (__a == 0); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_s32 (int32x2_t __a) { return (uint32x2_t) (__a == 0); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceqz_s64 (int64x1_t __a) { return (uint64x1_t) (__a == ((int64_t) 0)); }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_u8 (uint8x8_t __a) { return (__a == 0); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceqz_u16 (uint16x4_t __a) { return (__a == 0); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_u32 (uint32x2_t __a) { return (__a == 0); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vceqz_u64 (uint64x1_t __a) { return (__a == ((uint64_t) 0)); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_f32 (float32x4_t __a) { return (uint32x4_t) (__a == 0.0f); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_f64 (float64x2_t __a) { return (uint64x2_t) (__a == 0.0f); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_p8 (poly8x16_t __a) { return (uint8x16_t) (__a == 0); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_s8 (int8x16_t __a) { return (uint8x16_t) (__a == 0); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqzq_s16 (int16x8_t __a) { return (uint16x8_t) (__a == 0); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_s32 (int32x4_t __a) { return (uint32x4_t) (__a == 0); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_s64 (int64x2_t __a) { return (uint64x2_t) (__a == ((int64_t) 0)); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_u8 (uint8x16_t __a) { return (__a == 0); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqzq_u16 (uint16x8_t __a) { return (__a == 0); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_u32 (uint32x4_t __a) { return (__a == 0); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_u64 (uint64x2_t __a) { return (__a == ((uint64_t) 0)); }
__extension__ static __inline uint32_t
__attribute__ ((__always_inline__)) vceqzs_f32 (float32_t __a) { return __a == 0.0f ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqzd_s64 (int64_t __a) { return __a == 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqzd_u64 (uint64_t __a) { return __a == 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vceqzd_f64 (float64_t __a) { return __a == 0.0 ? -1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_f32 (float32x2_t __a, float32x2_t __b) { return (uint32x2_t) (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcge_f64 (float64x1_t __a, float64x1_t __b) { return (uint64x1_t) (__a >= __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcge_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) (__a >= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcge_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) (__a >= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcge_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) (__a >= __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcge_u8 (uint8x8_t __a, uint8x8_t __b) { return (__a >= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcge_u16 (uint16x4_t __a, uint16x4_t __b) { return (__a >= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_u32 (uint32x2_t __a, uint32x2_t __b) { return (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcge_u64 (uint64x1_t __a, uint64x1_t __b) { return (__a >= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_f32 (float32x4_t __a, float32x4_t __b) { return (uint32x4_t) (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_f64 (float64x2_t __a, float64x2_t __b) { return (uint64x2_t) (__a >= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgeq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) (__a >= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgeq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) (__a >= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) (__a >= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgeq_u8 (uint8x16_t __a, uint8x16_t __b) { return (__a >= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgeq_u16 (uint16x8_t __a, uint16x8_t __b) { return (__a >= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_u32 (uint32x4_t __a, uint32x4_t __b) { return (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_u64 (uint64x2_t __a, uint64x2_t __b) { return (__a >= __b); } __extension__ static 
__inline uint32_t __attribute__ ((__always_inline__)) vcges_f32 (float32_t __a, float32_t __b) { return __a >= __b ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcged_s64 (int64_t __a, int64_t __b) { return __a >= __b ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcged_u64 (uint64_t __a, uint64_t __b) { return __a >= __b ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcged_f64 (float64_t __a, float64_t __b) { return __a >= __b ? -1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgez_f32 (float32x2_t __a) { return (uint32x2_t) (__a >= 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgez_f64 (float64x1_t __a) { return (uint64x1_t) (__a >= (float64x1_t) {0.0}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgez_s8 (int8x8_t __a) { return (uint8x8_t) (__a >= 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgez_s16 (int16x4_t __a) { return (uint16x4_t) (__a >= 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgez_s32 (int32x2_t __a) { return (uint32x2_t) (__a >= 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgez_s64 (int64x1_t __a) { return (uint64x1_t) (__a >= ((int64_t) 0)); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgezq_f32 (float32x4_t __a) { return (uint32x4_t) (__a >= 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgezq_f64 (float64x2_t __a) { return (uint64x2_t) (__a >= 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgezq_s8 (int8x16_t __a) { return (uint8x16_t) (__a >= 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgezq_s16 (int16x8_t __a) { return (uint16x8_t) (__a >= 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgezq_s32 (int32x4_t __a) { return (uint32x4_t) (__a >= 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgezq_s64 (int64x2_t __a) { return (uint64x2_t) (__a >= ((int64_t) 0)); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcgezs_f32 (float32_t __a) { return __a >= 0.0f ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgezd_s64 (int64_t __a) { return __a >= 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgezd_f64 (float64_t __a) { return __a >= 0.0 ?
-1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_f32 (float32x2_t __a, float32x2_t __b) { return (uint32x2_t) (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgt_f64 (float64x1_t __a, float64x1_t __b) { return (uint64x1_t) (__a > __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgt_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) (__a > __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgt_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) (__a > __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgt_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) (__a > __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgt_u8 (uint8x8_t __a, uint8x8_t __b) { return (__a > __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgt_u16 (uint16x4_t __a, uint16x4_t __b) { return (__a > __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_u32 (uint32x2_t __a, uint32x2_t __b) { return (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgt_u64 (uint64x1_t __a, uint64x1_t __b) { return (__a > __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_f32 (float32x4_t __a, float32x4_t __b) { return (uint32x4_t) (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_f64 (float64x2_t __a, float64x2_t __b) { return (uint64x2_t) (__a > __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) (__a > __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) (__a > __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) (__a > __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtq_u8 (uint8x16_t __a, uint8x16_t __b) { return (__a > __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtq_u16 (uint16x8_t __a, uint16x8_t __b) { return (__a > __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_u32 (uint32x4_t __a, uint32x4_t __b) { return (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_u64 (uint64x2_t __a, uint64x2_t __b) { return (__a > __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcgts_f32 (float32_t __a, float32_t __b) { return __a > __b ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgtd_s64 (int64_t __a, int64_t __b) { return __a > __b ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgtd_u64 (uint64_t __a, uint64_t __b) { return __a > __b ? 
-1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgtd_f64 (float64_t __a, float64_t __b) { return __a > __b ? -1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgtz_f32 (float32x2_t __a) { return (uint32x2_t) (__a > 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgtz_f64 (float64x1_t __a) { return (uint64x1_t) (__a > (float64x1_t) {0.0}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgtz_s8 (int8x8_t __a) { return (uint8x8_t) (__a > 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgtz_s16 (int16x4_t __a) { return (uint16x4_t) (__a > 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgtz_s32 (int32x2_t __a) { return (uint32x2_t) (__a > 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcgtz_s64 (int64x1_t __a) { return (uint64x1_t) (__a > ((int64_t) 0)); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtzq_f32 (float32x4_t __a) { return (uint32x4_t) (__a > 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtzq_f64 (float64x2_t __a) { return (uint64x2_t) (__a > 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtzq_s8 (int8x16_t __a) { return (uint8x16_t) (__a > 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtzq_s16 (int16x8_t __a) { return (uint16x8_t) (__a > 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtzq_s32 (int32x4_t __a) { return (uint32x4_t) (__a > 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtzq_s64 (int64x2_t __a) { return (uint64x2_t) (__a > ((int64_t) 0)); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcgtzs_f32 (float32_t __a) { return __a > 0.0f ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgtzd_s64 (int64_t __a) { return __a > 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcgtzd_f64 (float64_t __a) { return __a > 0.0 ? 
-1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_f32 (float32x2_t __a, float32x2_t __b) { return (uint32x2_t) (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcle_f64 (float64x1_t __a, float64x1_t __b) { return (uint64x1_t) (__a <= __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcle_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) (__a <= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcle_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) (__a <= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcle_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) (__a <= __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcle_u8 (uint8x8_t __a, uint8x8_t __b) { return (__a <= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcle_u16 (uint16x4_t __a, uint16x4_t __b) { return (__a <= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_u32 (uint32x2_t __a, uint32x2_t __b) { return (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcle_u64 (uint64x1_t __a, uint64x1_t __b) { return (__a <= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_f32 (float32x4_t __a, float32x4_t __b) { return (uint32x4_t) (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_f64 (float64x2_t __a, float64x2_t __b) { return (uint64x2_t) (__a <= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcleq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) (__a <= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcleq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) (__a <= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) (__a <= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcleq_u8 (uint8x16_t __a, uint8x16_t __b) { return (__a <= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcleq_u16 (uint16x8_t __a, uint16x8_t __b) { return (__a <= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_u32 (uint32x4_t __a, uint32x4_t __b) { return (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_u64 (uint64x2_t __a, uint64x2_t __b) { return (__a <= __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcles_f32 (float32_t __a, float32_t __b) { return __a <= __b ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcled_s64 (int64_t __a, int64_t __b) { return __a <= __b ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcled_u64 (uint64_t __a, uint64_t __b) { return __a <= __b ? 
-1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcled_f64 (float64_t __a, float64_t __b) { return __a <= __b ? -1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclez_f32 (float32x2_t __a) { return (uint32x2_t) (__a <= 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vclez_f64 (float64x1_t __a) { return (uint64x1_t) (__a <= (float64x1_t) {0.0}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclez_s8 (int8x8_t __a) { return (uint8x8_t) (__a <= 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclez_s16 (int16x4_t __a) { return (uint16x4_t) (__a <= 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclez_s32 (int32x2_t __a) { return (uint32x2_t) (__a <= 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vclez_s64 (int64x1_t __a) { return (uint64x1_t) (__a <= ((int64_t) 0)); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vclezq_f32 (float32x4_t __a) { return (uint32x4_t) (__a <= 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vclezq_f64 (float64x2_t __a) { return (uint64x2_t) (__a <= 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vclezq_s8 (int8x16_t __a) { return (uint8x16_t) (__a <= 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vclezq_s16 (int16x8_t __a) { return (uint16x8_t) (__a <= 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vclezq_s32 (int32x4_t __a) { return (uint32x4_t) (__a <= 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vclezq_s64 (int64x2_t __a) { return (uint64x2_t) (__a <= ((int64_t) 0)); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vclezs_f32 (float32_t __a) { return __a <= 0.0f ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vclezd_s64 (int64_t __a) { return __a <= 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vclezd_f64 (float64_t __a) { return __a <= 0.0 ? 
-1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_f32 (float32x2_t __a, float32x2_t __b) { return (uint32x2_t) (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vclt_f64 (float64x1_t __a, float64x1_t __b) { return (uint64x1_t) (__a < __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclt_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) (__a < __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclt_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) (__a < __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vclt_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) (__a < __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclt_u8 (uint8x8_t __a, uint8x8_t __b) { return (__a < __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclt_u16 (uint16x4_t __a, uint16x4_t __b) { return (__a < __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_u32 (uint32x2_t __a, uint32x2_t __b) { return (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vclt_u64 (uint64x1_t __a, uint64x1_t __b) { return (__a < __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_f32 (float32x4_t __a, float32x4_t __b) { return (uint32x4_t) (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_f64 (float64x2_t __a, float64x2_t __b) { return (uint64x2_t) (__a < __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) (__a < __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) (__a < __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) (__a < __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltq_u8 (uint8x16_t __a, uint8x16_t __b) { return (__a < __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltq_u16 (uint16x8_t __a, uint16x8_t __b) { return (__a < __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_u32 (uint32x4_t __a, uint32x4_t __b) { return (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_u64 (uint64x2_t __a, uint64x2_t __b) { return (__a < __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vclts_f32 (float32_t __a, float32_t __b) { return __a < __b ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcltd_s64 (int64_t __a, int64_t __b) { return __a < __b ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcltd_u64 (uint64_t __a, uint64_t __b) { return __a < __b ? 
-1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcltd_f64 (float64_t __a, float64_t __b) { return __a < __b ? -1ll : 0ll; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcltz_f32 (float32x2_t __a) { return (uint32x2_t) (__a < 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcltz_f64 (float64x1_t __a) { return (uint64x1_t) (__a < (float64x1_t) {0.0}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcltz_s8 (int8x8_t __a) { return (uint8x8_t) (__a < 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcltz_s16 (int16x4_t __a) { return (uint16x4_t) (__a < 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcltz_s32 (int32x2_t __a) { return (uint32x2_t) (__a < 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vcltz_s64 (int64x1_t __a) { return (uint64x1_t) (__a < ((int64_t) 0)); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltzq_f32 (float32x4_t __a) { return (uint32x4_t) (__a < 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltzq_f64 (float64x2_t __a) { return (uint64x2_t) (__a < 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltzq_s8 (int8x16_t __a) { return (uint8x16_t) (__a < 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltzq_s16 (int16x8_t __a) { return (uint16x8_t) (__a < 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltzq_s32 (int32x4_t __a) { return (uint32x4_t) (__a < 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltzq_s64 (int64x2_t __a) { return (uint64x2_t) (__a < ((int64_t) 0)); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcltzs_f32 (float32_t __a) { return __a < 0.0f ? -1 : 0; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcltzd_s64 (int64_t __a) { return __a < 0 ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcltzd_f64 (float64_t __a) { return __a < 0.0 ? 
-1ll : 0ll; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vcls_s8 (int8x8_t __a) { return __builtin_aarch64_clrsbv8qi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vcls_s16 (int16x4_t __a) { return __builtin_aarch64_clrsbv4hi (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcls_s32 (int32x2_t __a) { return __builtin_aarch64_clrsbv2si (__a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vclsq_s8 (int8x16_t __a) { return __builtin_aarch64_clrsbv16qi (__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vclsq_s16 (int16x8_t __a) { return __builtin_aarch64_clrsbv8hi (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vclsq_s32 (int32x4_t __a) { return __builtin_aarch64_clrsbv4si (__a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vclz_s8 (int8x8_t __a) { return __builtin_aarch64_clzv8qi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vclz_s16 (int16x4_t __a) { return __builtin_aarch64_clzv4hi (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vclz_s32 (int32x2_t __a) { return __builtin_aarch64_clzv2si (__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclz_u8 (uint8x8_t __a) { return (uint8x8_t)__builtin_aarch64_clzv8qi ((int8x8_t)__a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclz_u16 (uint16x4_t __a) { return (uint16x4_t)__builtin_aarch64_clzv4hi ((int16x4_t)__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclz_u32 (uint32x2_t __a) { return (uint32x2_t)__builtin_aarch64_clzv2si ((int32x2_t)__a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vclzq_s8 (int8x16_t __a) { return __builtin_aarch64_clzv16qi (__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vclzq_s16 (int16x8_t __a) { return __builtin_aarch64_clzv8hi (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vclzq_s32 (int32x4_t __a) { return __builtin_aarch64_clzv4si (__a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vclzq_u8 (uint8x16_t __a) { return (uint8x16_t)__builtin_aarch64_clzv16qi ((int8x16_t)__a); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vclzq_u16 (uint16x8_t __a) { return (uint16x8_t)__builtin_aarch64_clzv8hi ((int16x8_t)__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vclzq_u32 (uint32x4_t __a) { return (uint32x4_t)__builtin_aarch64_clzv4si ((int32x4_t)__a); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vcnt_p8 (poly8x8_t __a) { return (poly8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vcnt_s8 (int8x8_t __a) { return __builtin_aarch64_popcountv8qi (__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcnt_u8 (uint8x8_t __a) { return (uint8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vcntq_p8 (poly8x16_t __a) { return (poly8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vcntq_s8 (int8x16_t __a) 
{ return __builtin_aarch64_popcountv16qi (__a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcntq_u8 (uint8x16_t __a) { return (uint8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a); } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vcvt_f16_f32 (float32x4_t __a) { return __builtin_aarch64_float_truncate_lo_v4hf (__a); } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vcvt_high_f16_f32 (float16x4_t __a, float32x4_t __b) { return __builtin_aarch64_float_truncate_hi_v8hf (__a, __b); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vcvt_f32_f64 (float64x2_t __a) { return __builtin_aarch64_float_truncate_lo_v2sf (__a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b) { return __builtin_aarch64_float_truncate_hi_v4sf (__a, __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvt_f32_f16 (float16x4_t __a) { return __builtin_aarch64_float_extend_lo_v4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vcvt_f64_f32 (float32x2_t __a) { return __builtin_aarch64_float_extend_lo_v2df (__a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvt_high_f32_f16 (float16x8_t __a) { return __builtin_aarch64_vec_unpacks_hi_v8hf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vcvt_high_f64_f32 (float32x4_t __a) { return __builtin_aarch64_vec_unpacks_hi_v4sf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vcvtd_f64_s64 (int64_t __a) { return (float64_t) __a; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vcvtd_f64_u64 (uint64_t __a) { return (float64_t) __a; } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vcvts_f32_s32 (int32_t __a) { return (float32_t) __a; } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vcvts_f32_u32 (uint32_t __a) { return (float32_t) __a; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vcvt_f32_s32 (int32x2_t __a) { return __builtin_aarch64_floatv2siv2sf (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vcvt_f32_u32 (uint32x2_t __a) { return __builtin_aarch64_floatunsv2siv2sf ((int32x2_t) __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvtq_f32_s32 (int32x4_t __a) { return __builtin_aarch64_floatv4siv4sf (__a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vcvtq_f32_u32 (uint32x4_t __a) { return __builtin_aarch64_floatunsv4siv4sf ((int32x4_t) __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vcvtq_f64_s64 (int64x2_t __a) { return __builtin_aarch64_floatv2div2df (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vcvtq_f64_u64 (uint64x2_t __a) { return __builtin_aarch64_floatunsv2div2df ((int64x2_t) __a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vcvtd_s64_f64 (float64_t __a) { return (int64_t) __a; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcvtd_u64_f64 (float64_t __a) { return (uint64_t) __a; } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vcvts_s32_f32 (float32_t __a) { return (int32_t) __a; } 
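/* Editor's note: a minimal usage sketch, not part of the preprocessed
   header. It assumes only intrinsics already defined above (vcgt_s32 and
   GCC's generic vector operators), and the function name
   __neon_example_max_s32 is hypothetical. The vceq*/vcge*/vcgt*/vcle*/vclt*
   families above return an all-ones mask (here 0xffffffff) in every lane
   where the predicate holds and 0 elsewhere, which enables the classic
   branchless mask-and-combine select idiom sketched here. */
static __inline int32x2_t __attribute__ ((__always_inline__))
__neon_example_max_s32 (int32x2_t __a, int32x2_t __b)
{
  /* Lane i of __m is 0xffffffff iff __a[i] > __b[i], else 0.  */
  uint32x2_t __m = vcgt_s32 (__a, __b);
  /* Keep __a where the mask is set, __b elsewhere.  */
  return (int32x2_t) ((__m & (uint32x2_t) __a)
                      | (~__m & (uint32x2_t) __b));
}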
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcvts_u32_f32 (float32_t __a) { return (uint32_t) __a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcvt_s32_f32 (float32x2_t __a) { return __builtin_aarch64_lbtruncv2sfv2si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcvt_u32_f32 (float32x2_t __a) { return __builtin_aarch64_lbtruncuv2sfv2si_us (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcvtq_s32_f32 (float32x4_t __a) { return __builtin_aarch64_lbtruncv4sfv4si (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcvtq_u32_f32 (float32x4_t __a) { return __builtin_aarch64_lbtruncuv4sfv4si_us (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcvtq_s64_f64 (float64x2_t __a) { return __builtin_aarch64_lbtruncv2dfv2di (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcvtq_u64_f64 (float64x2_t __a) { return __builtin_aarch64_lbtruncuv2dfv2di_us (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vcvtad_s64_f64 (float64_t __a) { return __builtin_aarch64_lrounddfdi (__a); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcvtad_u64_f64 (float64_t __a) { return __builtin_aarch64_lroundudfdi_us (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vcvtas_s32_f32 (float32_t __a) { return __builtin_aarch64_lroundsfsi (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcvtas_u32_f32 (float32_t __a) { return __builtin_aarch64_lroundusfsi_us (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcvta_s32_f32 (float32x2_t __a) { return __builtin_aarch64_lroundv2sfv2si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcvta_u32_f32 (float32x2_t __a) { return __builtin_aarch64_lrounduv2sfv2si_us (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcvtaq_s32_f32 (float32x4_t __a) { return __builtin_aarch64_lroundv4sfv4si (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcvtaq_u32_f32 (float32x4_t __a) { return __builtin_aarch64_lrounduv4sfv4si_us (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcvtaq_s64_f64 (float64x2_t __a) { return __builtin_aarch64_lroundv2dfv2di (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcvtaq_u64_f64 (float64x2_t __a) { return __builtin_aarch64_lrounduv2dfv2di_us (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vcvtmd_s64_f64 (float64_t __a) { return __builtin_llfloor (__a); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcvtmd_u64_f64 (float64_t __a) { return __builtin_aarch64_lfloorudfdi_us (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vcvtms_s32_f32 (float32_t __a) { return __builtin_ifloorf (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcvtms_u32_f32 (float32_t __a) { return __builtin_aarch64_lfloorusfsi_us (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcvtm_s32_f32 (float32x2_t __a) { return __builtin_aarch64_lfloorv2sfv2si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcvtm_u32_f32 (float32x2_t 
__a) { return __builtin_aarch64_lflooruv2sfv2si_us (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcvtmq_s32_f32 (float32x4_t __a) { return __builtin_aarch64_lfloorv4sfv4si (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcvtmq_u32_f32 (float32x4_t __a) { return __builtin_aarch64_lflooruv4sfv4si_us (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcvtmq_s64_f64 (float64x2_t __a) { return __builtin_aarch64_lfloorv2dfv2di (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcvtmq_u64_f64 (float64x2_t __a) { return __builtin_aarch64_lflooruv2dfv2di_us (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vcvtnd_s64_f64 (float64_t __a) { return __builtin_aarch64_lfrintndfdi (__a); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcvtnd_u64_f64 (float64_t __a) { return __builtin_aarch64_lfrintnudfdi_us (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vcvtns_s32_f32 (float32_t __a) { return __builtin_aarch64_lfrintnsfsi (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcvtns_u32_f32 (float32_t __a) { return __builtin_aarch64_lfrintnusfsi_us (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcvtn_s32_f32 (float32x2_t __a) { return __builtin_aarch64_lfrintnv2sfv2si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcvtn_u32_f32 (float32x2_t __a) { return __builtin_aarch64_lfrintnuv2sfv2si_us (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcvtnq_s32_f32 (float32x4_t __a) { return __builtin_aarch64_lfrintnv4sfv4si (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcvtnq_u32_f32 (float32x4_t __a) { return __builtin_aarch64_lfrintnuv4sfv4si_us (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcvtnq_s64_f64 (float64x2_t __a) { return __builtin_aarch64_lfrintnv2dfv2di (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcvtnq_u64_f64 (float64x2_t __a) { return __builtin_aarch64_lfrintnuv2dfv2di_us (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vcvtpd_s64_f64 (float64_t __a) { return __builtin_llceil (__a); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vcvtpd_u64_f64 (float64_t __a) { return __builtin_aarch64_lceiludfdi_us (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vcvtps_s32_f32 (float32_t __a) { return __builtin_iceilf (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vcvtps_u32_f32 (float32_t __a) { return __builtin_aarch64_lceilusfsi_us (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vcvtp_s32_f32 (float32x2_t __a) { return __builtin_aarch64_lceilv2sfv2si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcvtp_u32_f32 (float32x2_t __a) { return __builtin_aarch64_lceiluv2sfv2si_us (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vcvtpq_s32_f32 (float32x4_t __a) { return __builtin_aarch64_lceilv4sfv4si (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcvtpq_u32_f32 (float32x4_t __a) { return __builtin_aarch64_lceiluv4sfv4si_us (__a); } 
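/* Editor's note: another illustrative sketch, not part of the original
   header; the name __neon_example_roundings is hypothetical. The
   vcvt{a,m,n,p}_s32_f32 intrinsics defined above differ only in rounding
   mode -- to nearest with ties away from zero, toward minus infinity,
   to nearest with ties to even, and toward plus infinity, respectively --
   while the plain vcvt_s32_f32 family truncates toward zero. For
   __x = {-1.5f, 2.5f} the four results below are {-2, 3}, {-2, 2},
   {-2, 2} and {-1, 3}. */
static __inline void __attribute__ ((__always_inline__))
__neon_example_roundings (float32x2_t __x, int32x2_t __r[4])
{
  __r[0] = vcvta_s32_f32 (__x); /* to nearest, ties away from zero  */
  __r[1] = vcvtm_s32_f32 (__x); /* toward minus infinity (floor)    */
  __r[2] = vcvtn_s32_f32 (__x); /* to nearest, ties to even         */
  __r[3] = vcvtp_s32_f32 (__x); /* toward plus infinity (ceiling)   */
}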
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vcvtpq_s64_f64 (float64x2_t __a) { return __builtin_aarch64_lceilv2dfv2di (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcvtpq_u64_f64 (float64x2_t __a) { return __builtin_aarch64_lceiluv2dfv2di_us (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vdup_n_f32 (float32_t __a) { return (float32x2_t) {__a, __a}; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vdup_n_f64 (float64_t __a) { return (float64x1_t) {__a}; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vdup_n_p8 (poly8_t __a) { return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vdup_n_p16 (poly16_t __a) { return (poly16x4_t) {__a, __a, __a, __a}; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vdup_n_s8 (int8_t __a) { return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vdup_n_s16 (int16_t __a) { return (int16x4_t) {__a, __a, __a, __a}; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vdup_n_s32 (int32_t __a) { return (int32x2_t) {__a, __a}; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vdup_n_s64 (int64_t __a) { return (int64x1_t) {__a}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vdup_n_u8 (uint8_t __a) { return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vdup_n_u16 (uint16_t __a) { return (uint16x4_t) {__a, __a, __a, __a}; } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vdup_n_u32 (uint32_t __a) { return (uint32x2_t) {__a, __a}; } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vdup_n_u64 (uint64_t __a) { return (uint64x1_t) {__a}; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vdupq_n_f32 (float32_t __a) { return (float32x4_t) {__a, __a, __a, __a}; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vdupq_n_f64 (float64_t __a) { return (float64x2_t) {__a, __a}; } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vdupq_n_p8 (uint32_t __a) { return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vdupq_n_p16 (uint32_t __a) { return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vdupq_n_s8 (int32_t __a) { return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vdupq_n_s16 (int32_t __a) { return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vdupq_n_s32 (int32_t __a) { return (int32x4_t) {__a, __a, __a, __a}; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vdupq_n_s64 (int64_t __a) { return (int64x2_t) {__a, __a}; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vdupq_n_u8 (uint32_t __a) { return (uint8x16_t) {__a, __a, __a, 
__a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vdupq_n_u16 (uint32_t __a) { return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a}; } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vdupq_n_u32 (uint32_t __a) { return (uint32x4_t) {__a, __a, __a, __a}; } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vdupq_n_u64 (uint64_t __a) { return (uint64x2_t) {__a, __a}; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vdup_lane_f32 (float32x2_t __a, const int __b) { return vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vdup_lane_f64 (float64x1_t __a, const int __b) { return vdup_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vdup_lane_p8 (poly8x8_t __a, const int __b) { return vdup_n_p8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vdup_lane_p16 (poly16x4_t __a, const int __b) { return vdup_n_p16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vdup_lane_s8 (int8x8_t __a, const int __b) { return vdup_n_s8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vdup_lane_s16 (int16x4_t __a, const int __b) { return vdup_n_s16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vdup_lane_s32 (int32x2_t __a, const int __b) { return vdup_n_s32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vdup_lane_s64 (int64x1_t __a, const int __b) { return vdup_n_s64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vdup_lane_u8 (uint8x8_t __a, const int __b) { return vdup_n_u8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vdup_lane_u16 (uint16x4_t __a, const int __b) { return vdup_n_u16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vdup_lane_u32 (uint32x2_t __a, const int __b) { return vdup_n_u32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vdup_lane_u64 (uint64x1_t __a, const int __b) { return vdup_n_u64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float32x2_t __attribute__ 
((__always_inline__)) vdup_laneq_f32 (float32x4_t __a, const int __b) { return vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vdup_laneq_f64 (float64x2_t __a, const int __b) { return vdup_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vdup_laneq_p8 (poly8x16_t __a, const int __b) { return vdup_n_p8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vdup_laneq_p16 (poly16x8_t __a, const int __b) { return vdup_n_p16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vdup_laneq_s8 (int8x16_t __a, const int __b) { return vdup_n_s8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vdup_laneq_s16 (int16x8_t __a, const int __b) { return vdup_n_s16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vdup_laneq_s32 (int32x4_t __a, const int __b) { return vdup_n_s32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vdup_laneq_s64 (int64x2_t __a, const int __b) { return vdup_n_s64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vdup_laneq_u8 (uint8x16_t __a, const int __b) { return vdup_n_u8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vdup_laneq_u16 (uint16x8_t __a, const int __b) { return vdup_n_u16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vdup_laneq_u32 (uint32x4_t __a, const int __b) { return vdup_n_u32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vdup_laneq_u64 (uint64x2_t __a, const int __b) { return vdup_n_u64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vdupq_lane_f32 (float32x2_t __a, const int __b) { return vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vdupq_lane_f64 (float64x1_t __a, const int __b) { return vdupq_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vdupq_lane_p8 (poly8x8_t __a, const int __b) { return 
vdupq_n_p8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vdupq_lane_p16 (poly16x4_t __a, const int __b) { return vdupq_n_p16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vdupq_lane_s8 (int8x8_t __a, const int __b) { return vdupq_n_s8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vdupq_lane_s16 (int16x4_t __a, const int __b) { return vdupq_n_s16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vdupq_lane_s32 (int32x2_t __a, const int __b) { return vdupq_n_s32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vdupq_lane_s64 (int64x1_t __a, const int __b) { return vdupq_n_s64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vdupq_lane_u8 (uint8x8_t __a, const int __b) { return vdupq_n_u8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vdupq_lane_u16 (uint16x4_t __a, const int __b) { return vdupq_n_u16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vdupq_lane_u32 (uint32x2_t __a, const int __b) { return vdupq_n_u32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vdupq_lane_u64 (uint64x1_t __a, const int __b) { return vdupq_n_u64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vdupq_laneq_f32 (float32x4_t __a, const int __b) { return vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vdupq_laneq_f64 (float64x2_t __a, const int __b) { return vdupq_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vdupq_laneq_p8 (poly8x16_t __a, const int __b) { return vdupq_n_p8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vdupq_laneq_p16 (poly16x8_t __a, const int __b) { return vdupq_n_p16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vdupq_laneq_s8 (int8x16_t __a, const int __b) { return vdupq_n_s8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi 
(sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vdupq_laneq_s16 (int16x8_t __a, const int __b) { return vdupq_n_s16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vdupq_laneq_s32 (int32x4_t __a, const int __b) { return vdupq_n_s32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vdupq_laneq_s64 (int64x2_t __a, const int __b) { return vdupq_n_s64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vdupq_laneq_u8 (uint8x16_t __a, const int __b) { return vdupq_n_u8 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vdupq_laneq_u16 (uint16x8_t __a, const int __b) { return vdupq_n_u16 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vdupq_laneq_u32 (uint32x4_t __a, const int __b) { return vdupq_n_u32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vdupq_laneq_u64 (uint64x2_t __a, const int __b) { return vdupq_n_u64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; })); } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vdupb_lane_p8 (poly8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vdupb_lane_s8 (int8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vdupb_lane_u8 (uint8x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vduph_lane_p16 (poly16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vduph_lane_s16 (int16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vduph_lane_u16 (uint16x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vdups_lane_f32 (float32x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vdups_lane_s32 (int32x2_t __a, const int __b) { 
return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vdups_lane_u32 (uint32x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vdupd_lane_f64 (float64x1_t __a, const int __b) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); return __a[0]; } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vdupd_lane_s64 (int64x1_t __a, const int __b) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); return __a[0]; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vdupd_lane_u64 (uint64x1_t __a, const int __b) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); return __a[0]; } __extension__ static __inline poly8_t __attribute__ ((__always_inline__)) vdupb_laneq_p8 (poly8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vdupb_laneq_s8 (int8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vdupb_laneq_u8 (uint8x16_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline poly16_t __attribute__ ((__always_inline__)) vduph_laneq_p16 (poly16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vduph_laneq_s16 (int16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vduph_laneq_u16 (uint16x8_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vdups_laneq_f32 (float32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vdups_laneq_s32 (int32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vdups_laneq_u32 (uint32x4_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vdupd_laneq_f64 (float64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vdupd_laneq_s64 (int64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]),
__b); __a[__b]; }); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vdupd_laneq_u64 (uint64x2_t __a, const int __b) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __b); __a[__b]; }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1}); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __a; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1}); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __a; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1}); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __a; } __extension__ 
static __inline float32x4_t __attribute__ ((__always_inline__)) vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x16_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7, __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x16_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7, __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint8x16_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7, __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3}); } __extension__ static __inline uint64x2_t __attribute__ 
((__always_inline__)) vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __c); return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1}); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfma_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c) { return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])}; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) { return __builtin_aarch64_fmav2sf (__b, __c, __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) { return __builtin_aarch64_fmav4sf (__b, __c, __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c) { return __builtin_aarch64_fmav2df (__b, __c, __a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfma_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c) { return __builtin_aarch64_fmav2sf (__b, vdup_n_f32 (__c), __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c) { return __builtin_aarch64_fmav4sf (__b, vdupq_n_f32 (__c), __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmaq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c) { return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c), __a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfma_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { return __builtin_aarch64_fmav2sf (__b, vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfma_lane_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c, const int __lane) { return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])}; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmad_lane_f64 (float64_t __a, float64_t __b, float64x1_t __c, const int __lane) { return __builtin_fma (__b, __c[0], __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmas_lane_f32 (float32_t __a, float32_t __b, float32x2_t __c, const int __lane) { return __builtin_fmaf (__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfma_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { return __builtin_aarch64_fmav2sf (__b, vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfma_laneq_f64 (float64x1_t __a, float64x1_t __b, float64x2_t __c, const int __lane) { float64_t __c0 = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }); return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])}; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmad_laneq_f64 (float64_t __a, float64_t __b, float64x2_t __c, const int __lane) { return 
__builtin_fma (__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmas_laneq_f32 (float32_t __a, float32_t __b, float32x4_t __c, const int __lane) { return __builtin_fmaf (__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { return __builtin_aarch64_fmav4sf (__b, vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmaq_lane_f64 (float64x2_t __a, float64x2_t __b, float64x1_t __c, const int __lane) { return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c[0]), __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmaq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { return __builtin_aarch64_fmav4sf (__b, vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmaq_laneq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c, const int __lane) { return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfms_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c) { return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])}; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) { return __builtin_aarch64_fmav2sf (-__b, __c, __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) { return __builtin_aarch64_fmav4sf (-__b, __c, __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c) { return __builtin_aarch64_fmav2df (-__b, __c, __a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vfms_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { return __builtin_aarch64_fmav2sf (-__b, vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfms_lane_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c, const int __lane) { return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])}; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmsd_lane_f64 (float64_t __a, float64_t __b, float64x1_t __c, const int __lane) { return __builtin_fma (-__b, __c[0], __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmss_lane_f32 (float32_t __a, float32_t __b, float32x2_t __c, const int __lane) { return __builtin_fmaf (-__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static 
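/* The vfma and vfms families wrap the fused multiply-accumulate
   builtins: each computes __a + __b * __c per lane (vfms negates __b)
   in a single rounding step.  The _n forms broadcast a scalar
   multiplier; the _lane and _laneq forms take the multiplier from a
   constant lane of a vector.  A minimal sketch follows, assuming a
   caller that accumulates __x scaled by __k into __acc; the helper
   name demo_fma_axpy_f32 is hypothetical and not part of this
   header.  */
__inline float32x4_t
demo_fma_axpy_f32 (float32x4_t __acc, float32x4_t __x, float32_t __k)
{
  /* One fused multiply-add per lane: __acc + __x * __k.  */
  return vfmaq_n_f32 (__acc, __x, __k);
}
__extension__ static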
__inline float32x2_t __attribute__ ((__always_inline__)) vfms_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { return __builtin_aarch64_fmav2sf (-__b, vdup_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vfms_laneq_f64 (float64x1_t __a, float64x1_t __b, float64x2_t __c, const int __lane) { float64_t __c0 = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }); return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])}; } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vfmsd_laneq_f64 (float64_t __a, float64_t __b, float64x2_t __c, const int __lane) { return __builtin_fma (-__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vfmss_laneq_f32 (float32_t __a, float32_t __b, float32x4_t __c, const int __lane) { return __builtin_fmaf (-__b, __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }), __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { return __builtin_aarch64_fmav4sf (-__b, vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmsq_lane_f64 (float64x2_t __a, float64x2_t __b, float64x1_t __c, const int __lane) { return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c[0]), __a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vfmsq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { return __builtin_aarch64_fmav4sf (-__b, vdupq_n_f32 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vfmsq_laneq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c, const int __lane) { return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; })), __a); } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vld1_f16 (const float16_t *__a) { return __builtin_aarch64_ld1v4hf (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vld1_f32 (const float32_t *a) { return __builtin_aarch64_ld1v2sf ((const __builtin_aarch64_simd_sf *) a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vld1_f64 (const float64_t *a) { return (float64x1_t) {*a}; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vld1_p8 (const poly8_t *a) { return (poly8x8_t) __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vld1_p16 (const poly16_t *a) { return (poly16x4_t) __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vld1_s8 (const int8_t *a) { return __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) 
a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vld1_s16 (const int16_t *a) { return __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vld1_s32 (const int32_t *a) { return __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vld1_s64 (const int64_t *a) { return (int64x1_t) {*a}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vld1_u8 (const uint8_t *a) { return (uint8x8_t) __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vld1_u16 (const uint16_t *a) { return (uint16x4_t) __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vld1_u32 (const uint32_t *a) { return (uint32x2_t) __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vld1_u64 (const uint64_t *a) { return (uint64x1_t) {*a}; } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vld1q_f16 (const float16_t *__a) { return __builtin_aarch64_ld1v8hf (__a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vld1q_f32 (const float32_t *a) { return __builtin_aarch64_ld1v4sf ((const __builtin_aarch64_simd_sf *) a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vld1q_f64 (const float64_t *a) { return __builtin_aarch64_ld1v2df ((const __builtin_aarch64_simd_df *) a); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vld1q_p8 (const poly8_t *a) { return (poly8x16_t) __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vld1q_p16 (const poly16_t *a) { return (poly16x8_t) __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vld1q_s8 (const int8_t *a) { return __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vld1q_s16 (const int16_t *a) { return __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vld1q_s32 (const int32_t *a) { return __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vld1q_s64 (const int64_t *a) { return __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vld1q_u8 (const uint8_t *a) { return (uint8x16_t) __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vld1q_u16 (const uint16_t *a) { return (uint16x8_t) __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vld1q_u32 (const uint32_t *a) { return (uint32x4_t) __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) 
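/* The vld1 and vld1q forms load a contiguous run of elements into a
   single 64-bit or 128-bit vector; the one-lane 64-bit variants
   (vld1_s64, vld1_u64, vld1_f64) reduce to a plain scalar load wrapped
   in a one-element vector.  */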
vld1q_u64 (const uint64_t *a) { return (uint64x2_t) __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a); } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vld1_dup_f16 (const float16_t* __a) { float16_t __f = *__a; return (float16x4_t) { __f, __f, __f, __f }; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vld1_dup_f32 (const float32_t* __a) { return vdup_n_f32 (*__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vld1_dup_f64 (const float64_t* __a) { return vdup_n_f64 (*__a); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vld1_dup_p8 (const poly8_t* __a) { return vdup_n_p8 (*__a); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vld1_dup_p16 (const poly16_t* __a) { return vdup_n_p16 (*__a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vld1_dup_s8 (const int8_t* __a) { return vdup_n_s8 (*__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vld1_dup_s16 (const int16_t* __a) { return vdup_n_s16 (*__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vld1_dup_s32 (const int32_t* __a) { return vdup_n_s32 (*__a); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vld1_dup_s64 (const int64_t* __a) { return vdup_n_s64 (*__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vld1_dup_u8 (const uint8_t* __a) { return vdup_n_u8 (*__a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vld1_dup_u16 (const uint16_t* __a) { return vdup_n_u16 (*__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vld1_dup_u32 (const uint32_t* __a) { return vdup_n_u32 (*__a); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vld1_dup_u64 (const uint64_t* __a) { return vdup_n_u64 (*__a); } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vld1q_dup_f16 (const float16_t* __a) { float16_t __f = *__a; return (float16x8_t) { __f, __f, __f, __f, __f, __f, __f, __f }; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vld1q_dup_f32 (const float32_t* __a) { return vdupq_n_f32 (*__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vld1q_dup_f64 (const float64_t* __a) { return vdupq_n_f64 (*__a); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vld1q_dup_p8 (const poly8_t* __a) { return vdupq_n_p8 (*__a); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vld1q_dup_p16 (const poly16_t* __a) { return vdupq_n_p16 (*__a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vld1q_dup_s8 (const int8_t* __a) { return vdupq_n_s8 (*__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vld1q_dup_s16 (const int16_t* __a) { return vdupq_n_s16 (*__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vld1q_dup_s32 (const int32_t* __a) { return vdupq_n_s32 (*__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vld1q_dup_s64 (const int64_t* __a) { return vdupq_n_s64 (*__a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vld1q_dup_u8 (const uint8_t* __a) { return vdupq_n_u8 (*__a); } __extension__ static __inline uint16x8_t __attribute__ 
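/* The vld1_dup and vld1q_dup forms load one element and replicate it
   into every lane of the result, typically a single LD1R instruction;
   here each is expressed as a scalar load followed by a lane-wise
   broadcast.  */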
((__always_inline__)) vld1q_dup_u16 (const uint16_t* __a) { return vdupq_n_u16 (*__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vld1q_dup_u32 (const uint32_t* __a) { return vdupq_n_u32 (*__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vld1q_dup_u64 (const uint64_t* __a) { return vdupq_n_u64 (*__a); } __extension__ static __inline float16x4_t __attribute__ ((__always_inline__)) vld1_lane_f16 (const float16_t *__src, float16x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vld1_lane_f32 (const float32_t *__src, float32x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vld1_lane_f64 (const float64_t *__src, float64x1_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vld1_lane_p8 (const poly8_t *__src, poly8x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vld1_lane_p16 (const poly16_t *__src, poly16x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vld1_lane_s8 (const int8_t *__src, int8x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vld1_lane_s16 (const int16_t *__src, int16x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vld1_lane_s32 (const int32_t *__src, int32x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vld1_lane_s64 (const int64_t *__src, int64x1_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vld1_lane_u8 (const uint8_t *__src, uint8x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vld1_lane_u16 (const uint16_t *__src, uint16x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; 
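/* The vld1_lane and vld1q_lane forms load a single element from __src
   into the constant lane __lane of an existing vector and return the
   updated vector; for example, vld1q_lane_f32 (p, v, 3) replaces lane
   3 of v with *p and leaves the other lanes intact.  */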
__vec; }); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vld1_lane_u32 (const uint32_t *__src, uint32x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline float16x8_t __attribute__ ((__always_inline__)) vld1q_lane_f16 (const float16_t *__src, float16x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vld1q_lane_f32 (const float32_t *__src, float32x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vld1q_lane_f64 (const float64_t *__src, float64x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vld1q_lane_p8 (const poly8_t *__src, poly8x16_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vld1q_lane_p16 (const poly16_t *__src, poly16x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vld1q_lane_s8 (const int8_t *__src, int8x16_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vld1q_lane_s16 (const int16_t *__src, int16x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vld1q_lane_s32 (const int32_t *__src, int32x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vld1q_lane_s64 (const int64_t *__src, int64x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vld1q_lane_u8 (const uint8_t *__src, uint8x16_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint16x8_t 
__attribute__ ((__always_inline__)) vld1q_lane_u16 (const uint16_t *__src, uint16x8_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vld1q_lane_u32 (const uint32_t *__src, uint32x4_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vld1q_lane_u64 (const uint64_t *__src, uint64x2_t __vec, const int __lane) { return __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __lane); __vec[__lane] = *__src; __vec; }); } __extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__)) vld2_s64 (const int64_t * __a) { int64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return ret; } __extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__)) vld2_u64 (const uint64_t * __a) { uint64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return ret; } __extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__)) vld2_f64 (const float64_t * __a) { float64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 1)}; return ret; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vld2_s8 (const int8_t * __a) { int8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vld2_p8 (const poly8_t * __a) { poly8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vld2_s16 (const int16_t * __a) { int16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vld2_p16 (const poly16_t * __a) { poly16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline 
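/* The vld2 and vld2q forms are two-way interleaved structure loads
   (the LD2 instruction): twice the vector length of consecutive
   elements is loaded, with even-indexed elements landing in val[0] and
   odd-indexed elements in val[1].  A minimal sketch, assuming
   interleaved data such as stereo samples; the helper name
   demo_deinterleave_s16 is hypothetical and not part of this
   header.  */
int16x4x2_t
demo_deinterleave_s16 (const int16_t *__p)
{
  /* Loads __p[0..7]; __p[0], __p[2], __p[4], __p[6] go to val[0] and
     __p[1], __p[3], __p[5], __p[7] to val[1].  */
  return vld2_s16 (__p);
}
__extension__ static __inline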
int32x2x2_t __attribute__ ((__always_inline__)) vld2_s32 (const int32_t * __a) { int32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1); return ret; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vld2_u8 (const uint8_t * __a) { uint8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__)) vld2_u16 (const uint16_t * __a) { uint16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vld2_u32 (const uint32_t * __a) { uint32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1); return ret; } __extension__ static __inline float16x4x2_t __attribute__ ((__always_inline__)) vld2_f16 (const float16_t * __a) { float16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4hf (__a); ret.val[0] = __builtin_aarch64_get_dregoiv4hf (__o, 0); ret.val[1] = __builtin_aarch64_get_dregoiv4hf (__o, 1); return ret; } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vld2_f32 (const float32_t * __a) { float32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1); return ret; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vld2q_s8 (const int8_t * __a) { int8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vld2q_p8 (const poly8_t * __a) { poly8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vld2q_s16 (const int16_t * __a) { int16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vld2q_p16 (const poly16_t * __a) { poly16x8x2_t ret; 
__builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vld2q_s32 (const int32_t * __a) { int32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__)) vld2q_s64 (const int64_t * __a) { int64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1); return ret; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vld2q_u8 (const uint8_t * __a) { uint8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vld2q_u16 (const uint16_t * __a) { uint16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vld2q_u32 (const uint32_t * __a) { uint32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__)) vld2q_u64 (const uint64_t * __a) { uint64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1); return ret; } __extension__ static __inline float16x8x2_t __attribute__ ((__always_inline__)) vld2q_f16 (const float16_t * __a) { float16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v8hf (__a); ret.val[0] = __builtin_aarch64_get_qregoiv8hf (__o, 0); ret.val[1] = __builtin_aarch64_get_qregoiv8hf (__o, 1); return ret; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vld2q_f32 (const float32_t * __a) { float32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1); return ret; } __extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__)) vld2q_f64 (const float64_t * __a) { float64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) 
__a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1); return ret; } __extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__)) vld3_s64 (const int64_t * __a) { int64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return ret; } __extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__)) vld3_u64 (const uint64_t * __a) { uint64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return ret; } __extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__)) vld3_f64 (const float64_t * __a) { float64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 1)}; ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 2)}; return ret; } __extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__)) vld3_s8 (const int8_t * __a) { int8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__)) vld3_p8 (const poly8_t * __a) { poly8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__)) vld3_s16 (const int16_t * __a) { int16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__)) vld3_p16 (const poly16_t * __a) { poly16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__)) vld3_s32 (const int32_t * __a) { int32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) 
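/* The vld3 and vld3q forms are three-way interleaved structure loads
   (LD3), de-interleaving groups of three consecutive elements into
   val[0], val[1] and val[2]; packed RGB data, for instance, comes back
   as one vector per channel.  */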
__a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1); ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2); return ret; } __extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__)) vld3_u8 (const uint8_t * __a) { uint8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__)) vld3_u16 (const uint16_t * __a) { uint16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__)) vld3_u32 (const uint32_t * __a) { uint32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1); ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2); return ret; } __extension__ static __inline float16x4x3_t __attribute__ ((__always_inline__)) vld3_f16 (const float16_t * __a) { float16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4hf (__a); ret.val[0] = __builtin_aarch64_get_dregciv4hf (__o, 0); ret.val[1] = __builtin_aarch64_get_dregciv4hf (__o, 1); ret.val[2] = __builtin_aarch64_get_dregciv4hf (__o, 2); return ret; } __extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__)) vld3_f32 (const float32_t * __a) { float32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1); ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2); return ret; } __extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__)) vld3q_s8 (const int8_t * __a) { int8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__)) vld3q_p8 (const poly8_t * __a) { poly8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__)) vld3q_s16 (const int16_t * __a) { int16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8hi 
((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__)) vld3q_p16 (const poly16_t * __a) { poly16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__)) vld3q_s32 (const int32_t * __a) { int32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__)) vld3q_s64 (const int64_t * __a) { int64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2); return ret; } __extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__)) vld3q_u8 (const uint8_t * __a) { uint8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__)) vld3q_u16 (const uint16_t * __a) { uint16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__)) vld3q_u32 (const uint32_t * __a) { uint32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__)) vld3q_u64 (const uint64_t * __a) { uint64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2); return ret; } __extension__ static __inline float16x8x3_t __attribute__ ((__always_inline__)) vld3q_f16 
(const float16_t * __a) { float16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v8hf (__a); ret.val[0] = __builtin_aarch64_get_qregciv8hf (__o, 0); ret.val[1] = __builtin_aarch64_get_qregciv8hf (__o, 1); ret.val[2] = __builtin_aarch64_get_qregciv8hf (__o, 2); return ret; } __extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__)) vld3q_f32 (const float32_t * __a) { float32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2); return ret; } __extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__)) vld3q_f64 (const float64_t * __a) { float64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2); return ret; } __extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__)) vld4_s64 (const int64_t * __a) { int64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return ret; } __extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__)) vld4_u64 (const uint64_t * __a) { uint64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return ret; } __extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__)) vld4_f64 (const float64_t * __a) { float64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 1)}; ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 2)}; ret.val[3] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 3)}; return ret; } __extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__)) vld4_s8 (const int8_t * __a) { int8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__)) vld4_p8 (const poly8_t * __a) { poly8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = 
(poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__)) vld4_s16 (const int16_t * __a) { int16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__)) vld4_p16 (const poly16_t * __a) { poly16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__)) vld4_s32 (const int32_t * __a) { int32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1); ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2); ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3); return ret; } __extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__)) vld4_u8 (const uint8_t * __a) { uint8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__)) vld4_u16 (const uint16_t * __a) { uint16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__)) vld4_u32 (const uint32_t * __a) { uint32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1); ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2); ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3); return ret; } __extension__ static __inline float16x4x4_t __attribute__ ((__always_inline__)) vld4_f16 (const float16_t * __a) { float16x4x4_t ret; 
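/* The vld4 and vld4q forms are four-way interleaved structure loads
   (LD4), splitting groups of four consecutive elements into val[0]
   through val[3]; a typical use is separating interleaved RGBA pixels
   into one vector per channel.  */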
__builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4hf (__a); ret.val[0] = __builtin_aarch64_get_dregxiv4hf (__o, 0); ret.val[1] = __builtin_aarch64_get_dregxiv4hf (__o, 1); ret.val[2] = __builtin_aarch64_get_dregxiv4hf (__o, 2); ret.val[3] = __builtin_aarch64_get_dregxiv4hf (__o, 3); return ret; } __extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__)) vld4_f32 (const float32_t * __a) { float32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1); ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2); ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3); return ret; } __extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__)) vld4q_s8 (const int8_t * __a) { int8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__)) vld4q_p8 (const poly8_t * __a) { poly8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__)) vld4q_s16 (const int16_t * __a) { int16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__)) vld4q_p16 (const poly16_t * __a) { poly16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__)) vld4q_s32 (const int32_t * __a) { int32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__)) vld4q_s64 
(const int64_t * __a) { int64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2); ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3); return ret; } __extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__)) vld4q_u8 (const uint8_t * __a) { uint8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__)) vld4q_u16 (const uint16_t * __a) { uint16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__)) vld4q_u32 (const uint32_t * __a) { uint32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__)) vld4q_u64 (const uint64_t * __a) { uint64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2); ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3); return ret; } __extension__ static __inline float16x8x4_t __attribute__ ((__always_inline__)) vld4q_f16 (const float16_t * __a) { float16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v8hf (__a); ret.val[0] = __builtin_aarch64_get_qregxiv8hf (__o, 0); ret.val[1] = __builtin_aarch64_get_qregxiv8hf (__o, 1); ret.val[2] = __builtin_aarch64_get_qregxiv8hf (__o, 2); ret.val[3] = __builtin_aarch64_get_qregxiv8hf (__o, 3); return ret; } __extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__)) vld4q_f32 (const float32_t * __a) { float32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2); ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3); return ret; } __extension__ static 
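/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The vld4q_*
   wrappers above map to a single LD4 instruction: it loads four vectors'
   worth of data and deinterleaves every fourth element into its own register.
   A minimal, self-contained example; function and variable names are
   illustrative, and the #if 0 guard keeps this expanded translation unit
   unchanged (extract and compile the snippet stand-alone for AArch64). */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* Split 16 interleaved RGBA pixels (64 bytes) into four 16-byte planes. */
static void rgba_deinterleave16(const uint8_t *rgba,
                                uint8_t *r, uint8_t *g, uint8_t *b, uint8_t *a)
{
  uint8x16x4_t px = vld4q_u8(rgba); /* val[0]=R, val[1]=G, val[2]=B, val[3]=A */
  vst1q_u8(r, px.val[0]);
  vst1q_u8(g, px.val[1]);
  vst1q_u8(b, px.val[2]);
  vst1q_u8(a, px.val[3]);
}
#endif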
__inline float64x2x4_t __attribute__ ((__always_inline__)) vld4q_f64 (const float64_t * __a) { float64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2); ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3); return ret; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vld2_dup_s8 (const int8_t * __a) { int8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vld2_dup_s16 (const int16_t * __a) { int16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__)) vld2_dup_s32 (const int32_t * __a) { int32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1); return ret; } __extension__ static __inline float16x4x2_t __attribute__ ((__always_inline__)) vld2_dup_f16 (const float16_t * __a) { float16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = __builtin_aarch64_get_dregoiv4hf (__o, 0); ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregoiv4hf (__o, 1); return ret; } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vld2_dup_f32 (const float32_t * __a) { float32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1); return ret; } __extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__)) vld2_dup_f64 (const float64_t * __a) { float64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rdf ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 1)}; return ret; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vld2_dup_u8 (const uint8_t * __a) { uint8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__)) vld2_dup_u16 (const uint16_t * __a) { uint16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); 
ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vld2_dup_u32 (const uint32_t * __a) { uint32x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1); return ret; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vld2_dup_p8 (const poly8_t * __a) { poly8x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1); return ret; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vld2_dup_p16 (const poly16_t * __a) { poly16x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1); return ret; } __extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__)) vld2_dup_s64 (const int64_t * __a) { int64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return ret; } __extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__)) vld2_dup_u64 (const uint64_t * __a) { uint64x1x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return ret; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vld2q_dup_s8 (const int8_t * __a) { int8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vld2q_dup_p8 (const poly8_t * __a) { poly8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vld2q_dup_s16 (const int16_t * __a) { int16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vld2q_dup_p16 (const poly16_t * __a) { poly16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (poly16x8_t) 
__builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vld2q_dup_s32 (const int32_t * __a) { int32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__)) vld2q_dup_s64 (const int64_t * __a) { int64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1); return ret; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vld2q_dup_u8 (const uint8_t * __a) { uint8x16x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1); return ret; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vld2q_dup_u16 (const uint16_t * __a) { uint16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1); return ret; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vld2q_dup_u32 (const uint32_t * __a) { uint32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__)) vld2q_dup_u64 (const uint64_t * __a) { uint64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1); return ret; } __extension__ static __inline float16x8x2_t __attribute__ ((__always_inline__)) vld2q_dup_f16 (const float16_t * __a) { float16x8x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv8hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregoiv8hf (__o, 0); ret.val[1] = __builtin_aarch64_get_qregoiv8hf (__o, 1); return ret; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vld2q_dup_f32 (const float32_t * __a) { float32x4x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1); return ret; } __extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__)) vld2q_dup_f64 (const float64_t * __a) { float64x2x2_t ret; __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_ld2rv2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0); ret.val[1] = (float64x2_t) 
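/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The
   vld2_dup_* and vld2q_dup_* wrappers map to LD2R: one two-element structure
   is read from memory, element 0 is replicated across every lane of val[0]
   and element 1 across val[1]. A hedged example under assumed conditions
   (names illustrative, n assumed a multiple of 8), #if 0-guarded: */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* Scale interleaved L/R Q15 audio by one (left, right) gain pair that is
   loaded and broadcast with a single LD2R instead of scalar loads + dups. */
static void apply_gain_q15(int16_t *lr, int n, const int16_t gain_lr[2])
{
  int16x8x2_t g = vld2q_dup_s16(gain_lr);       /* val[0]=L gain, val[1]=R gain */
  for (int i = 0; i + 8 <= n; i += 8) {
    int16x8x2_t s = vld2q_s16(lr + 2 * i);      /* LD2: deinterleave L and R */
    s.val[0] = vqdmulhq_s16(s.val[0], g.val[0]); /* saturating doubling Q15 mul */
    s.val[1] = vqdmulhq_s16(s.val[1], g.val[1]);
    vst2q_s16(lr + 2 * i, s);                   /* ST2: reinterleave */
  }
}
#endif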
__builtin_aarch64_get_qregoiv2df (__o, 1); return ret; } __extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__)) vld3_dup_s64 (const int64_t * __a) { int64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return ret; } __extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__)) vld3_dup_u64 (const uint64_t * __a) { uint64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return ret; } __extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__)) vld3_dup_f64 (const float64_t * __a) { float64x1x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rdf ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 1)}; ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 2)}; return ret; } __extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__)) vld3_dup_s8 (const int8_t * __a) { int8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__)) vld3_dup_p8 (const poly8_t * __a) { poly8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__)) vld3_dup_s16 (const int16_t * __a) { int16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__)) vld3_dup_p16 (const poly16_t * __a) { poly16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__)) vld3_dup_s32 (const int32_t * __a) { int32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si 
(__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1); ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2); return ret; } __extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__)) vld3_dup_u8 (const uint8_t * __a) { uint8x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1); ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2); return ret; } __extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__)) vld3_dup_u16 (const uint16_t * __a) { uint16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0); ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1); ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2); return ret; } __extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__)) vld3_dup_u32 (const uint32_t * __a) { uint32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1); ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2); return ret; } __extension__ static __inline float16x4x3_t __attribute__ ((__always_inline__)) vld3_dup_f16 (const float16_t * __a) { float16x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 0); ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 1); ret.val[2] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 2); return ret; } __extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__)) vld3_dup_f32 (const float32_t * __a) { float32x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1); ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2); return ret; } __extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__)) vld3q_dup_s8 (const int8_t * __a) { int8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__)) vld3q_dup_p8 (const poly8_t * __a) { poly8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__)) vld3q_dup_s16 (const int16_t * __a) { int16x8x3_t ret; 
__builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__)) vld3q_dup_p16 (const poly16_t * __a) { poly16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__)) vld3q_dup_s32 (const int32_t * __a) { int32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__)) vld3q_dup_s64 (const int64_t * __a) { int64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2); return ret; } __extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__)) vld3q_dup_u8 (const uint8_t * __a) { uint8x16x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2); return ret; } __extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__)) vld3q_dup_u16 (const uint16_t * __a) { uint16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2); return ret; } __extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__)) vld3q_dup_u32 (const uint32_t * __a) { uint32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__)) vld3q_dup_u64 (const uint64_t * __a) { uint64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2); 
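/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The
   vld3_dup_* and vld3q_dup_* wrappers are the three-element LD3R forms:
   element k of the structure at the pointer fills every lane of val[k].
   Sketch under assumed conditions (names illustrative, npix assumed a
   multiple of 16), #if 0-guarded: */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* Fill a packed-RGB buffer with one solid colour, 16 pixels per store. */
static void fill_rgb(uint8_t *dst, int npix, const uint8_t rgb[3])
{
  uint8x16x3_t c = vld3q_dup_u8(rgb);  /* val[0]=R splat, val[1]=G, val[2]=B */
  for (int i = 0; i + 16 <= npix; i += 16)
    vst3q_u8(dst + 3 * i, c);          /* ST3 re-interleaves to R,G,B,R,G,B,... */
}
#endif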
return ret; } __extension__ static __inline float16x8x3_t __attribute__ ((__always_inline__)) vld3q_dup_f16 (const float16_t * __a) { float16x8x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv8hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 0); ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 1); ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 2); return ret; } __extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__)) vld3q_dup_f32 (const float32_t * __a) { float32x4x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2); return ret; } __extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__)) vld3q_dup_f64 (const float64_t * __a) { float64x2x3_t ret; __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_ld3rv2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2); return ret; } __extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__)) vld4_dup_s64 (const int64_t * __a) { int64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return ret; } __extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__)) vld4_dup_u64 (const uint64_t * __a) { uint64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rdi ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return ret; } __extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__)) vld4_dup_f64 (const float64_t * __a) { float64x1x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rdf ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 0)}; ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 1)}; ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 2)}; ret.val[3] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 3)}; return ret; } __extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__)) vld4_dup_s8 (const int8_t * __a) { int8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline 
poly8x8x4_t __attribute__ ((__always_inline__)) vld4_dup_p8 (const poly8_t * __a) { poly8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__)) vld4_dup_s16 (const int16_t * __a) { int16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__)) vld4_dup_p16 (const poly16_t * __a) { poly16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__)) vld4_dup_s32 (const int32_t * __a) { int32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0); ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1); ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2); ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3); return ret; } __extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__)) vld4_dup_u8 (const uint8_t * __a) { uint8x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0); ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1); ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2); ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3); return ret; } __extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__)) vld4_dup_u16 (const uint16_t * __a) { uint16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0); ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1); ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2); ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3); return ret; } __extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__)) vld4_dup_u32 (const uint32_t * __a) { uint32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0); ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1); ret.val[2] = (uint32x2_t) 
__builtin_aarch64_get_dregxiv2si (__o, 2); ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3); return ret; } __extension__ static __inline float16x4x4_t __attribute__ ((__always_inline__)) vld4_dup_f16 (const float16_t * __a) { float16x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 0); ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 1); ret.val[2] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 2); ret.val[3] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 3); return ret; } __extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__)) vld4_dup_f32 (const float32_t * __a) { float32x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0); ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1); ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2); ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3); return ret; } __extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__)) vld4q_dup_s8 (const int8_t * __a) { int8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__)) vld4q_dup_p8 (const poly8_t * __a) { poly8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__)) vld4q_dup_s16 (const int16_t * __a) { int16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__)) vld4q_dup_p16 (const poly16_t * __a) { poly16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__)) vld4q_dup_s32 (const int32_t * __a) { int32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4si ((const 
__builtin_aarch64_simd_si *) __a); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__)) vld4q_dup_s64 (const int64_t * __a) { int64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2); ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3); return ret; } __extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__)) vld4q_dup_u8 (const uint8_t * __a) { uint8x16x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2); ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3); return ret; } __extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__)) vld4q_dup_u16 (const uint16_t * __a) { uint16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2); ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3); return ret; } __extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__)) vld4q_dup_u32 (const uint32_t * __a) { uint32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4si ((const __builtin_aarch64_simd_si *) __a); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__)) vld4q_dup_u64 (const uint64_t * __a) { uint64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2); ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3); return ret; } __extension__ static __inline float16x8x4_t __attribute__ ((__always_inline__)) vld4q_dup_f16 (const float16_t * __a) { float16x8x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv8hf ((const __builtin_aarch64_simd_hf *) __a); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 0); ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 1); ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 2); ret.val[3] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 3); return ret; } __extension__ static __inline 
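/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The
   vld4_dup_* and vld4q_dup_* wrappers (LD4R) broadcast four adjacent scalars,
   one per vector, which suits short FIR kernels: load the taps once and keep
   one splatted vector per tap. Sketch under assumed conditions (names
   illustrative; n a multiple of 4; x must have n+3 readable elements): */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* y[i] = sum over k<4 of taps[k] * x[i+k], computing 4 outputs per step. */
static void fir4_s32(int32_t *y, const int32_t *x, int n, const int32_t taps[4])
{
  int32x4x4_t t = vld4q_dup_s32(taps);  /* t.val[k] = taps[k] in every lane */
  for (int i = 0; i + 4 <= n; i += 4) {
    int32x4_t acc = vmulq_s32(vld1q_s32(x + i), t.val[0]);
    acc = vmlaq_s32(acc, vld1q_s32(x + i + 1), t.val[1]);
    acc = vmlaq_s32(acc, vld1q_s32(x + i + 2), t.val[2]);
    acc = vmlaq_s32(acc, vld1q_s32(x + i + 3), t.val[3]);
    vst1q_s32(y + i, acc);
  }
}
#endif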
float32x4x4_t __attribute__ ((__always_inline__)) vld4q_dup_f32 (const float32_t * __a) { float32x4x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv4sf ((const __builtin_aarch64_simd_sf *) __a); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2); ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3); return ret; } __extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__)) vld4q_dup_f64 (const float64_t * __a) { float64x2x4_t ret; __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_ld4rv2df ((const __builtin_aarch64_simd_df *) __a); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2); ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3); return ret; }
# 17572 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x4x2_t __attribute__ ((__always_inline__)) vld2_lane_f16 (const float16_t * __ptr, float16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float16x8x2_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (0)); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (0)); __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hf (__o, (float16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev4hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); __b.val[0] = (float16x4_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (float16x4_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vld2_lane_f32 (const float32_t * __ptr, float32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float32x4x2_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (0)); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (0)); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev2sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); __b.val[0] = (float32x2_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (float32x2_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__)) vld2_lane_f64 (const float64_t * __ptr, float64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float64x2x2_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (0)); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (0)); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanedf ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); __b.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vld2_lane_p8 (const poly8_t * __ptr, poly8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly8x16x2_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (0)); 
__temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (0)); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vld2_lane_p16 (const poly16_t * __ptr, poly16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly16x8x2_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (0)); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (0)); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vld2_lane_s8 (const int8_t * __ptr, int8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int8x16x2_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (0)); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (0)); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (int8x8_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (int8x8_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vld2_lane_s16 (const int16_t * __ptr, int16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int16x8x2_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (0)); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (0)); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (int16x4_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (int16x4_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__)) vld2_lane_s32 (const int32_t * __ptr, int32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int32x4x2_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (0)); __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (0)); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (int32x2_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (int32x2_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__)) vld2_lane_s64 (const int64_t * __ptr, int64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int64x2x2_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (0)); __temp.val[1] = 
vcombine_s64 (__b.val[1], vcreate_s64 (0)); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vld2_lane_u8 (const uint8_t * __ptr, uint8x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint8x16x2_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (0)); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (0)); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__)) vld2_lane_u16 (const uint16_t * __ptr, uint16x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint16x8x2_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (0)); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (0)); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vld2_lane_u32 (const uint32_t * __ptr, uint32x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint32x4x2_t __temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (0)); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (0)); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; } __extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__)) vld2_lane_u64 (const uint64_t * __ptr, uint64x1x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint64x2x2_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (0)); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (0)); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_ld2_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0); __b.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1); return __b; }
# 17618 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x8x2_t __attribute__ ((__always_inline__)) vld2q_lane_f16 (const float16_t * __ptr, float16x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float16x8x2_t ret; __o = 
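/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The
   vld2_lane_* forms just above take an existing register pair __b, load ONE
   interleaved {a,b} structure from memory into lane __c of each vector, and
   leave the other lanes untouched; the lane index must be a compile-time
   constant, hence the literal 0..3 below. Names are illustrative: */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* Gather four (x,y) pairs from the start of four strided rows into two
   vectors: p.val[0] collects the xs, p.val[1] collects the ys. */
static int16x4x2_t gather4_xy(const int16_t *table, int stride)
{
  int16x4x2_t p = { { vdup_n_s16(0), vdup_n_s16(0) } };
  p = vld2_lane_s16(table + 0 * stride, p, 0);
  p = vld2_lane_s16(table + 1 * stride, p, 1);
  p = vld2_lane_s16(table + 2 * stride, p, 2);
  p = vld2_lane_s16(table + 3 * stride, p, 3);
  return p;
}
#endif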
__builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev8hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vld2q_lane_f32 (const float32_t * __ptr, float32x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float32x4x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev4sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__)) vld2q_lane_f64 (const float64_t * __ptr, float64x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; float64x2x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev2df ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vld2q_lane_p8 (const poly8_t * __ptr, poly8x16x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly8x16x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vld2q_lane_p16 (const poly16_t * __ptr, poly16x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; poly16x8x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vld2q_lane_s8 (const int8_t * __ptr, int8x16x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int8x16x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vld2q_lane_s16 (const int16_t * __ptr, int16x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int16x8x2_t ret; __o = 
__builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vld2q_lane_s32 (const int32_t * __ptr, int32x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int32x4x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__)) vld2q_lane_s64 (const int64_t * __ptr, int64x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; int64x2x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vld2q_lane_u8 (const uint8_t * __ptr, uint8x16x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint8x16x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vld2q_lane_u16 (const uint16_t * __ptr, uint16x8x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint16x8x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vld2q_lane_u32 (const uint32_t * __ptr, uint32x4x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint32x4x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; } __extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__)) vld2q_lane_u64 (const uint64_t * __ptr, uint64x2x2_t __b, const int __c) { __builtin_aarch64_simd_oi __o; uint64x2x2_t ret; __o = __builtin_aarch64_set_qregoiv4si (__o, 
(int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_ld2_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv4si (__o, 1); return ret; }
# 17666 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x4x3_t __attribute__ ((__always_inline__)) vld3_lane_f16 (const float16_t * __ptr, float16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float16x8x3_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (0)); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (0)); __temp.val[2] = vcombine_f16 (__b.val[2], vcreate_f16 (0)); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev4hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); __b.val[0] = (float16x4_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (float16x4_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (float16x4_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__)) vld3_lane_f32 (const float32_t * __ptr, float32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float32x4x3_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (0)); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (0)); __temp.val[2] = vcombine_f32 (__b.val[2], vcreate_f32 (0)); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev2sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); __b.val[0] = (float32x2_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (float32x2_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (float32x2_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__)) vld3_lane_f64 (const float64_t * __ptr, float64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float64x2x3_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (0)); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (0)); __temp.val[2] = vcombine_f64 (__b.val[2], vcreate_f64 (0)); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanedf ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); __b.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__)) vld3_lane_p8 (const poly8_t * __ptr, poly8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly8x16x3_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (0)); __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (0)); __temp.val[2] = vcombine_p8 (__b.val[2], 
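/* [editor's note: usage sketch, not part of arm_neon.h or libvpx] The
   vld2q_lane_* definitions above are the same per-lane structured load on
   full Q registers, and the vld3_lane_* definitions that follow extend the
   pattern to three-element structures. Names are illustrative: */
#if 0
#include <arm_neon.h>
#include <stdint.h>

/* Overwrite element pair 2 (a compile-time lane) of two deinterleaved u32
   planes with src[0] and src[1]; all other lanes pass through unchanged. */
static uint32x4x2_t patch_pair(uint32x4x2_t planes, const uint32_t *src)
{
  return vld2q_lane_u32(src, planes, 2);
}
#endif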
vcreate_p8 (0)); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (poly8x8_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (poly8x8_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (poly8x8_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__)) vld3_lane_p16 (const poly16_t * __ptr, poly16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly16x8x3_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (0)); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (0)); __temp.val[2] = vcombine_p16 (__b.val[2], vcreate_p16 (0)); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (poly16x4_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (poly16x4_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (poly16x4_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__)) vld3_lane_s8 (const int8_t * __ptr, int8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int8x16x3_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (0)); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (0)); __temp.val[2] = vcombine_s8 (__b.val[2], vcreate_s8 (0)); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (int8x8_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (int8x8_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (int8x8_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__)) vld3_lane_s16 (const int16_t * __ptr, int16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int16x8x3_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (0)); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (0)); __temp.val[2] = vcombine_s16 (__b.val[2], vcreate_s16 (0)); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (int16x4_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (int16x4_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (int16x4_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__)) vld3_lane_s32 (const int32_t * __ptr, int32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int32x4x3_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (0)); __temp.val[1] = 
vcombine_s32 (__b.val[1], vcreate_s32 (0)); __temp.val[2] = vcombine_s32 (__b.val[2], vcreate_s32 (0)); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (int32x2_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (int32x2_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (int32x2_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__)) vld3_lane_s64 (const int64_t * __ptr, int64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int64x2x3_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (0)); __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (0)); __temp.val[2] = vcombine_s64 (__b.val[2], vcreate_s64 (0)); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__)) vld3_lane_u8 (const uint8_t * __ptr, uint8x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint8x16x3_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (0)); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (0)); __temp.val[2] = vcombine_u8 (__b.val[2], vcreate_u8 (0)); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (uint8x8_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (uint8x8_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (uint8x8_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__)) vld3_lane_u16 (const uint16_t * __ptr, uint16x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint16x8x3_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (0)); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (0)); __temp.val[2] = vcombine_u16 (__b.val[2], vcreate_u16 (0)); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (uint16x4_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (uint16x4_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (uint16x4_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__)) vld3_lane_u32 (const uint32_t * __ptr, uint32x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint32x4x3_t 
__temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (0)); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (0)); __temp.val[2] = vcombine_u32 (__b.val[2], vcreate_u32 (0)); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (uint32x2_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (uint32x2_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (uint32x2_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; } __extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__)) vld3_lane_u64 (const uint64_t * __ptr, uint64x1x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint64x2x3_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (0)); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (0)); __temp.val[2] = vcombine_u64 (__b.val[2], vcreate_u64 (0)); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_ld3_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0); __b.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1); __b.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2); return __b; }
# 17714 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x8x3_t __attribute__ ((__always_inline__)) vld3q_lane_f16 (const float16_t * __ptr, float16x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float16x8x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev8hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__)) vld3q_lane_f32 (const float32_t * __ptr, float32x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float32x4x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev4sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__)) vld3q_lane_f64 (const float64_t * __ptr, float64x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; float64x2x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o =
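/* Note the pattern shared by the 64-bit vld3_lane_* forms above: the CI
   register container only carries q registers, so each d-register input
   is first widened with vcombine_* (x, vcreate_* (0)), the lane load is
   performed on that q-register view, and the low halves are extracted
   again via __builtin_aarch64_get_dregcidi.  Typical use is gathering
   one interleaved 3-channel element, e.g. one RGB pixel (values
   illustrative only):

       uint8_t rgb[3] = { 10, 20, 30 };
       uint8x8x3_t px;
       px.val[0] = px.val[1] = px.val[2] = vdup_n_u8 (0);
       px = vld3_lane_u8 (rgb, px, 0); // channel i -> px.val[i][0]
*/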
__builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev2df ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__)) vld3q_lane_p8 (const poly8_t * __ptr, poly8x16x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly8x16x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__)) vld3q_lane_p16 (const poly16_t * __ptr, poly16x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; poly16x8x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__)) vld3q_lane_s8 (const int8_t * __ptr, int8x16x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int8x16x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__)) vld3q_lane_s16 (const int16_t * __ptr, int16x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int16x8x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__)) vld3q_lane_s32 (const int32_t * __ptr, int32x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int32x4x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); 
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__)) vld3q_lane_s64 (const int64_t * __ptr, int64x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; int64x2x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__)) vld3q_lane_u8 (const uint8_t * __ptr, uint8x16x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint8x16x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__)) vld3q_lane_u16 (const uint16_t * __ptr, uint16x8x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint16x8x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__)) vld3q_lane_u32 (const uint32_t * __ptr, uint32x4x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; uint32x4x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; } __extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__)) vld3q_lane_u64 (const uint64_t * __ptr, uint64x2x3_t __b, const int __c) { __builtin_aarch64_simd_ci __o; 
uint64x2x3_t ret; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_ld3_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv4si (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv4si (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv4si (__o, 2); return ret; }
# 17770 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x4x4_t __attribute__ ((__always_inline__)) vld4_lane_f16 (const float16_t * __ptr, float16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float16x8x4_t __temp; __temp.val[0] = vcombine_f16 (__b.val[0], vcreate_f16 (0)); __temp.val[1] = vcombine_f16 (__b.val[1], vcreate_f16 (0)); __temp.val[2] = vcombine_f16 (__b.val[2], vcreate_f16 (0)); __temp.val[3] = vcombine_f16 (__b.val[3], vcreate_f16 (0)); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev4hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); __b.val[0] = (float16x4_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (float16x4_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (float16x4_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (float16x4_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__)) vld4_lane_f32 (const float32_t * __ptr, float32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float32x4x4_t __temp; __temp.val[0] = vcombine_f32 (__b.val[0], vcreate_f32 (0)); __temp.val[1] = vcombine_f32 (__b.val[1], vcreate_f32 (0)); __temp.val[2] = vcombine_f32 (__b.val[2], vcreate_f32 (0)); __temp.val[3] = vcombine_f32 (__b.val[3], vcreate_f32 (0)); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev2sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); __b.val[0] = (float32x2_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (float32x2_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (float32x2_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (float32x2_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__)) vld4_lane_f64 (const float64_t * __ptr, float64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float64x2x4_t __temp; __temp.val[0] = vcombine_f64 (__b.val[0], vcreate_f64 (0)); __temp.val[1] = vcombine_f64 (__b.val[1], vcreate_f64 (0)); __temp.val[2] = vcombine_f64 (__b.val[2], vcreate_f64 (0)); __temp.val[3] = vcombine_f64 (__b.val[3], vcreate_f64 (0)); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t)
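/* The q-register vld3q_lane_* forms above need no widening step: each
   128-bit input is merely reinterpreted as int32x4_t to match the
   container builtin's signature, loaded into with an LD3 (single
   structure) instruction, and reinterpreted back on extraction.
   Sketch (illustrative):

       int16_t abc[3] = { 1, 2, 3 };
       int16x8x3_t v;
       v.val[0] = v.val[1] = v.val[2] = vdupq_n_s16 (0);
       v = vld3q_lane_s16 (abc, v, 4); // 1, 2, 3 -> lane 4 of the three vectors
*/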
__temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanedf ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); __b.val[0] = (float64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (float64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (float64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (float64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__)) vld4_lane_p8 (const poly8_t * __ptr, poly8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly8x16x4_t __temp; __temp.val[0] = vcombine_p8 (__b.val[0], vcreate_p8 (0)); __temp.val[1] = vcombine_p8 (__b.val[1], vcreate_p8 (0)); __temp.val[2] = vcombine_p8 (__b.val[2], vcreate_p8 (0)); __temp.val[3] = vcombine_p8 (__b.val[3], vcreate_p8 (0)); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__)) vld4_lane_p16 (const poly16_t * __ptr, poly16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly16x8x4_t __temp; __temp.val[0] = vcombine_p16 (__b.val[0], vcreate_p16 (0)); __temp.val[1] = vcombine_p16 (__b.val[1], vcreate_p16 (0)); __temp.val[2] = vcombine_p16 (__b.val[2], vcreate_p16 (0)); __temp.val[3] = vcombine_p16 (__b.val[3], vcreate_p16 (0)); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__)) vld4_lane_s8 (const int8_t * __ptr, int8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int8x16x4_t __temp; __temp.val[0] = vcombine_s8 (__b.val[0], vcreate_s8 (0)); __temp.val[1] = vcombine_s8 (__b.val[1], vcreate_s8 (0)); __temp.val[2] = vcombine_s8 (__b.val[2], vcreate_s8 (0)); __temp.val[3] = vcombine_s8 (__b.val[3], vcreate_s8 (0)); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (int8x8_t) 
__builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (int8x8_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (int8x8_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (int8x8_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__)) vld4_lane_s16 (const int16_t * __ptr, int16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int16x8x4_t __temp; __temp.val[0] = vcombine_s16 (__b.val[0], vcreate_s16 (0)); __temp.val[1] = vcombine_s16 (__b.val[1], vcreate_s16 (0)); __temp.val[2] = vcombine_s16 (__b.val[2], vcreate_s16 (0)); __temp.val[3] = vcombine_s16 (__b.val[3], vcreate_s16 (0)); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (int16x4_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (int16x4_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (int16x4_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (int16x4_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__)) vld4_lane_s32 (const int32_t * __ptr, int32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int32x4x4_t __temp; __temp.val[0] = vcombine_s32 (__b.val[0], vcreate_s32 (0)); __temp.val[1] = vcombine_s32 (__b.val[1], vcreate_s32 (0)); __temp.val[2] = vcombine_s32 (__b.val[2], vcreate_s32 (0)); __temp.val[3] = vcombine_s32 (__b.val[3], vcreate_s32 (0)); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (int32x2_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (int32x2_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (int32x2_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (int32x2_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__)) vld4_lane_s64 (const int64_t * __ptr, int64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int64x2x4_t __temp; __temp.val[0] = vcombine_s64 (__b.val[0], vcreate_s64 (0)); __temp.val[1] = vcombine_s64 (__b.val[1], vcreate_s64 (0)); __temp.val[2] = vcombine_s64 (__b.val[2], vcreate_s64 (0)); __temp.val[3] = vcombine_s64 (__b.val[3], vcreate_s64 (0)); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi 
(__o, 3); return __b; } __extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__)) vld4_lane_u8 (const uint8_t * __ptr, uint8x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint8x16x4_t __temp; __temp.val[0] = vcombine_u8 (__b.val[0], vcreate_u8 (0)); __temp.val[1] = vcombine_u8 (__b.val[1], vcreate_u8 (0)); __temp.val[2] = vcombine_u8 (__b.val[2], vcreate_u8 (0)); __temp.val[3] = vcombine_u8 (__b.val[3], vcreate_u8 (0)); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev8qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); __b.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__)) vld4_lane_u16 (const uint16_t * __ptr, uint16x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint16x8x4_t __temp; __temp.val[0] = vcombine_u16 (__b.val[0], vcreate_u16 (0)); __temp.val[1] = vcombine_u16 (__b.val[1], vcreate_u16 (0)); __temp.val[2] = vcombine_u16 (__b.val[2], vcreate_u16 (0)); __temp.val[3] = vcombine_u16 (__b.val[3], vcreate_u16 (0)); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev4hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); __b.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__)) vld4_lane_u32 (const uint32_t * __ptr, uint32x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint32x4x4_t __temp; __temp.val[0] = vcombine_u32 (__b.val[0], vcreate_u32 (0)); __temp.val[1] = vcombine_u32 (__b.val[1], vcreate_u32 (0)); __temp.val[2] = vcombine_u32 (__b.val[2], vcreate_u32 (0)); __temp.val[3] = vcombine_u32 (__b.val[3], vcreate_u32 (0)); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanev2si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); __b.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; } __extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__)) vld4_lane_u64 (const uint64_t * __ptr, uint64x1x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; 
uint64x2x4_t __temp; __temp.val[0] = vcombine_u64 (__b.val[0], vcreate_u64 (0)); __temp.val[1] = vcombine_u64 (__b.val[1], vcreate_u64 (0)); __temp.val[2] = vcombine_u64 (__b.val[2], vcreate_u64 (0)); __temp.val[3] = vcombine_u64 (__b.val[3], vcreate_u64 (0)); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3); __o = __builtin_aarch64_ld4_lanedi ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); __b.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0); __b.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1); __b.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2); __b.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3); return __b; }
# 17820 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4
__extension__ static __inline float16x8x4_t __attribute__ ((__always_inline__)) vld4q_lane_f16 (const float16_t * __ptr, float16x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float16x8x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev8hf ( (__builtin_aarch64_simd_hf *) __ptr, __o, __c); ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (float16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__)) vld4q_lane_f32 (const float32_t * __ptr, float32x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float32x4x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev4sf ( (__builtin_aarch64_simd_sf *) __ptr, __o, __c); ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__)) vld4q_lane_f64 (const float64_t * __ptr, float64x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; float64x2x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev2df ( (__builtin_aarch64_simd_df *) __ptr, __o, __c); ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv4si
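/* vld4_lane_* above repeats the widen/load/narrow scheme of
   vld3_lane_*, only with the four-register XI container and an LD4
   (single structure) load.  E.g. fetching one interleaved RGBA pixel
   into lane 7 (values illustrative only):

       uint8_t rgba[4] = { 1, 2, 3, 4 };
       uint8x8x4_t px;
       px.val[0] = px.val[1] = px.val[2] = px.val[3] = vdup_n_u8 (0);
       px = vld4_lane_u8 (rgba, px, 7); // channel i -> px.val[i][7]
*/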
(__o, 2); ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__)) vld4q_lane_p8 (const poly8_t * __ptr, poly8x16x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly8x16x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__)) vld4q_lane_p16 (const poly16_t * __ptr, poly16x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; poly16x8x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__)) vld4q_lane_s8 (const int8_t * __ptr, int8x16x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int8x16x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__)) vld4q_lane_s16 (const int16_t * __ptr, int16x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int16x8x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 3); 
return ret; } __extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__)) vld4q_lane_s32 (const int32_t * __ptr, int32x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int32x4x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__)) vld4q_lane_s64 (const int64_t * __ptr, int64x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; int64x2x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__)) vld4q_lane_u8 (const uint8_t * __ptr, uint8x16x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint8x16x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev16qi ( (__builtin_aarch64_simd_qi *) __ptr, __o, __c); ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__)) vld4q_lane_u16 (const uint16_t * __ptr, uint16x8x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint16x8x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev8hi ( (__builtin_aarch64_simd_hi *) __ptr, __o, __c); ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint32x4x4_t __attribute__ 
((__always_inline__)) vld4q_lane_u32 (const uint32_t * __ptr, uint32x4x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint32x4x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev4si ( (__builtin_aarch64_simd_si *) __ptr, __o, __c); ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__)) vld4q_lane_u64 (const uint64_t * __ptr, uint64x2x4_t __b, const int __c) { __builtin_aarch64_simd_xi __o; uint64x2x4_t ret; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); __o = __builtin_aarch64_ld4_lanev2di ( (__builtin_aarch64_simd_di *) __ptr, __o, __c); ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 0); ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 1); ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 2); ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv4si (__o, 3); return ret; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmax_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_aarch64_smax_nanv2sf (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmax_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_smaxv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmax_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_smaxv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmax_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_smaxv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmax_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmax_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmax_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmaxq_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_aarch64_smax_nanv4sf (__a, __b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmaxq_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_aarch64_smax_nanv2df (__a, __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmaxq_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_aarch64_smaxv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ 
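/* Two different float "max" lowerings appear from here on: vmax_f32 /
   vmaxq_f32 (and the pairwise vpmax_f* below) use the smax_nan*
   builtins, i.e. the FMAX instruction, which returns NaN whenever
   either input is NaN, while the vmaxnm* family further down uses
   plain smax*, i.e. FMAXNM, IEEE-754 maxNum, which prefers a number
   over a quiet NaN.  Observable difference (illustrative):

       float32x2_t a = vdup_n_f32 (__builtin_nanf (""));
       float32x2_t b = vdup_n_f32 (1.0f);
       vmax_f32 (a, b);   // NaN in both lanes
       vmaxnm_f32 (a, b); // 1.0f in both lanes
*/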
((__always_inline__)) vmaxq_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_smaxv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmaxq_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_smaxv4si (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmaxq_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmaxq_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmaxq_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vpmax_s8 (int8x8_t a, int8x8_t b) { return __builtin_aarch64_smaxpv8qi (a, b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vpmax_s16 (int16x4_t a, int16x4_t b) { return __builtin_aarch64_smaxpv4hi (a, b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vpmax_s32 (int32x2_t a, int32x2_t b) { return __builtin_aarch64_smaxpv2si (a, b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vpmax_u8 (uint8x8_t a, uint8x8_t b) { return (uint8x8_t) __builtin_aarch64_umaxpv8qi ((int8x8_t) a, (int8x8_t) b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vpmax_u16 (uint16x4_t a, uint16x4_t b) { return (uint16x4_t) __builtin_aarch64_umaxpv4hi ((int16x4_t) a, (int16x4_t) b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vpmax_u32 (uint32x2_t a, uint32x2_t b) { return (uint32x2_t) __builtin_aarch64_umaxpv2si ((int32x2_t) a, (int32x2_t) b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vpmaxq_s8 (int8x16_t a, int8x16_t b) { return __builtin_aarch64_smaxpv16qi (a, b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vpmaxq_s16 (int16x8_t a, int16x8_t b) { return __builtin_aarch64_smaxpv8hi (a, b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vpmaxq_s32 (int32x4_t a, int32x4_t b) { return __builtin_aarch64_smaxpv4si (a, b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vpmaxq_u8 (uint8x16_t a, uint8x16_t b) { return (uint8x16_t) __builtin_aarch64_umaxpv16qi ((int8x16_t) a, (int8x16_t) b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vpmaxq_u16 (uint16x8_t a, uint16x8_t b) { return (uint16x8_t) __builtin_aarch64_umaxpv8hi ((int16x8_t) a, (int16x8_t) b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vpmaxq_u32 (uint32x4_t a, uint32x4_t b) { return (uint32x4_t) __builtin_aarch64_umaxpv4si ((int32x4_t) a, (int32x4_t) b); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vpmax_f32 (float32x2_t a, float32x2_t b) { return __builtin_aarch64_smax_nanpv2sf (a, b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vpmaxq_f32 (float32x4_t a, float32x4_t b) { return __builtin_aarch64_smax_nanpv4sf (a, b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vpmaxq_f64 (float64x2_t a, float64x2_t b) { return 
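/* The vpmax* intrinsics above are pairwise: adjacent pairs of the
   first operand are reduced into the low half of the result and pairs
   of the second operand into the high half, i.e.
   { max(a0,a1), max(a2,a3), ..., max(b0,b1), ... }.  For example
   (illustrative):

       int8x8_t a = { 1, 9, 2, 8, 3, 7, 4, 6 };
       int8x8_t b = { 0, 5, 0, 5, 0, 5, 0, 5 };
       vpmax_s8 (a, b); // { 9, 8, 7, 6, 5, 5, 5, 5 }
*/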
__builtin_aarch64_smax_nanpv2df (a, b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vpmaxqd_f64 (float64x2_t a) { return __builtin_aarch64_reduc_smax_nan_scal_v2df (a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vpmaxs_f32 (float32x2_t a) { return __builtin_aarch64_reduc_smax_nan_scal_v2sf (a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vpmaxnm_f32 (float32x2_t a, float32x2_t b) { return __builtin_aarch64_smaxpv2sf (a, b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vpmaxnmq_f32 (float32x4_t a, float32x4_t b) { return __builtin_aarch64_smaxpv4sf (a, b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vpmaxnmq_f64 (float64x2_t a, float64x2_t b) { return __builtin_aarch64_smaxpv2df (a, b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vpmaxnmqd_f64 (float64x2_t a) { return __builtin_aarch64_reduc_smax_scal_v2df (a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vpmaxnms_f32 (float32x2_t a) { return __builtin_aarch64_reduc_smax_scal_v2sf (a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vpmin_s8 (int8x8_t a, int8x8_t b) { return __builtin_aarch64_sminpv8qi (a, b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vpmin_s16 (int16x4_t a, int16x4_t b) { return __builtin_aarch64_sminpv4hi (a, b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vpmin_s32 (int32x2_t a, int32x2_t b) { return __builtin_aarch64_sminpv2si (a, b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vpmin_u8 (uint8x8_t a, uint8x8_t b) { return (uint8x8_t) __builtin_aarch64_uminpv8qi ((int8x8_t) a, (int8x8_t) b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vpmin_u16 (uint16x4_t a, uint16x4_t b) { return (uint16x4_t) __builtin_aarch64_uminpv4hi ((int16x4_t) a, (int16x4_t) b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vpmin_u32 (uint32x2_t a, uint32x2_t b) { return (uint32x2_t) __builtin_aarch64_uminpv2si ((int32x2_t) a, (int32x2_t) b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vpminq_s8 (int8x16_t a, int8x16_t b) { return __builtin_aarch64_sminpv16qi (a, b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vpminq_s16 (int16x8_t a, int16x8_t b) { return __builtin_aarch64_sminpv8hi (a, b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vpminq_s32 (int32x4_t a, int32x4_t b) { return __builtin_aarch64_sminpv4si (a, b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vpminq_u8 (uint8x16_t a, uint8x16_t b) { return (uint8x16_t) __builtin_aarch64_uminpv16qi ((int8x16_t) a, (int8x16_t) b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vpminq_u16 (uint16x8_t a, uint16x8_t b) { return (uint16x8_t) __builtin_aarch64_uminpv8hi ((int16x8_t) a, (int16x8_t) b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vpminq_u32 (uint32x4_t a, uint32x4_t b) { return (uint32x4_t) __builtin_aarch64_uminpv4si ((int32x4_t) a, (int32x4_t) b); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vpmin_f32 (float32x2_t a, float32x2_t b) { return __builtin_aarch64_smin_nanpv2sf (a, b); } __extension__ static __inline 
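/* A few pairwise forms above take a single operand and reduce its two
   elements to a scalar: vpmaxs_f32 yields max(a[0], a[1]) of a
   float32x2_t and vpmaxqd_f64 does the same for a float64x2_t, via the
   reduc_smax_nan_scal builtins (NaN-propagating); the vpmaxnm/vpminnm
   variants again map to the smaxp/sminp builtins, i.e. the
   non-NaN-propagating FMAXNMP/FMINNMP forms.  Sketch (illustrative):

       float64x2_t d = { 2.5, -1.0 };
       float64_t m = vpmaxqd_f64 (d); // 2.5
*/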
float32x4_t __attribute__ ((__always_inline__)) vpminq_f32 (float32x4_t a, float32x4_t b) { return __builtin_aarch64_smin_nanpv4sf (a, b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vpminq_f64 (float64x2_t a, float64x2_t b) { return __builtin_aarch64_smin_nanpv2df (a, b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vpminqd_f64 (float64x2_t a) { return __builtin_aarch64_reduc_smin_nan_scal_v2df (a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vpmins_f32 (float32x2_t a) { return __builtin_aarch64_reduc_smin_nan_scal_v2sf (a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vpminnm_f32 (float32x2_t a, float32x2_t b) { return __builtin_aarch64_sminpv2sf (a, b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vpminnmq_f32 (float32x4_t a, float32x4_t b) { return __builtin_aarch64_sminpv4sf (a, b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vpminnmq_f64 (float64x2_t a, float64x2_t b) { return __builtin_aarch64_sminpv2df (a, b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vpminnmqd_f64 (float64x2_t a) { return __builtin_aarch64_reduc_smin_scal_v2df (a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vpminnms_f32 (float32x2_t a) { return __builtin_aarch64_reduc_smin_scal_v2sf (a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmaxnm_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_aarch64_smaxv2sf (__a, __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmaxnmq_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_aarch64_smaxv4sf (__a, __b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmaxnmq_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_aarch64_smaxv2df (__a, __b); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmaxv_f32 (float32x2_t __a) { return __builtin_aarch64_reduc_smax_nan_scal_v2sf (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vmaxv_s8 (int8x8_t __a) { return __builtin_aarch64_reduc_smax_scal_v8qi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vmaxv_s16 (int16x4_t __a) { return __builtin_aarch64_reduc_smax_scal_v4hi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vmaxv_s32 (int32x2_t __a) { return __builtin_aarch64_reduc_smax_scal_v2si (__a); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vmaxv_u8 (uint8x8_t __a) { return __builtin_aarch64_reduc_umax_scal_v8qi_uu (__a); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vmaxv_u16 (uint16x4_t __a) { return __builtin_aarch64_reduc_umax_scal_v4hi_uu (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vmaxv_u32 (uint32x2_t __a) { return __builtin_aarch64_reduc_umax_scal_v2si_uu (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmaxvq_f32 (float32x4_t __a) { return __builtin_aarch64_reduc_smax_nan_scal_v4sf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmaxvq_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_smax_nan_scal_v2df (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vmaxvq_s8 (int8x16_t __a) { return 
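/* vmaxv*, and the vminv* forms below, reduce across all lanes of a
   single vector to one scalar (SMAXV/UMAXV and related reduction
   instructions); as with the binary ops, the float reductions come in
   a NaN-propagating flavour (vmaxv*, reduc_smax_nan_scal) and a
   maxNum flavour (vmaxnmv*, reduc_smax_scal).  Sketch (illustrative):

       int16x4_t v = { 3, -2, 11, 0 };
       int16_t hi = vmaxv_s16 (v); // 11
*/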
__builtin_aarch64_reduc_smax_scal_v16qi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vmaxvq_s16 (int16x8_t __a) { return __builtin_aarch64_reduc_smax_scal_v8hi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vmaxvq_s32 (int32x4_t __a) { return __builtin_aarch64_reduc_smax_scal_v4si (__a); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vmaxvq_u8 (uint8x16_t __a) { return __builtin_aarch64_reduc_umax_scal_v16qi_uu (__a); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vmaxvq_u16 (uint16x8_t __a) { return __builtin_aarch64_reduc_umax_scal_v8hi_uu (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vmaxvq_u32 (uint32x4_t __a) { return __builtin_aarch64_reduc_umax_scal_v4si_uu (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmaxnmv_f32 (float32x2_t __a) { return __builtin_aarch64_reduc_smax_scal_v2sf (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmaxnmvq_f32 (float32x4_t __a) { return __builtin_aarch64_reduc_smax_scal_v4sf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmaxnmvq_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_smax_scal_v2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmin_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_aarch64_smin_nanv2sf (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmin_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_sminv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmin_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_sminv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmin_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_sminv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmin_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmin_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmin_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vminq_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_aarch64_smin_nanv4sf (__a, __b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vminq_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_aarch64_smin_nanv2df (__a, __b); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vminq_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_aarch64_sminv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vminq_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_sminv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vminq_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_sminv4si (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ 
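/* The unsigned across-lane reductions here (vmaxv_u*, vminv_u*) call
   _uu-suffixed builtins that take and return unsigned types directly,
   whereas the element-wise unsigned ops earlier (vmax_u8 and the like)
   cast through the signed vector types around the umax/umin builtins;
   both spellings just reinterpret the bit pattern.  Sketch
   (illustrative):

       uint8x8_t v = { 200, 3, 180, 7, 9, 250, 0, 1 };
       uint8_t top = vmaxv_u8 (v); // 250
*/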
((__always_inline__)) vminq_u8 (uint8x16_t __a, uint8x16_t __b) { return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a, (int8x16_t) __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vminq_u16 (uint16x8_t __a, uint16x8_t __b) { return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a, (int16x8_t) __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vminq_u32 (uint32x4_t __a, uint32x4_t __b) { return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a, (int32x4_t) __b); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vminnm_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_aarch64_sminv2sf (__a, __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vminnmq_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_aarch64_sminv4sf (__a, __b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vminnmq_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_aarch64_sminv2df (__a, __b); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vminv_f32 (float32x2_t __a) { return __builtin_aarch64_reduc_smin_nan_scal_v2sf (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vminv_s8 (int8x8_t __a) { return __builtin_aarch64_reduc_smin_scal_v8qi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vminv_s16 (int16x4_t __a) { return __builtin_aarch64_reduc_smin_scal_v4hi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vminv_s32 (int32x2_t __a) { return __builtin_aarch64_reduc_smin_scal_v2si (__a); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vminv_u8 (uint8x8_t __a) { return __builtin_aarch64_reduc_umin_scal_v8qi_uu (__a); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vminv_u16 (uint16x4_t __a) { return __builtin_aarch64_reduc_umin_scal_v4hi_uu (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vminv_u32 (uint32x2_t __a) { return __builtin_aarch64_reduc_umin_scal_v2si_uu (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vminvq_f32 (float32x4_t __a) { return __builtin_aarch64_reduc_smin_nan_scal_v4sf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vminvq_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_smin_nan_scal_v2df (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vminvq_s8 (int8x16_t __a) { return __builtin_aarch64_reduc_smin_scal_v16qi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vminvq_s16 (int16x8_t __a) { return __builtin_aarch64_reduc_smin_scal_v8hi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vminvq_s32 (int32x4_t __a) { return __builtin_aarch64_reduc_smin_scal_v4si (__a); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vminvq_u8 (uint8x16_t __a) { return __builtin_aarch64_reduc_umin_scal_v16qi_uu (__a); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vminvq_u16 (uint16x8_t __a) { return __builtin_aarch64_reduc_umin_scal_v8hi_uu (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vminvq_u32 (uint32x4_t __a) { return __builtin_aarch64_reduc_umin_scal_v4si_uu (__a); } __extension__ static __inline float32_t __attribute__ 
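/* The vminv/vminvq reductions are the mirror image: one scalar minimum
   across all lanes, with the same float split between a NaN-propagating
   form (vminv -> FMINV) and the minNum-style vminnmv (FMINNMV). Sketch:
     uint8x8_t v = vdup_n_u8 (3);
     uint8_t m = vminv_u8 (v);          (m == 3)
*/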
((__always_inline__)) vminnmv_f32 (float32x2_t __a) { return __builtin_aarch64_reduc_smin_scal_v2sf (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vminnmvq_f32 (float32x4_t __a) { return __builtin_aarch64_reduc_smin_scal_v4sf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vminnmvq_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_smin_scal_v2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c) { return a + b * c; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c) { return __a + __b * __c; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) { return a + b * c; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c) { return a + b * c; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmla_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmla_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmla_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b, uint16x8_t __c, const int 
__lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b, uint32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __lane) { return (__a + (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c) { return a - b * c; } __extension__ static __inline 
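/* vmla/vmls and their _lane/_laneq variants are expressed as plain GNU
   vector arithmetic (a + b * c, a - b * c) instead of builtins, leaving
   instruction selection (MLA/MLS, FMUL+FADD, ...) to the compiler.
   __builtin_aarch64_im_lane_boundsi is a compile-time bounds check that
   rejects an out-of-range lane index; __c[__lane] then extracts one
   scalar, which GNU C broadcasts across the multiply. Sketch (acc, x,
   coeffs are illustrative names, not defined in this file):
     int16x4_t r = vmla_lane_s16 (acc, x, coeffs, 2);   acc + x * coeffs[2]
*/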
float64x1_t __attribute__ ((__always_inline__)) vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c) { return __a - __b * __c; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) { return a - b * c; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c) { return a - b * c; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmls_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmls_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmls_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b, uint16x8_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b, uint32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __lane) { return (__a - (__b * 
__extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __lane) { return (__a - (__b * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__c), sizeof(__c[0]), __lane); __c[__lane]; }))); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmov_n_f32 (float32_t __a) { return vdup_n_f32 (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmov_n_f64 (float64_t __a) { return (float64x1_t) {__a}; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vmov_n_p8 (poly8_t __a) { return vdup_n_p8 (__a); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vmov_n_p16 (poly16_t __a) { return vdup_n_p16 (__a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vmov_n_s8 (int8_t __a) { return vdup_n_s8 (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmov_n_s16 (int16_t __a) { return vdup_n_s16 (__a); } __extension__ static 
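/* vmov_n_* and vmovq_n_* are thin aliases for the corresponding vdup
   intrinsics: broadcast one scalar into every lane (DUP). The
   int64x1_t/uint64x1_t/float64x1_t forms build the single-lane vector
   directly with a compound literal instead of calling a builtin. Sketch:
     float32x4_t ones = vmovq_n_f32 (1.0f);     {1, 1, 1, 1}
*/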
__inline int32x2_t __attribute__ ((__always_inline__)) vmov_n_s32 (int32_t __a) { return vdup_n_s32 (__a); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vmov_n_s64 (int64_t __a) { return (int64x1_t) {__a}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vmov_n_u8 (uint8_t __a) { return vdup_n_u8 (__a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmov_n_u16 (uint16_t __a) { return vdup_n_u16 (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmov_n_u32 (uint32_t __a) { return vdup_n_u32 (__a); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vmov_n_u64 (uint64_t __a) { return (uint64x1_t) {__a}; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmovq_n_f32 (float32_t __a) { return vdupq_n_f32 (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmovq_n_f64 (float64_t __a) { return vdupq_n_f64 (__a); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vmovq_n_p8 (poly8_t __a) { return vdupq_n_p8 (__a); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vmovq_n_p16 (poly16_t __a) { return vdupq_n_p16 (__a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vmovq_n_s8 (int8_t __a) { return vdupq_n_s8 (__a); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmovq_n_s16 (int16_t __a) { return vdupq_n_s16 (__a); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmovq_n_s32 (int32_t __a) { return vdupq_n_s32 (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vmovq_n_s64 (int64_t __a) { return vdupq_n_s64 (__a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vmovq_n_u8 (uint8_t __a) { return vdupq_n_u8 (__a); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmovq_n_u16 (uint16_t __a) { return vdupq_n_u16 (__a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmovq_n_u32 (uint32_t __a) { return vdupq_n_u32 (__a); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vmovq_n_u64 (uint64_t __a) { return vdupq_n_u64 (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane) { return __a * __b; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), 
sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vmul_n_f64 (float64x1_t __a, float64_t __b) { return (float64x1_t) { vget_lane_f64 (__a, 0) * __b }; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulq_lane_f64 (float64x2_t __a, float64x1_t 
__b, const int __lane) { __builtin_aarch64_im_lane_boundsi (sizeof(__a), sizeof(__a[0]), __lane); return __a * __b[0]; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane) { return __a * __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vneg_f32 (float32x2_t __a) { return -__a; } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vneg_f64 (float64x1_t __a) { return -__a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vneg_s8 (int8x8_t __a) { return -__a; } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vneg_s16 (int16x4_t __a) { return -__a; } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vneg_s32 (int32x2_t __a) { return -__a; } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vneg_s64 (int64x1_t __a) { return -__a; } __extension__ static __inline float32x4_t 
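/* vmul_lane/vmul_laneq multiply a whole vector by one checked lane of a
   second vector, again via lane extract plus GNU vector-times-scalar
   arithmetic. vmulq_lane_f64 is the irregular case: __b has only one
   lane, so the bounds check is written against __a and the body simply
   reads __b[0]. The vneg family that follows is lane-wise negation.
   Sketch:
     float32x4_t r = vmulq_lane_f32 (x, w, 1);     x * w[1], lane-wise
*/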
__attribute__ ((__always_inline__)) vnegq_f32 (float32x4_t __a) { return -__a; } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vnegq_f64 (float64x2_t __a) { return -__a; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vnegq_s8 (int8x16_t __a) { return -__a; } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vnegq_s16 (int16x8_t __a) { return -__a; } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vnegq_s32 (int32x4_t __a) { return -__a; } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vnegq_s64 (int64x2_t __a) { return -__a; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vpadd_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_addpv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vpadd_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_addpv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vpadd_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_addpv2si (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vpadd_u8 (uint8x8_t __a, uint8x8_t __b) { return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a, (int8x8_t) __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vpadd_u16 (uint16x4_t __a, uint16x4_t __b) { return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a, (int16x4_t) __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vpadd_u32 (uint32x2_t __a, uint32x2_t __b) { return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a, (int32x2_t) __b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vpaddd_f64 (float64x2_t __a) { return __builtin_aarch64_reduc_plus_scal_v2df (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vpaddd_s64 (int64x2_t __a) { return __builtin_aarch64_addpdi (__a); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vpaddd_u64 (uint64x2_t __a) { return __builtin_aarch64_addpdi ((int64x2_t) __a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqabsq_s64 (int64x2_t __a) { return (int64x2_t) __builtin_aarch64_sqabsv2di (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqabsb_s8 (int8_t __a) { return (int8_t) __builtin_aarch64_sqabsqi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqabsh_s16 (int16_t __a) { return (int16_t) __builtin_aarch64_sqabshi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqabss_s32 (int32_t __a) { return (int32_t) __builtin_aarch64_sqabssi (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqabsd_s64 (int64_t __a) { return __builtin_aarch64_sqabsdi (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqaddb_s8 (int8_t __a, int8_t __b) { return (int8_t) __builtin_aarch64_sqaddqi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqaddh_s16 (int16_t __a, int16_t __b) { return (int16_t) __builtin_aarch64_sqaddhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqadds_s32 (int32_t __a, int32_t __b) { return (int32_t) __builtin_aarch64_sqaddsi (__a, __b); } __extension__ static __inline 
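/* vpadd adds horizontally: adjacent pairs are summed across the
   concatenation of the two operands (ADDP), and vpaddd_f64, vpaddd_s64
   and vpaddd_u64 collapse the two halves of a 128-bit vector into a
   single scalar. The scalar vqabs, vqneg and vqadd forms saturate
   instead of wrapping: vqabsb_s8 (-128) yields 127. Sketch:
     int32x2_t s = vpadd_s32 (a, b);     {a[0]+a[1], b[0]+b[1]}
*/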
int64_t __attribute__ ((__always_inline__)) vqaddd_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_sqadddi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqaddb_u8 (uint8_t __a, uint8_t __b) { return (uint8_t) __builtin_aarch64_uqaddqi_uuu (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqaddh_u16 (uint16_t __a, uint16_t __b) { return (uint16_t) __builtin_aarch64_uqaddhi_uuu (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqadds_u32 (uint32_t __a, uint32_t __b) { return (uint32_t) __builtin_aarch64_uqaddsi_uuu (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqaddd_u64 (uint64_t __a, uint64_t __b) { return __builtin_aarch64_uqadddi_uuu (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) { return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c) { return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c, int const __d) { return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c, int const __d) { return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c) { return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d) { return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d) { return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c) { return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) { return __builtin_aarch64_sqdmlalv2si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c) { return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c, int const __d) { return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c, int const __d) { return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c) { return __builtin_aarch64_sqdmlal2_nv4si (__a, 
__b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d) { return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d) { return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c) { return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlalh_s16 (int32_t __a, int16_t __b, int16_t __c) { return __builtin_aarch64_sqdmlalhi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlalh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlalh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqdmlal_laneqhi (__a, __b, __c, __d); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlals_s32 (int64_t __a, int32_t __b, int32_t __c) { return __builtin_aarch64_sqdmlalsi (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlals_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlals_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqdmlal_laneqsi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) { return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c) { return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c, int const __d) { return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c, int const __d) { return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c) { return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d) { return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d) { return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c) { return __builtin_aarch64_sqdmlsl_nv4hi 
(__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) { return __builtin_aarch64_sqdmlslv2si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c) { return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c, int const __d) { return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c, int const __d) { return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c) { return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d) { return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d) { return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c) { return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlslh_s16 (int32_t __a, int16_t __b, int16_t __c) { return __builtin_aarch64_sqdmlslhi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlslh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d) { return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmlslh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d) { return __builtin_aarch64_sqdmlsl_laneqhi (__a, __b, __c, __d); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlsls_s32 (int64_t __a, int32_t __b, int32_t __c) { return __builtin_aarch64_sqdmlslsi (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlsls_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d) { return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmlsls_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d) { return __builtin_aarch64_sqdmlsl_laneqsi (__a, __b, __c, __d); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c); } 
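/* The vqdmlal/vqdmlsl family above implements saturating doubling
   multiply-accumulate long (SQDMLAL/SQDMLSL): each product is doubled,
   widened, and added to or subtracted from the wide accumulator with
   saturation; _high variants consume the upper input halves, _lane/_n
   variants pick or broadcast one multiplier. A minimal usage sketch,
   assuming nothing beyond the intrinsics already defined above; the
   helper name example_sqdmlal_acc_s16 is hypothetical, not part of the
   original header. */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
example_sqdmlal_acc_s16 (int16x4_t __b, int16x4_t __c)
{
  /* Zero accumulator, then accumulate 2 * __b[i] * __c[i] into four
     32-bit lanes with saturation. */
  return vqdmlal_s16 (vmovq_n_s32 (0), __b, __c);
}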
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqdmulhh_s16 (int16_t __a, int16_t __b) { return (int16_t) __builtin_aarch64_sqdmulhhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqhi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmulhs_s32 (int32_t __a, int32_t __b) { return (int32_t) __builtin_aarch64_sqdmulhsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqdmulh_laneqsi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_sqdmullv4hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_high_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_sqdmull2v8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_high_lane_s16 (int16x8_t __a, int16x4_t __b, int const __c) { return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b,__c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c) { return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b,__c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_high_n_s16 (int16x8_t __a, int16_t __b) { return __builtin_aarch64_sqdmull2_nv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c) { return __builtin_aarch64_sqdmull_lanev4hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c) { return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqdmull_n_s16 (int16x4_t __a, int16_t __b) { return __builtin_aarch64_sqdmull_nv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_sqdmullv2si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_high_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_sqdmull2v4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_high_lane_s32 (int32x4_t __a, int32x2_t __b, int const __c) { return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t 
__b, int const __c) { return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_high_n_s32 (int32x4_t __a, int32_t __b) { return __builtin_aarch64_sqdmull2_nv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c) { return __builtin_aarch64_sqdmull_lanev2si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c) { return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqdmull_n_s32 (int32x2_t __a, int32_t __b) { return __builtin_aarch64_sqdmull_nv2si (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmullh_s16 (int16_t __a, int16_t __b) { return (int32_t) __builtin_aarch64_sqdmullhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmullh_lane_s16 (int16_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqdmullh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqdmull_laneqhi (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmulls_s32 (int32_t __a, int32_t __b) { return __builtin_aarch64_sqdmullsi (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmulls_lane_s32 (int32_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqdmulls_laneq_s32 (int32_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqdmull_laneqsi (__a, __b, __c); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqmovn_s16 (int16x8_t __a) { return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqmovn_s32 (int32x4_t __a) { return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqmovn_s64 (int64x2_t __a) { return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqmovn_u16 (uint16x8_t __a) { return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqmovn_u32 (uint32x4_t __a) { return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqmovn_u64 (uint64x2_t __a) { return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqmovnh_s16 (int16_t __a) { return (int8_t) __builtin_aarch64_sqmovnhi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqmovns_s32 (int32_t __a) { return (int16_t) __builtin_aarch64_sqmovnsi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqmovnd_s64 (int64_t __a) { return (int32_t) __builtin_aarch64_sqmovndi (__a); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqmovnh_u16 (uint16_t __a) 
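/* vqdmull is the widening half of the same family (saturating doubling
   multiply long), with the usual _high, _lane, _laneq and _n variants.
   The vqmovn group narrows with saturation (SQXTN/UQXTN): every wide
   lane is clamped to the range of the narrow element type. Sketch:
     int8x8_t n = vqmovn_s16 (vdupq_n_s16 (300));     every lane == 127
*/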
{ return (uint8_t) __builtin_aarch64_uqmovnhi (__a); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqmovns_u32 (uint32_t __a) { return (uint16_t) __builtin_aarch64_uqmovnsi (__a); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqmovnd_u64 (uint64_t __a) { return (uint32_t) __builtin_aarch64_uqmovndi (__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqmovun_s16 (int16x8_t __a) { return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqmovun_s32 (int32x4_t __a) { return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqmovun_s64 (int64x2_t __a) { return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqmovunh_s16 (int16_t __a) { return (int8_t) __builtin_aarch64_sqmovunhi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqmovuns_s32 (int32_t __a) { return (int16_t) __builtin_aarch64_sqmovunsi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqmovund_s64 (int64_t __a) { return (int32_t) __builtin_aarch64_sqmovundi (__a); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqnegq_s64 (int64x2_t __a) { return (int64x2_t) __builtin_aarch64_sqnegv2di (__a); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqnegb_s8 (int8_t __a) { return (int8_t) __builtin_aarch64_sqnegqi (__a); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqnegh_s16 (int16_t __a) { return (int16_t) __builtin_aarch64_sqneghi (__a); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqnegs_s32 (int32_t __a) { return (int32_t) __builtin_aarch64_sqnegsi (__a); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqnegd_s64 (int64_t __a) { return __builtin_aarch64_sqnegdi (__a); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmulhh_s16 (int16_t __a, int16_t __b) { return (int16_t) __builtin_aarch64_sqrdmulhhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqhi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ 
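/* vqmovun is the signed-to-unsigned saturating narrow (SQXTUN), so
   negative inputs clamp to 0; note that the scalar forms vqmovunh_s16,
   vqmovuns_s32 and vqmovund_s64 are declared here with signed return
   types even though the result is logically unsigned. vqrdmulh is the
   rounding variant of vqdmulh: a rounding bias is added before the
   doubled product's high half is taken. Sketch:
     uint8x8_t u = vqmovun_s16 (vdupq_n_s16 (-5));    every lane == 0
*/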
((__always_inline__)) vqrdmulhs_s32 (int32_t __a, int32_t __b) { return (int32_t) __builtin_aarch64_sqrdmulhsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c) { return __builtin_aarch64_sqrdmulh_laneqsi (__a, __b, __c); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqrshl_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_sqrshlv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrshl_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_sqrshlv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrshl_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_sqrshlv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqrshl_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_sqrshldi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqrshl_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_uqrshlv8qi_uus ( __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqrshl_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_uqrshlv4hi_uus ( __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqrshl_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_uqrshlv2si_uus ( __a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqrshl_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_uqrshldi_uus (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqrshlq_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_aarch64_sqrshlv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqrshlq_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_sqrshlv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqrshlq_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_sqrshlv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqrshlq_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_aarch64_sqrshlv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqrshlq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_uqrshlv16qi_uus ( __a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqrshlq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_uqrshlv8hi_uus ( __a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqrshlq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_uqrshlv4si_uus ( __a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vqrshlq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_uqrshlv2di_uus ( __a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqrshlb_s8 (int8_t __a, int8_t __b) { return __builtin_aarch64_sqrshlqi (__a, __b); } __extension__ static __inline int16_t __attribute__ 
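/* vqrshl is a saturating, rounding shift by a signed per-lane count: a
   positive count in __b shifts left with saturation, a negative count
   shifts right with rounding. This is why even the unsigned forms take
   a signed shift vector. Sketch:
     int32x2_t r = vqrshl_s32 (v, vmov_n_s32 (-2));   rounded v >> 2
*/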
((__always_inline__)) vqrshlh_s16 (int16_t __a, int16_t __b) { return __builtin_aarch64_sqrshlhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrshls_s32 (int32_t __a, int32_t __b) { return __builtin_aarch64_sqrshlsi (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqrshld_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_sqrshldi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqrshlb_u8 (uint8_t __a, uint8_t __b) { return __builtin_aarch64_uqrshlqi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqrshlh_u16 (uint16_t __a, uint16_t __b) { return __builtin_aarch64_uqrshlhi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqrshls_u32 (uint32_t __a, uint32_t __b) { return __builtin_aarch64_uqrshlsi_uus (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqrshld_u64 (uint64_t __a, uint64_t __b) { return __builtin_aarch64_uqrshldi_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqrshrn_n_s16 (int16x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqrshrn_n_s32 (int32x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqrshrn_n_s64 (int64x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqrshrn_n_u16 (uint16x8_t __a, const int __b) { return __builtin_aarch64_uqrshrn_nv8hi_uus ( __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqrshrn_n_u32 (uint32x4_t __a, const int __b) { return __builtin_aarch64_uqrshrn_nv4si_uus ( __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqrshrn_n_u64 (uint64x2_t __a, const int __b) { return __builtin_aarch64_uqrshrn_nv2di_uus ( __a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqrshrnh_n_s16 (int16_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqrshrn_nhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrshrns_n_s32 (int32_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqrshrn_nsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrshrnd_n_s64 (int64_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqrshrn_ndi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqrshrnh_n_u16 (uint16_t __a, const int __b) { return __builtin_aarch64_uqrshrn_nhi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqrshrns_n_u32 (uint32_t __a, const int __b) { return __builtin_aarch64_uqrshrn_nsi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqrshrnd_n_u64 (uint64_t __a, const int __b) { return __builtin_aarch64_uqrshrn_ndi_uus (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqrshrun_n_s16 (int16x8_t __a, const int __b) { return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ 
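/* vqrshrn_n fuses three steps: shift right by an immediate, round, and
   narrow with saturation (SQRSHRN/UQRSHRN); the builtin rejects an
   immediate outside [1, width of the narrow type] at compile time.
   vqrshrun_n is the signed-to-unsigned flavour (SQRSHRUN). Sketch
   (acc is an illustrative int32x4_t, not defined here):
     int16x4_t n = vqrshrn_n_s32 (acc, 15);
     computes (acc + (1 << 14)) >> 15, clamped to int16_t range.
*/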
((__always_inline__)) vqrshrun_n_s32 (int32x4_t __a, const int __b) { return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqrshrun_n_s64 (int64x2_t __a, const int __b) { return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqrshrunh_n_s16 (int16_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqrshrun_nhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqrshruns_n_s32 (int32_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqrshrun_nsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqrshrund_n_s64 (int64_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqrshrun_ndi (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqshl_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_sqshlv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqshl_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_sqshlv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqshl_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_sqshlv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqshl_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_sqshldi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqshl_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_uqshlv8qi_uus ( __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqshl_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_uqshlv4hi_uus ( __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqshl_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_uqshlv2si_uus ( __a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqshl_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_uqshldi_uus (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqshlq_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_aarch64_sqshlv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqshlq_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_sqshlv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqshlq_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_sqshlv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqshlq_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_aarch64_sqshlv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqshlq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_uqshlv16qi_uus ( __a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqshlq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_uqshlv8hi_uus ( __a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqshlq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_uqshlv4si_uus ( __a, __b); } __extension__ static __inline uint64x2_t __attribute__ 
((__always_inline__)) vqshlq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_uqshlv2di_uus ( __a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqshlb_s8 (int8_t __a, int8_t __b) { return __builtin_aarch64_sqshlqi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqshlh_s16 (int16_t __a, int16_t __b) { return __builtin_aarch64_sqshlhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqshls_s32 (int32_t __a, int32_t __b) { return __builtin_aarch64_sqshlsi (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqshld_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_sqshldi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqshlb_u8 (uint8_t __a, uint8_t __b) { return __builtin_aarch64_uqshlqi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqshlh_u16 (uint16_t __a, uint16_t __b) { return __builtin_aarch64_uqshlhi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqshls_u32 (uint32_t __a, uint32_t __b) { return __builtin_aarch64_uqshlsi_uus (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqshld_u64 (uint64_t __a, uint64_t __b) { return __builtin_aarch64_uqshldi_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqshl_n_s8 (int8x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqshl_n_s16 (int16x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqshl_n_s32 (int32x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vqshl_n_s64 (int64x1_t __a, const int __b) { return (int64x1_t) {__builtin_aarch64_sqshl_ndi (__a[0], __b)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqshl_n_u8 (uint8x8_t __a, const int __b) { return __builtin_aarch64_uqshl_nv8qi_uus (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqshl_n_u16 (uint16x4_t __a, const int __b) { return __builtin_aarch64_uqshl_nv4hi_uus (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqshl_n_u32 (uint32x2_t __a, const int __b) { return __builtin_aarch64_uqshl_nv2si_uus (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqshl_n_u64 (uint64x1_t __a, const int __b) { return (uint64x1_t) {__builtin_aarch64_uqshl_ndi_uus (__a[0], __b)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vqshlq_n_s8 (int8x16_t __a, const int __b) { return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vqshlq_n_s16 (int16x8_t __a, const int __b) { return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vqshlq_n_s32 (int32x4_t __a, const int __b) { return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vqshlq_n_s64 (int64x2_t __a, const 
int __b) { return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqshlq_n_u8 (uint8x16_t __a, const int __b) { return __builtin_aarch64_uqshl_nv16qi_uus (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqshlq_n_u16 (uint16x8_t __a, const int __b) { return __builtin_aarch64_uqshl_nv8hi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqshlq_n_u32 (uint32x4_t __a, const int __b) { return __builtin_aarch64_uqshl_nv4si_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vqshlq_n_u64 (uint64x2_t __a, const int __b) { return __builtin_aarch64_uqshl_nv2di_uus (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqshlb_n_s8 (int8_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqshl_nqi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqshlh_n_s16 (int16_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqshl_nhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqshls_n_s32 (int32_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqshl_nsi (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqshld_n_s64 (int64_t __a, const int __b) { return __builtin_aarch64_sqshl_ndi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqshlb_n_u8 (uint8_t __a, const int __b) { return __builtin_aarch64_uqshl_nqi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqshlh_n_u16 (uint16_t __a, const int __b) { return __builtin_aarch64_uqshl_nhi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqshls_n_u32 (uint32_t __a, const int __b) { return __builtin_aarch64_uqshl_nsi_uus (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqshld_n_u64 (uint64_t __a, const int __b) { return __builtin_aarch64_uqshl_ndi_uus (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqshlu_n_s8 (int8x8_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv8qi_uss (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqshlu_n_s16 (int16x4_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv4hi_uss (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqshlu_n_s32 (int32x2_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv2si_uss (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vqshlu_n_s64 (int64x1_t __a, const int __b) { return (uint64x1_t) {__builtin_aarch64_sqshlu_ndi_uss (__a[0], __b)}; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vqshluq_n_s8 (int8x16_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv16qi_uss (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vqshluq_n_s16 (int16x8_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv8hi_uss (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vqshluq_n_s32 (int32x4_t __a, const int __b) { return __builtin_aarch64_sqshlu_nv4si_uss (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vqshluq_n_s64 (int64x2_t __a, const 
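/* Editorial note: vqshl_n saturates a left shift by an immediate, and
   vqshlu_n (SQSHLU) shifts a *signed* input left while saturating to
   the *unsigned* range, so negative lanes clamp to zero.  Sketch with a
   hypothetical helper name:

       static inline uint8x8_t scale_to_u8 (int8x8_t v)
       {
         return vqshlu_n_s8 (v, 3);   // v << 3, clamped to [0, 255]
       }
*/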
int __b) { return __builtin_aarch64_sqshlu_nv2di_uss (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqshlub_n_s8 (int8_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqshlu_nqi_uss (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqshluh_n_s16 (int16_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqshlu_nhi_uss (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqshlus_n_s32 (int32_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqshlu_nsi_uss (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqshlud_n_s64 (int64_t __a, const int __b) { return __builtin_aarch64_sqshlu_ndi_uss (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vqshrn_n_s16 (int16x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vqshrn_n_s32 (int32x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vqshrn_n_s64 (int64x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqshrn_n_u16 (uint16x8_t __a, const int __b) { return __builtin_aarch64_uqshrn_nv8hi_uus ( __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqshrn_n_u32 (uint32x4_t __a, const int __b) { return __builtin_aarch64_uqshrn_nv4si_uus ( __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqshrn_n_u64 (uint64x2_t __a, const int __b) { return __builtin_aarch64_uqshrn_nv2di_uus ( __a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqshrnh_n_s16 (int16_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqshrn_nhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqshrns_n_s32 (int32_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqshrn_nsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqshrnd_n_s64 (int64_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqshrn_ndi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqshrnh_n_u16 (uint16_t __a, const int __b) { return __builtin_aarch64_uqshrn_nhi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqshrns_n_u32 (uint32_t __a, const int __b) { return __builtin_aarch64_uqshrn_nsi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqshrnd_n_u64 (uint64_t __a, const int __b) { return __builtin_aarch64_uqshrn_ndi_uus (__a, __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vqshrun_n_s16 (int16x8_t __a, const int __b) { return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vqshrun_n_s32 (int32x4_t __a, const int __b) { return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vqshrun_n_s64 (int64x2_t __a, const int __b) { return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b); } __extension__ static __inline int8_t 
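/* Editorial note: vqshrun_n (SQSHRUN) narrows a signed value to the
   next smaller unsigned type after a truncating right shift -- the
   usual final step when clamping filter accumulators to pixel range.
   Sketch with a hypothetical helper name:

       static inline uint8x8_t clamp_to_pixels (int16x8_t acc)
       {
         return vqshrun_n_s16 (acc, 6);   // acc >> 6, saturated to [0, 255]
       }
*/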
__attribute__ ((__always_inline__)) vqshrunh_n_s16 (int16_t __a, const int __b) { return (int8_t) __builtin_aarch64_sqshrun_nhi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqshruns_n_s32 (int32_t __a, const int __b) { return (int16_t) __builtin_aarch64_sqshrun_nsi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqshrund_n_s64 (int64_t __a, const int __b) { return (int32_t) __builtin_aarch64_sqshrun_ndi (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vqsubb_s8 (int8_t __a, int8_t __b) { return (int8_t) __builtin_aarch64_sqsubqi (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vqsubh_s16 (int16_t __a, int16_t __b) { return (int16_t) __builtin_aarch64_sqsubhi (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vqsubs_s32 (int32_t __a, int32_t __b) { return (int32_t) __builtin_aarch64_sqsubsi (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vqsubd_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_sqsubdi (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vqsubb_u8 (uint8_t __a, uint8_t __b) { return (uint8_t) __builtin_aarch64_uqsubqi_uuu (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vqsubh_u16 (uint16_t __a, uint16_t __b) { return (uint16_t) __builtin_aarch64_uqsubhi_uuu (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vqsubs_u32 (uint32_t __a, uint32_t __b) { return (uint32_t) __builtin_aarch64_uqsubsi_uuu (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vqsubd_u64 (uint64_t __a, uint64_t __b) { return __builtin_aarch64_uqsubdi_uuu (__a, __b); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vrbit_p8 (poly8x8_t __a) { return (poly8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrbit_s8 (int8x8_t __a) { return __builtin_aarch64_rbitv8qi (__a); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrbit_u8 (uint8x8_t __a) { return (uint8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vrbitq_p8 (poly8x16_t __a) { return (poly8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t)__a); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrbitq_s8 (int8x16_t __a) { return __builtin_aarch64_rbitv16qi (__a); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrbitq_u8 (uint8x16_t __a) { return (uint8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t) __a); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrecpe_u32 (uint32x2_t __a) { return (uint32x2_t) __builtin_aarch64_urecpev2si ((int32x2_t) __a); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrecpeq_u32 (uint32x4_t __a) { return (uint32x4_t) __builtin_aarch64_urecpev4si ((int32x4_t) __a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vrecpes_f32 (float32_t __a) { return __builtin_aarch64_frecpesf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vrecped_f64 (float64_t __a) { return __builtin_aarch64_frecpedf (__a); } __extension__ static __inline float32x2_t 
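/* Editorial note: vqsubb/vqsubh/vqsubs/vqsubd above are the scalar
   forms of saturating subtract, and vrbit reverses the bit order
   within each byte (the RBIT instruction applied per 8-bit lane). */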
__attribute__ ((__always_inline__)) vrecpe_f32 (float32x2_t __a) { return __builtin_aarch64_frecpev2sf (__a); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrecpeq_f32 (float32x4_t __a) { return __builtin_aarch64_frecpev4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrecpeq_f64 (float64x2_t __a) { return __builtin_aarch64_frecpev2df (__a); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vrecpss_f32 (float32_t __a, float32_t __b) { return __builtin_aarch64_frecpssf (__a, __b); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vrecpsd_f64 (float64_t __a, float64_t __b) { return __builtin_aarch64_frecpsdf (__a, __b); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrecps_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_aarch64_frecpsv2sf (__a, __b); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrecpsq_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_aarch64_frecpsv4sf (__a, __b); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrecpsq_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_aarch64_frecpsv2df (__a, __b); } __extension__ static __inline float32_t __attribute__ ((__always_inline__)) vrecpxs_f32 (float32_t __a) { return __builtin_aarch64_frecpxsf (__a); } __extension__ static __inline float64_t __attribute__ ((__always_inline__)) vrecpxd_f64 (float64_t __a) { return __builtin_aarch64_frecpxdf (__a); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vrev16_p8 (poly8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrev16_s8 (int8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrev16_u8 (uint8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vrev16q_p8 (poly8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrev16q_s8 (int8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrev16q_u8 (uint8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vrev32_p8 (poly8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vrev32_p16 (poly16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrev32_s8 (int8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrev32_s16 (int16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrev32_u8 (uint8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 3, 
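/* Editorial note: vrecpe gives only a low-precision reciprocal
   estimate; vrecps (FRECPS) computes the Newton-Raphson correction
   factor 2 - a * x, so each refinement step roughly doubles the number
   of accurate bits.  A minimal sketch, assuming a hypothetical helper:

       static inline float32x4_t approx_recip (float32x4_t a)
       {
         float32x4_t x = vrecpeq_f32 (a);          // rough 1/a
         x = vmulq_f32 (x, vrecpsq_f32 (a, x));    // first refinement
         x = vmulq_f32 (x, vrecpsq_f32 (a, x));    // second refinement
         return x;
       }
*/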
2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrev32_u16 (uint16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vrev32q_p8 (poly8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vrev32q_p16 (poly16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrev32q_s8 (int8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrev32q_s16 (int16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrev32q_u8 (uint8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrev32q_u16 (uint16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrev64_f32 (float32x2_t a) { return __builtin_shuffle (a, (uint32x2_t) { 1, 0 }); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vrev64_p8 (poly8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 }); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vrev64_p16 (poly16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 }); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrev64_s8 (int8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 }); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrev64_s16 (int16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 }); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrev64_s32 (int32x2_t a) { return __builtin_shuffle (a, (uint32x2_t) { 1, 0 }); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrev64_u8 (uint8x8_t a) { return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 }); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrev64_u16 (uint16x4_t a) { return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 }); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrev64_u32 (uint32x2_t a) { return __builtin_shuffle (a, (uint32x2_t) { 1, 0 }); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrev64q_f32 (float32x4_t a) { return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vrev64q_p8 (poly8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vrev64q_p16 (poly16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrev64q_s8 (int8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 7, 6, 
5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrev64q_s16 (int16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrev64q_s32 (int32x4_t a) { return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrev64q_u8 (uint8x16_t a) { return __builtin_shuffle (a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrev64q_u16 (uint16x8_t a) { return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 }); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrev64q_u32 (uint32x4_t a) { return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 }); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrnd_f32 (float32x2_t __a) { return __builtin_aarch64_btruncv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrnd_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_trunc (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndq_f32 (float32x4_t __a) { return __builtin_aarch64_btruncv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndq_f64 (float64x2_t __a) { return __builtin_aarch64_btruncv2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrnda_f32 (float32x2_t __a) { return __builtin_aarch64_roundv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrnda_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_round (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndaq_f32 (float32x4_t __a) { return __builtin_aarch64_roundv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndaq_f64 (float64x2_t __a) { return __builtin_aarch64_roundv2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrndi_f32 (float32x2_t __a) { return __builtin_aarch64_nearbyintv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrndi_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_nearbyint (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndiq_f32 (float32x4_t __a) { return __builtin_aarch64_nearbyintv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndiq_f64 (float64x2_t __a) { return __builtin_aarch64_nearbyintv2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrndm_f32 (float32x2_t __a) { return __builtin_aarch64_floorv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrndm_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_floor (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndmq_f32 (float32x4_t __a) { return __builtin_aarch64_floorv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndmq_f64 (float64x2_t __a) { return __builtin_aarch64_floorv2df (__a); } __extension__ static 
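/* Editorial note: the vrev16/vrev32/vrev64 shuffles defined above
   reverse the elements inside each 16-, 32- or 64-bit chunk; viewing a
   vector of 32-bit words as bytes, vrev32q_u8 is the NEON endian swap:

       // Byte-swap four u32 lanes (hypothetical variable w).
       uint32x4_t swapped =
         vreinterpretq_u32_u8 (vrev32q_u8 (vreinterpretq_u8_u32 (w)));
*/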
__inline float32x2_t __attribute__ ((__always_inline__)) vrndn_f32 (float32x2_t __a) { return __builtin_aarch64_frintnv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrndn_f64 (float64x1_t __a) { return (float64x1_t) {__builtin_aarch64_frintndf (__a[0])}; } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndnq_f32 (float32x4_t __a) { return __builtin_aarch64_frintnv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndnq_f64 (float64x2_t __a) { return __builtin_aarch64_frintnv2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrndp_f32 (float32x2_t __a) { return __builtin_aarch64_ceilv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrndp_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_ceil (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndpq_f32 (float32x4_t __a) { return __builtin_aarch64_ceilv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndpq_f64 (float64x2_t __a) { return __builtin_aarch64_ceilv2df (__a); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vrndx_f32 (float32x2_t __a) { return __builtin_aarch64_rintv2sf (__a); } __extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vrndx_f64 (float64x1_t __a) { return vset_lane_f64 (__builtin_rint (vget_lane_f64 (__a, 0)), __a, 0); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vrndxq_f32 (float32x4_t __a) { return __builtin_aarch64_rintv4sf (__a); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vrndxq_f64 (float64x2_t __a) { return __builtin_aarch64_rintv2df (__a); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrshl_s8 (int8x8_t __a, int8x8_t __b) { return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrshl_s16 (int16x4_t __a, int16x4_t __b) { return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrshl_s32 (int32x2_t __a, int32x2_t __b) { return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vrshl_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_srshldi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrshl_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_urshlv8qi_uus (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrshl_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_urshlv4hi_uus (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrshl_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_urshlv2si_uus (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vrshl_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_urshldi_uus (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrshlq_s8 (int8x16_t __a, int8x16_t __b) { return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ 
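/* Editorial note: the vrnd family maps one-to-one onto the FRINT
   instructions: vrnd truncates toward zero, vrndm is floor, vrndp is
   ceil, vrnda rounds to nearest with ties away from zero, vrndn rounds
   to nearest with ties to even, and vrndi/vrndx round using the current
   rounding mode (vrndx may raise the inexact exception, vrndi does
   not). */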
((__always_inline__)) vrshlq_s16 (int16x8_t __a, int16x8_t __b) { return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrshlq_s32 (int32x4_t __a, int32x4_t __b) { return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vrshlq_s64 (int64x2_t __a, int64x2_t __b) { return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrshlq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_urshlv16qi_uus (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrshlq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_urshlv8hi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrshlq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_urshlv4si_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vrshlq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_urshlv2di_uus (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vrshld_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_srshldi (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vrshld_u64 (uint64_t __a, int64_t __b) { return __builtin_aarch64_urshldi_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrshr_n_s8 (int8x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrshr_n_s16 (int16x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrshr_n_s32 (int32x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vrshr_n_s64 (int64x1_t __a, const int __b) { return (int64x1_t) {__builtin_aarch64_srshr_ndi (__a[0], __b)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrshr_n_u8 (uint8x8_t __a, const int __b) { return __builtin_aarch64_urshr_nv8qi_uus (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrshr_n_u16 (uint16x4_t __a, const int __b) { return __builtin_aarch64_urshr_nv4hi_uus (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrshr_n_u32 (uint32x2_t __a, const int __b) { return __builtin_aarch64_urshr_nv2si_uus (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vrshr_n_u64 (uint64x1_t __a, const int __b) { return (uint64x1_t) {__builtin_aarch64_urshr_ndi_uus (__a[0], __b)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrshrq_n_s8 (int8x16_t __a, const int __b) { return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrshrq_n_s16 (int16x8_t __a, const int __b) { return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrshrq_n_s32 (int32x4_t __a, const int __b) { return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b); } __extension__ static 
__inline int64x2_t __attribute__ ((__always_inline__)) vrshrq_n_s64 (int64x2_t __a, const int __b) { return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrshrq_n_u8 (uint8x16_t __a, const int __b) { return __builtin_aarch64_urshr_nv16qi_uus (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrshrq_n_u16 (uint16x8_t __a, const int __b) { return __builtin_aarch64_urshr_nv8hi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrshrq_n_u32 (uint32x4_t __a, const int __b) { return __builtin_aarch64_urshr_nv4si_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vrshrq_n_u64 (uint64x2_t __a, const int __b) { return __builtin_aarch64_urshr_nv2di_uus (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vrshrd_n_s64 (int64_t __a, const int __b) { return __builtin_aarch64_srshr_ndi (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vrshrd_n_u64 (uint64_t __a, const int __b) { return __builtin_aarch64_urshr_ndi_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) { return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) { return (int64x1_t) {__builtin_aarch64_srsra_ndi (__a[0], __b[0], __c)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) { return __builtin_aarch64_ursra_nv8qi_uuus (__a, __b, __c); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) { return __builtin_aarch64_ursra_nv4hi_uuus (__a, __b, __c); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) { return __builtin_aarch64_ursra_nv2si_uuus (__a, __b, __c); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) { return (uint64x1_t) {__builtin_aarch64_ursra_ndi_uuus (__a[0], __b[0], __c)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) { return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) { return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) 
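/* Editorial note: vrsra_n fuses a rounding right shift with an
   accumulate, i.e. a + ((b + (1 << (n - 1))) >> n) per lane, which is
   handy for running sums of scaled samples.  Sketch with hypothetical
   variables:

       acc = vrsra_n_u16 (acc, sample, 4);   // acc += (sample + 8) >> 4
*/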
vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) { return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) { return __builtin_aarch64_ursra_nv16qi_uuus (__a, __b, __c); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) { return __builtin_aarch64_ursra_nv8hi_uuus (__a, __b, __c); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) { return __builtin_aarch64_ursra_nv4si_uuus (__a, __b, __c); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) { return __builtin_aarch64_ursra_nv2di_uuus (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vrsrad_n_s64 (int64_t __a, int64_t __b, const int __c) { return __builtin_aarch64_srsra_ndi (__a, __b, __c); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vrsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c) { return __builtin_aarch64_ursra_ndi_uuus (__a, __b, __c); } #pragma GCC push_options #pragma GCC target ("+nothing+crypto") __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { return __builtin_aarch64_crypto_sha1cv4si_uuuu (hash_abcd, hash_e, wk); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { return __builtin_aarch64_crypto_sha1mv4si_uuuu (hash_abcd, hash_e, wk); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) { return __builtin_aarch64_crypto_sha1pv4si_uuuu (hash_abcd, hash_e, wk); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vsha1h_u32 (uint32_t hash_e) { return __builtin_aarch64_crypto_sha1hsi_uu (hash_e); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11) { return __builtin_aarch64_crypto_sha1su0v4si_uuuu (w0_3, w4_7, w8_11); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15) { return __builtin_aarch64_crypto_sha1su1v4si_uuu (tw0_3, w12_15); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) { return __builtin_aarch64_crypto_sha256hv4si_uuuu (hash_abcd, hash_efgh, wk); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) { return __builtin_aarch64_crypto_sha256h2v4si_uuuu (hash_efgh, hash_abcd, wk); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7) { return __builtin_aarch64_crypto_sha256su0v4si_uuu (w0_3, w4_7); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsha256su1q_u32 (uint32x4_t tw0_3, uint32x4_t w8_11, uint32x4_t w12_15) { return __builtin_aarch64_crypto_sha256su1v4si_uuuu (tw0_3, w8_11, w12_15); } __extension__ static __inline poly128_t 
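/* Editorial note: the SHA-256 intrinsics above are used in pairs, four
   message words per call; the commonly seen round step looks like this
   (hypothetical variable names):

       uint32x4_t tmp = abcd;
       abcd = vsha256hq_u32 (abcd, efgh, wk);    // hash update, part 1
       efgh = vsha256h2q_u32 (efgh, tmp, wk);    // hash update, part 2
*/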
__attribute__ ((__always_inline__)) vmull_p64 (poly64_t a, poly64_t b) { return __builtin_aarch64_crypto_pmulldi_ppp (a, b); } __extension__ static __inline poly128_t __attribute__ ((__always_inline__)) vmull_high_p64 (poly64x2_t a, poly64x2_t b) { return __builtin_aarch64_crypto_pmullv2di_ppp (a, b); } #pragma GCC pop_options __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vshl_n_s8 (int8x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_ashlv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vshl_n_s16 (int16x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_ashlv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vshl_n_s32 (int32x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_ashlv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vshl_n_s64 (int64x1_t __a, const int __b) { return (int64x1_t) {__builtin_aarch64_ashldi (__a[0], __b)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vshl_n_u8 (uint8x8_t __a, const int __b) { return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vshl_n_u16 (uint16x4_t __a, const int __b) { return (uint16x4_t) __builtin_aarch64_ashlv4hi ((int16x4_t) __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vshl_n_u32 (uint32x2_t __a, const int __b) { return (uint32x2_t) __builtin_aarch64_ashlv2si ((int32x2_t) __a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vshl_n_u64 (uint64x1_t __a, const int __b) { return (uint64x1_t) {__builtin_aarch64_ashldi ((int64_t) __a[0], __b)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vshlq_n_s8 (int8x16_t __a, const int __b) { return (int8x16_t) __builtin_aarch64_ashlv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vshlq_n_s16 (int16x8_t __a, const int __b) { return (int16x8_t) __builtin_aarch64_ashlv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vshlq_n_s32 (int32x4_t __a, const int __b) { return (int32x4_t) __builtin_aarch64_ashlv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vshlq_n_s64 (int64x2_t __a, const int __b) { return (int64x2_t) __builtin_aarch64_ashlv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vshlq_n_u8 (uint8x16_t __a, const int __b) { return (uint8x16_t) __builtin_aarch64_ashlv16qi ((int8x16_t) __a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vshlq_n_u16 (uint16x8_t __a, const int __b) { return (uint16x8_t) __builtin_aarch64_ashlv8hi ((int16x8_t) __a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vshlq_n_u32 (uint32x4_t __a, const int __b) { return (uint32x4_t) __builtin_aarch64_ashlv4si ((int32x4_t) __a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vshlq_n_u64 (uint64x2_t __a, const int __b) { return (uint64x2_t) __builtin_aarch64_ashlv2di ((int64x2_t) __a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vshld_n_s64 (int64_t __a, const int __b) { return __builtin_aarch64_ashldi (__a, __b); } __extension__ static __inline uint64_t __attribute__ 
((__always_inline__)) vshld_n_u64 (uint64_t __a, const int __b) { return (uint64_t) __builtin_aarch64_ashldi (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vshl_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_aarch64_sshlv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vshl_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_aarch64_sshlv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vshl_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_aarch64_sshlv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vshl_s64 (int64x1_t __a, int64x1_t __b) { return (int64x1_t) {__builtin_aarch64_sshldi (__a[0], __b[0])}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vshl_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_ushlv8qi_uus (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vshl_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_ushlv4hi_uus (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vshl_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_ushlv2si_uus (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vshl_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_ushldi_uus (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vshlq_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_aarch64_sshlv16qi (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vshlq_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_aarch64_sshlv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vshlq_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_aarch64_sshlv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vshlq_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_aarch64_sshlv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vshlq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_ushlv16qi_uus (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vshlq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_ushlv8hi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vshlq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_ushlv4si_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vshlq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_ushlv2di_uus (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vshld_s64 (int64_t __a, int64_t __b) { return __builtin_aarch64_sshldi (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vshld_u64 (uint64_t __a, uint64_t __b) { return __builtin_aarch64_ushldi_uus (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vshll_high_n_s8 (int8x16_t __a, const int __b) { return __builtin_aarch64_sshll2_nv16qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vshll_high_n_s16 (int16x8_t __a, const int __b) { return __builtin_aarch64_sshll2_nv8hi (__a, __b); } 
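/* Editorial addition, a minimal sketch with a hypothetical name: unlike
   the immediate forms above, the register forms of vshl take a signed
   per-lane count and shift right when that count is negative (SSHL). */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
example_variable_shift (int32x4_t __a, int32x4_t __counts)
{
  /* Positive counts shift left; negative counts shift right. */
  return vshlq_s32 (__a, __counts);
}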
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vshll_high_n_s32 (int32x4_t __a, const int __b) { return __builtin_aarch64_sshll2_nv4si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vshll_high_n_u8 (uint8x16_t __a, const int __b) { return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vshll_high_n_u16 (uint16x8_t __a, const int __b) { return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vshll_high_n_u32 (uint32x4_t __a, const int __b) { return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vshll_n_s8 (int8x8_t __a, const int __b) { return __builtin_aarch64_sshll_nv8qi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vshll_n_s16 (int16x4_t __a, const int __b) { return __builtin_aarch64_sshll_nv4hi (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vshll_n_s32 (int32x2_t __a, const int __b) { return __builtin_aarch64_sshll_nv2si (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vshll_n_u8 (uint8x8_t __a, const int __b) { return __builtin_aarch64_ushll_nv8qi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vshll_n_u16 (uint16x4_t __a, const int __b) { return __builtin_aarch64_ushll_nv4hi_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vshll_n_u32 (uint32x2_t __a, const int __b) { return __builtin_aarch64_ushll_nv2si_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vshr_n_s8 (int8x8_t __a, const int __b) { return (int8x8_t) __builtin_aarch64_ashrv8qi (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vshr_n_s16 (int16x4_t __a, const int __b) { return (int16x4_t) __builtin_aarch64_ashrv4hi (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vshr_n_s32 (int32x2_t __a, const int __b) { return (int32x2_t) __builtin_aarch64_ashrv2si (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vshr_n_s64 (int64x1_t __a, const int __b) { return (int64x1_t) {__builtin_aarch64_ashr_simddi (__a[0], __b)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vshr_n_u8 (uint8x8_t __a, const int __b) { return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vshr_n_u16 (uint16x4_t __a, const int __b) { return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vshr_n_u32 (uint32x2_t __a, const int __b) { return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vshr_n_u64 (uint64x1_t __a, const int __b) { return (uint64x1_t) {__builtin_aarch64_lshr_simddi_uus ( __a[0], __b)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vshrq_n_s8 (int8x16_t __a, const int __b) { return (int8x16_t) __builtin_aarch64_ashrv16qi (__a, __b); } __extension__ static __inline 
int16x8_t __attribute__ ((__always_inline__)) vshrq_n_s16 (int16x8_t __a, const int __b) { return (int16x8_t) __builtin_aarch64_ashrv8hi (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vshrq_n_s32 (int32x4_t __a, const int __b) { return (int32x4_t) __builtin_aarch64_ashrv4si (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vshrq_n_s64 (int64x2_t __a, const int __b) { return (int64x2_t) __builtin_aarch64_ashrv2di (__a, __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vshrq_n_u8 (uint8x16_t __a, const int __b) { return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vshrq_n_u16 (uint16x8_t __a, const int __b) { return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vshrq_n_u32 (uint32x4_t __a, const int __b) { return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vshrq_n_u64 (uint64x2_t __a, const int __b) { return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vshrd_n_s64 (int64_t __a, const int __b) { return __builtin_aarch64_ashr_simddi (__a, __b); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vshrd_n_u64 (uint64_t __a, const int __b) { return __builtin_aarch64_lshr_simddi_uus (__a, __b); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) { return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) { return (int64x1_t) {__builtin_aarch64_ssli_ndi (__a[0], __b[0], __c)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) { return __builtin_aarch64_usli_nv8qi_uuus (__a, __b, __c); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) { return __builtin_aarch64_usli_nv4hi_uuus (__a, __b, __c); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) { return __builtin_aarch64_usli_nv2si_uuus (__a, __b, __c); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) { return (uint64x1_t) {__builtin_aarch64_usli_ndi_uuus (__a[0], __b[0], __c)}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) { return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsliq_n_s16 
(int16x8_t __a, int16x8_t __b, const int __c) { return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) { return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) { return __builtin_aarch64_usli_nv16qi_uuus (__a, __b, __c); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) { return __builtin_aarch64_usli_nv8hi_uuus (__a, __b, __c); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) { return __builtin_aarch64_usli_nv4si_uuus (__a, __b, __c); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) { return __builtin_aarch64_usli_nv2di_uuus (__a, __b, __c); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vslid_n_s64 (int64_t __a, int64_t __b, const int __c) { return __builtin_aarch64_ssli_ndi (__a, __b, __c); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vslid_n_u64 (uint64_t __a, uint64_t __b, const int __c) { return __builtin_aarch64_usli_ndi_uuus (__a, __b, __c); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsqadd_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_usqaddv8qi_uus (__a, __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsqadd_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_usqaddv4hi_uus (__a, __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsqadd_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_usqaddv2si_uus (__a, __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsqadd_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_usqadddi_uus (__a[0], __b[0])}; } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsqaddq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_usqaddv16qi_uus (__a, __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsqaddq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_usqaddv8hi_uus (__a, __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsqaddq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_usqaddv4si_uus (__a, __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsqaddq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_usqaddv2di_uus (__a, __b); } __extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vsqaddb_u8 (uint8_t __a, int8_t __b) { return __builtin_aarch64_usqaddqi_uus (__a, __b); } __extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vsqaddh_u16 (uint16_t __a, int16_t __b) { return __builtin_aarch64_usqaddhi_uus (__a, __b); } __extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vsqadds_u32 (uint32_t __a, 
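/* Editorial note: vsli_n (SLI) shifts the second operand left and
   inserts it into the first, preserving the low n bits of the
   destination -- useful for packing bit fields without an explicit
   mask.  Sketch with hypothetical variables:

       packed = vsli_n_u8 (lo, hi, 4);   // bits 7:4 from hi, 3:0 from lo
*/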
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsqadd_u8 (uint8x8_t __a, int8x8_t __b) { return __builtin_aarch64_usqaddv8qi_uus (__a, __b); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsqadd_u16 (uint16x4_t __a, int16x4_t __b) { return __builtin_aarch64_usqaddv4hi_uus (__a, __b); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsqadd_u32 (uint32x2_t __a, int32x2_t __b) { return __builtin_aarch64_usqaddv2si_uus (__a, __b); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsqadd_u64 (uint64x1_t __a, int64x1_t __b) { return (uint64x1_t) {__builtin_aarch64_usqadddi_uus (__a[0], __b[0])}; }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsqaddq_u8 (uint8x16_t __a, int8x16_t __b) { return __builtin_aarch64_usqaddv16qi_uus (__a, __b); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsqaddq_u16 (uint16x8_t __a, int16x8_t __b) { return __builtin_aarch64_usqaddv8hi_uus (__a, __b); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsqaddq_u32 (uint32x4_t __a, int32x4_t __b) { return __builtin_aarch64_usqaddv4si_uus (__a, __b); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsqaddq_u64 (uint64x2_t __a, int64x2_t __b) { return __builtin_aarch64_usqaddv2di_uus (__a, __b); }
__extension__ static __inline uint8_t __attribute__ ((__always_inline__)) vsqaddb_u8 (uint8_t __a, int8_t __b) { return __builtin_aarch64_usqaddqi_uus (__a, __b); }
__extension__ static __inline uint16_t __attribute__ ((__always_inline__)) vsqaddh_u16 (uint16_t __a, int16_t __b) { return __builtin_aarch64_usqaddhi_uus (__a, __b); }
__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) vsqadds_u32 (uint32_t __a, int32_t __b) { return __builtin_aarch64_usqaddsi_uus (__a, __b); }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vsqaddd_u64 (uint64_t __a, int64_t __b) { return __builtin_aarch64_usqadddi_uus (__a, __b); }
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vsqrt_f32 (float32x2_t a) { return __builtin_aarch64_sqrtv2sf (a); }
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vsqrtq_f32 (float32x4_t a) { return __builtin_aarch64_sqrtv4sf (a); }
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__)) vsqrt_f64 (float64x1_t a) { return (float64x1_t) { __builtin_aarch64_sqrtdf (a[0]) }; }
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vsqrtq_f64 (float64x2_t a) { return __builtin_aarch64_sqrtv2df (a); }
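/* Illustrative sketch (hypothetical helper, not part of the original
   header): vsqaddq_u8 applies a signed per-pixel delta to unsigned
   8-bit data without wrap-around.  */
static __inline uint8x16_t example_apply_signed_delta (uint8x16_t __pixels, int8x16_t __delta) { return vsqaddq_u8 (__pixels, __delta); }
/* vsra (SSRA/USRA): shift each lane of the second operand right by an
   immediate and accumulate the result into the first operand.  */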
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) { return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c); }
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) { return (int64x1_t) {__builtin_aarch64_ssra_ndi (__a[0], __b[0], __c)}; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) { return __builtin_aarch64_usra_nv8qi_uuus (__a, __b, __c); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) { return __builtin_aarch64_usra_nv4hi_uuus (__a, __b, __c); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) { return __builtin_aarch64_usra_nv2si_uuus (__a, __b, __c); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) { return (uint64x1_t) {__builtin_aarch64_usra_ndi_uuus (__a[0], __b[0], __c)}; }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) { return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) { return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c); }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) { return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) { return __builtin_aarch64_usra_nv16qi_uuus (__a, __b, __c); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) { return __builtin_aarch64_usra_nv8hi_uuus (__a, __b, __c); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) { return __builtin_aarch64_usra_nv4si_uuus (__a, __b, __c); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) { return __builtin_aarch64_usra_nv2di_uuus (__a, __b, __c); }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vsrad_n_s64 (int64_t __a, int64_t __b, const int __c) { return __builtin_aarch64_ssra_ndi (__a, __b, __c); }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c) { return __builtin_aarch64_usra_ndi_uuus (__a, __b, __c); }
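/* Illustrative sketch (hypothetical helper, not part of the original
   header): one SSRA computes __x + (__y >> 2) per 16-bit lane.  */
static __inline int16x8_t example_add_quarter (int16x8_t __x, int16x8_t __y) { return vsraq_n_s16 (__x, __y, 2); }
/* vsri (SRI): shift each lane of the second operand right by an
   immediate and insert it into the first operand, preserving the top
   __c bits of each destination lane.  */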
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) { return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c); }
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) { return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c); }
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) { return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c); }
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) { return (int64x1_t) {__builtin_aarch64_ssri_ndi (__a[0], __b[0], __c)}; }
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) { return __builtin_aarch64_usri_nv8qi_uuus (__a, __b, __c); }
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) { return __builtin_aarch64_usri_nv4hi_uuus (__a, __b, __c); }
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) { return __builtin_aarch64_usri_nv2si_uuus (__a, __b, __c); }
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) { return (uint64x1_t) {__builtin_aarch64_usri_ndi_uuus (__a[0], __b[0], __c)}; }
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) { return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c); }
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) { return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c); }
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) { return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c); }
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) { return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c); }
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) { return __builtin_aarch64_usri_nv16qi_uuus (__a, __b, __c); }
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) { return __builtin_aarch64_usri_nv8hi_uuus (__a, __b, __c); }
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) { return __builtin_aarch64_usri_nv4si_uuus (__a, __b, __c); }
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) { return __builtin_aarch64_usri_nv2di_uuus (__a, __b, __c); }
__extension__ static __inline int64_t __attribute__ ((__always_inline__)) vsrid_n_s64 (int64_t __a, int64_t __b, const int __c) { return __builtin_aarch64_ssri_ndi (__a, __b, __c); }
__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vsrid_n_u64 (uint64_t __a, uint64_t __b, const int __c) { return __builtin_aarch64_usri_ndi_uuus (__a, __b, __c); }
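/* vst1/vst1q: store one 64-bit (d-register) or 128-bit (q-register)
   vector; the single-element 64-bit forms reduce to a plain scalar
   store.  */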
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_f16 (float16_t *__a, float16x4_t __b) { __builtin_aarch64_st1v4hf (__a, __b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_f32 (float32_t *a, float32x2_t b) { __builtin_aarch64_st1v2sf ((__builtin_aarch64_simd_sf *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_f64 (float64_t *a, float64x1_t b) { *a = b[0]; }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_p8 (poly8_t *a, poly8x8_t b) { __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, (int8x8_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_p16 (poly16_t *a, poly16x4_t b) { __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a, (int16x4_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_s8 (int8_t *a, int8x8_t b) { __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_s16 (int16_t *a, int16x4_t b) { __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_s32 (int32_t *a, int32x2_t b) { __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_s64 (int64_t *a, int64x1_t b) { *a = b[0]; }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_u8 (uint8_t *a, uint8x8_t b) { __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, (int8x8_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_u16 (uint16_t *a, uint16x4_t b) { __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a, (int16x4_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_u32 (uint32_t *a, uint32x2_t b) { __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a, (int32x2_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_u64 (uint64_t *a, uint64x1_t b) { *a = b[0]; }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_f16 (float16_t *__a, float16x8_t __b) { __builtin_aarch64_st1v8hf (__a, __b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_f32 (float32_t *a, float32x4_t b) { __builtin_aarch64_st1v4sf ((__builtin_aarch64_simd_sf *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_f64 (float64_t *a, float64x2_t b) { __builtin_aarch64_st1v2df ((__builtin_aarch64_simd_df *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_p8 (poly8_t *a, poly8x16_t b) { __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, (int8x16_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_p16 (poly16_t *a, poly16x8_t b) { __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a, (int16x8_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_s8 (int8_t *a, int8x16_t b) { __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_s16 (int16_t *a, int16x8_t b) { __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_s32 (int32_t *a, int32x4_t b) { __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_s64 (int64_t *a, int64x2_t b) { __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a, b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_u8 (uint8_t *a, uint8x16_t b) { __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, (int8x16_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_u16 (uint16_t *a, uint16x8_t b) { __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a, (int16x8_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_u32 (uint32_t *a, uint32x4_t b) { __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a, (int32x4_t) b); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_u64 (uint64_t *a, uint64x2_t b) { __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a, (int64x2_t) b); }
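/* vst1_lane/vst1q_lane: store a single lane as a scalar;
   __builtin_aarch64_im_lane_boundsi only validates the lane index at
   compile time.  */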
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_f16 (float16_t *__a, float16x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_f32 (float32_t *__a, float32x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_f64 (float64_t *__a, float64x1_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_p8 (poly8_t *__a, poly8x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_p16 (poly16_t *__a, poly16x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_s8 (int8_t *__a, int8x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_s16 (int16_t *__a, int16x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_s32 (int32_t *__a, int32x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_s64 (int64_t *__a, int64x1_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_u8 (uint8_t *__a, uint8x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_u16 (uint16_t *__a, uint16x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_u32 (uint32_t *__a, uint32x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1_lane_u64 (uint64_t *__a, uint64x1_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_f16 (float16_t *__a, float16x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_f32 (float32_t *__a, float32x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_f64 (float64_t *__a, float64x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_p8 (poly8_t *__a, poly8x16_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_p16 (poly16_t *__a, poly16x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_s8 (int8_t *__a, int8x16_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_s16 (int16_t *__a, int16x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_s32 (int32_t *__a, int32x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_s64 (int64_t *__a, int64x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_u8 (uint8_t *__a, uint8x16_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_u16 (uint16_t *__a, uint16x8_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_u32 (uint32_t *__a, uint32x4_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst1q_lane_u64 (uint64_t *__a, uint64x2_t __b, const int __lane) { *__a = __extension__ ({ __builtin_aarch64_im_lane_boundsi (sizeof(__b), sizeof(__b[0]), __lane); __b[__lane]; }); }
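/* vst2 (ST2): two-way interleaved store.  The d-register forms widen
   each half-vector with a zero upper half via vcombine/vcreate, pack
   the pair into an __builtin_aarch64_simd_oi register tuple, and issue
   one structured store.  */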
__extension__ static __inline void vst2_s64 (int64_t * __a, int64x1x2_t val) { __builtin_aarch64_simd_oi __o; int64x2x2_t temp; temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (((int64_t) 0))); temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (((int64_t) 0))); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1); __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst2_u64 (uint64_t * __a, uint64x1x2_t val) { __builtin_aarch64_simd_oi __o; uint64x2x2_t temp; temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (((uint64_t) 0))); temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1); __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst2_f64 (float64_t * __a, float64x1x2_t val) { __builtin_aarch64_simd_oi __o; float64x2x2_t temp; temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (((uint64_t) 0))); temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1); __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o); }
__extension__ static __inline void vst2_s8 (int8_t * __a, int8x8x2_t val) { __builtin_aarch64_simd_oi __o; int8x16x2_t temp; temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (((int64_t) 0))); temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (((int64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_p8 (poly8_t * __a, poly8x8x2_t val) { __builtin_aarch64_simd_oi __o; poly8x16x2_t temp; temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (((uint64_t) 0))); temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_s16 (int16_t * __a, int16x4x2_t val) { __builtin_aarch64_simd_oi __o; int16x8x2_t temp; temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (((int64_t) 0))); temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (((int64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1); __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_p16 (poly16_t * __a, poly16x4x2_t val) { __builtin_aarch64_simd_oi __o; poly16x8x2_t temp; temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (((uint64_t) 0))); temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1); __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_s32 (int32_t * __a, int32x2x2_t val) { __builtin_aarch64_simd_oi __o; int32x4x2_t temp; temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (((int64_t) 0))); temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (((int64_t) 0))); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1); __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_u8 (uint8_t * __a, uint8x8x2_t val) { __builtin_aarch64_simd_oi __o; uint8x16x2_t temp; temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (((uint64_t) 0))); temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_u16 (uint16_t * __a, uint16x4x2_t val) { __builtin_aarch64_simd_oi __o; uint16x8x2_t temp; temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (((uint64_t) 0))); temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1); __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_u32 (uint32_t * __a, uint32x2x2_t val) { __builtin_aarch64_simd_oi __o; uint32x4x2_t temp; temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (((uint64_t) 0))); temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1); __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_f16 (float16_t * __a, float16x4x2_t val) { __builtin_aarch64_simd_oi __o; float16x8x2_t temp; temp.val[0] = vcombine_f16 (val.val[0], vcreate_f16 (((uint64_t) 0))); temp.val[1] = vcombine_f16 (val.val[1], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv8hf (__o, temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hf (__o, temp.val[1], 1); __builtin_aarch64_st2v4hf (__a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2_f32 (float32_t * __a, float32x2x2_t val) { __builtin_aarch64_simd_oi __o; float32x4x2_t temp; temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (((uint64_t) 0))); temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1); __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o); }
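/* Illustrative sketch (hypothetical helper, not part of the original
   header): one ST2 interleaves separate real/imaginary pairs into an
   re,im,re,im memory layout.  */
static __inline void example_pack_complex (float32_t *__out, float32x2x2_t __ri) { vst2_f32 (__out, __ri); }
/* The vst2q forms below pack the two full q-registers directly; no
   zero-extending vcombine step is needed.  */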
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_s8 (int8_t * __a, int8x16x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1); __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_p8 (poly8_t * __a, poly8x16x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1); __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_s16 (int16_t * __a, int16x8x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1); __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_p16 (poly16_t * __a, poly16x8x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1); __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_s32 (int32_t * __a, int32x4x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1); __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_s64 (int64_t * __a, int64x2x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1); __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_u8 (uint8_t * __a, uint8x16x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1); __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_u16 (uint16_t * __a, uint16x8x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1); __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_u32 (uint32_t * __a, uint32x4x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1); __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_u64 (uint64_t * __a, uint64x2x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1); __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_f16 (float16_t * __a, float16x8x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv8hf (__o, val.val[0], 0); __o = __builtin_aarch64_set_qregoiv8hf (__o, val.val[1], 1); __builtin_aarch64_st2v8hf (__a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_f32 (float32_t * __a, float32x4x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[1], 1); __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst2q_f64 (float64_t * __a, float64x2x2_t val) { __builtin_aarch64_simd_oi __o; __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[1], 1); __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o); }
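/* vst3 (ST3): three-way interleaved store, using the same widening
   scheme as vst2 but through a three-register
   __builtin_aarch64_simd_ci tuple.  */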
__extension__ static __inline void vst3_s64 (int64_t * __a, int64x1x3_t val) { __builtin_aarch64_simd_ci __o; int64x2x3_t temp; temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (((int64_t) 0))); temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (((int64_t) 0))); temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (((int64_t) 0))); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2); __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst3_u64 (uint64_t * __a, uint64x1x3_t val) { __builtin_aarch64_simd_ci __o; uint64x2x3_t temp; temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (((uint64_t) 0))); temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (((uint64_t) 0))); temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2); __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst3_f64 (float64_t * __a, float64x1x3_t val) { __builtin_aarch64_simd_ci __o; float64x2x3_t temp; temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (((uint64_t) 0))); temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (((uint64_t) 0))); temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2); __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o); }
__extension__ static __inline void vst3_s8 (int8_t * __a, int8x8x3_t val) { __builtin_aarch64_simd_ci __o; int8x16x3_t temp; temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (((int64_t) 0))); temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (((int64_t) 0))); temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (((int64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2); __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_p8 (poly8_t * __a, poly8x8x3_t val) { __builtin_aarch64_simd_ci __o; poly8x16x3_t temp; temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (((uint64_t) 0))); temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (((uint64_t) 0))); temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2); __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_s16 (int16_t * __a, int16x4x3_t val) { __builtin_aarch64_simd_ci __o; int16x8x3_t temp; temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (((int64_t) 0))); temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (((int64_t) 0))); temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (((int64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2); __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_p16 (poly16_t * __a, poly16x4x3_t val) { __builtin_aarch64_simd_ci __o; poly16x8x3_t temp; temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (((uint64_t) 0))); temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (((uint64_t) 0))); temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2); __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_s32 (int32_t * __a, int32x2x3_t val) { __builtin_aarch64_simd_ci __o; int32x4x3_t temp; temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (((int64_t) 0))); temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (((int64_t) 0))); temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (((int64_t) 0))); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2); __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_u8 (uint8_t * __a, uint8x8x3_t val) { __builtin_aarch64_simd_ci __o; uint8x16x3_t temp; temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (((uint64_t) 0))); temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (((uint64_t) 0))); temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2); __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_u16 (uint16_t * __a, uint16x4x3_t val) { __builtin_aarch64_simd_ci __o; uint16x8x3_t temp; temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (((uint64_t) 0))); temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (((uint64_t) 0))); temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2); __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_u32 (uint32_t * __a, uint32x2x3_t val) { __builtin_aarch64_simd_ci __o; uint32x4x3_t temp; temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (((uint64_t) 0))); temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (((uint64_t) 0))); temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2); __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_f16 (float16_t * __a, float16x4x3_t val) { __builtin_aarch64_simd_ci __o; float16x8x3_t temp; temp.val[0] = vcombine_f16 (val.val[0], vcreate_f16 (((uint64_t) 0))); temp.val[1] = vcombine_f16 (val.val[1], vcreate_f16 (((uint64_t) 0))); temp.val[2] = vcombine_f16 (val.val[2], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) temp.val[2], 2); __builtin_aarch64_st3v4hf ((__builtin_aarch64_simd_hf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3_f32 (float32_t * __a, float32x2x3_t val) { __builtin_aarch64_simd_ci __o; float32x4x3_t temp; temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (((uint64_t) 0))); temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (((uint64_t) 0))); temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2); __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_s8 (int8_t * __a, int8x16x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2); __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_p8 (poly8_t * __a, poly8x16x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2); __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_s16 (int16_t * __a, int16x8x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2); __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_p16 (poly16_t * __a, poly16x8x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2); __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_s32 (int32_t * __a, int32x4x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2); __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_s64 (int64_t * __a, int64x2x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2); __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_u8 (uint8_t * __a, uint8x16x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2); __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_u16 (uint16_t * __a, uint16x8x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2); __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_u32 (uint32_t * __a, uint32x4x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2); __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_u64 (uint64_t * __a, uint64x2x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2); __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_f16 (float16_t * __a, float16x8x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) val.val[2], 2); __builtin_aarch64_st3v8hf ((__builtin_aarch64_simd_hf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_f32 (float32_t * __a, float32x4x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[2], 2); __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst3q_f64 (float64_t * __a, float64x2x3_t val) { __builtin_aarch64_simd_ci __o; __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[2], 2); __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o); }
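/* vst4 (ST4): four-way interleaved store through a four-register
   __builtin_aarch64_simd_xi tuple; the d-register forms again widen
   each half-vector with a zero upper half first.  */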
__extension__ static __inline void vst4_s64 (int64_t * __a, int64x1x4_t val) { __builtin_aarch64_simd_xi __o; int64x2x4_t temp; temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (((int64_t) 0))); temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (((int64_t) 0))); temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (((int64_t) 0))); temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (((int64_t) 0))); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3); __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst4_u64 (uint64_t * __a, uint64x1x4_t val) { __builtin_aarch64_simd_xi __o; uint64x2x4_t temp; temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (((uint64_t) 0))); temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (((uint64_t) 0))); temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (((uint64_t) 0))); temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3); __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void vst4_f64 (float64_t * __a, float64x1x4_t val) { __builtin_aarch64_simd_xi __o; float64x2x4_t temp; temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (((uint64_t) 0))); temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (((uint64_t) 0))); temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (((uint64_t) 0))); temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[3], 3); __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o); }
__extension__ static __inline void vst4_s8 (int8_t * __a, int8x8x4_t val) { __builtin_aarch64_simd_xi __o; int8x16x4_t temp; temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (((int64_t) 0))); temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (((int64_t) 0))); temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (((int64_t) 0))); temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (((int64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3); __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_p8 (poly8_t * __a, poly8x8x4_t val) { __builtin_aarch64_simd_xi __o; poly8x16x4_t temp; temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (((uint64_t) 0))); temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (((uint64_t) 0))); temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (((uint64_t) 0))); temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3); __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_s16 (int16_t * __a, int16x4x4_t val) { __builtin_aarch64_simd_xi __o; int16x8x4_t temp; temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (((int64_t) 0))); temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (((int64_t) 0))); temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (((int64_t) 0))); temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (((int64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3); __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_p16 (poly16_t * __a, poly16x4x4_t val) { __builtin_aarch64_simd_xi __o; poly16x8x4_t temp; temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (((uint64_t) 0))); temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (((uint64_t) 0))); temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (((uint64_t) 0))); temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3); __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_s32 (int32_t * __a, int32x2x4_t val) { __builtin_aarch64_simd_xi __o; int32x4x4_t temp; temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (((int64_t) 0))); temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (((int64_t) 0))); temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (((int64_t) 0))); temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (((int64_t) 0))); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3); __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_u8 (uint8_t * __a, uint8x8x4_t val) { __builtin_aarch64_simd_xi __o; uint8x16x4_t temp; temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (((uint64_t) 0))); temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (((uint64_t) 0))); temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (((uint64_t) 0))); temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3); __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_u16 (uint16_t * __a, uint16x4x4_t val) { __builtin_aarch64_simd_xi __o; uint16x8x4_t temp; temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (((uint64_t) 0))); temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (((uint64_t) 0))); temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (((uint64_t) 0))); temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3); __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_u32 (uint32_t * __a, uint32x2x4_t val) { __builtin_aarch64_simd_xi __o; uint32x4x4_t temp; temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (((uint64_t) 0))); temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (((uint64_t) 0))); temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (((uint64_t) 0))); temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3); __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_f16 (float16_t * __a, float16x4x4_t val) { __builtin_aarch64_simd_xi __o; float16x8x4_t temp; temp.val[0] = vcombine_f16 (val.val[0], vcreate_f16 (((uint64_t) 0))); temp.val[1] = vcombine_f16 (val.val[1], vcreate_f16 (((uint64_t) 0))); temp.val[2] = vcombine_f16 (val.val[2], vcreate_f16 (((uint64_t) 0))); temp.val[3] = vcombine_f16 (val.val[3], vcreate_f16 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) temp.val[3], 3); __builtin_aarch64_st4v4hf ((__builtin_aarch64_simd_hf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4_f32 (float32_t * __a, float32x2x4_t val) { __builtin_aarch64_simd_xi __o; float32x4x4_t temp; temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (((uint64_t) 0))); temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (((uint64_t) 0))); temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (((uint64_t) 0))); temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (((uint64_t) 0))); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[3], 3); __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_s8 (int8_t * __a, int8x16x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3); __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_p8 (poly8_t * __a, poly8x16x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3); __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_s16 (int16_t * __a, int16x8x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3); __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_p16 (poly16_t * __a, poly16x8x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3); __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_s32 (int32_t * __a, int32x4x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3); __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_s64 (int64_t * __a, int64x2x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3); __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_u8 (uint8_t * __a, uint8x16x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3); __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_u16 (uint16_t * __a, uint16x8x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3); __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_u32 (uint32_t * __a, uint32x4x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3); __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_u64 (uint64_t * __a, uint64x2x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3); __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_f16 (float16_t * __a, float16x8x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[3], 3); __builtin_aarch64_st4v8hf ((__builtin_aarch64_simd_hf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_f32 (float32_t * __a, float32x4x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3); __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o); }
__extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_f64 (float64_t * __a, float64x2x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3); __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o); }
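/* vsubd: plain 64-bit scalar subtract.  vtbx1/vtbx3 emulate the
   AArch32 TBX behaviour: AArch64 TBL yields 0 for out-of-range
   indices, so the index is compared against the table size (8 or 24
   bytes) and vbsl selects between the lookup result and the fallback
   __r.  */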
((__always_inline__)) vst4q_f16 (float16_t * __a, float16x8x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) val.val[3], 3); __builtin_aarch64_st4v8hf ((__builtin_aarch64_simd_hf *) __a, __o); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_f32 (float32_t * __a, float32x4x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3); __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o); } __extension__ static __inline void __attribute__ ((__always_inline__)) vst4q_f64 (float64_t * __a, float64x2x4_t val) { __builtin_aarch64_simd_xi __o; __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2); __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3); __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vsubd_s64 (int64_t __a, int64_t __b) { return __a - __b; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vsubd_u64 (uint64_t __a, uint64_t __b) { return __a - __b; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbx1_s8 (int8x8_t __r, int8x8_t __tab, int8x8_t __idx) { uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx), vmov_n_u8 (8)); int8x8_t __tbl = vtbl1_s8 (__tab, __idx); return vbsl_s8 (__mask, __tbl, __r); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbx1_u8 (uint8x8_t __r, uint8x8_t __tab, uint8x8_t __idx) { uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8)); uint8x8_t __tbl = vtbl1_u8 (__tab, __idx); return vbsl_u8 (__mask, __tbl, __r); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbx1_p8 (poly8x8_t __r, poly8x8_t __tab, uint8x8_t __idx) { uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8)); poly8x8_t __tbl = vtbl1_p8 (__tab, __idx); return vbsl_p8 (__mask, __tbl, __r); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbx3_s8 (int8x8_t __r, int8x8x3_t __tab, int8x8_t __idx) { uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx), vmov_n_u8 (24)); int8x8_t __tbl = vtbl3_s8 (__tab, __idx); return vbsl_s8 (__mask, __tbl, __r); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbx3_u8 (uint8x8_t __r, uint8x8x3_t __tab, uint8x8_t __idx) { uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24)); uint8x8_t __tbl = vtbl3_u8 (__tab, __idx); return vbsl_u8 (__mask, __tbl, __r); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbx3_p8 (poly8x8_t __r, poly8x8x3_t __tab, uint8x8_t __idx) { uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24)); poly8x8_t __tbl = vtbl3_p8 (__tab, __idx); return vbsl_p8 (__mask, __tbl, __r); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtbx4_s8 (int8x8_t __r, int8x8x4_t 
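// vtbx1/vtbx3 above show how this header recovers ARMv7 VTBX semantics on
// AArch64: the native TBL instruction zeroes any lane whose index is out of
// range, while VTBX must leave that destination lane untouched. The header
// therefore builds a mask of in-range indices (8 bytes per table register,
// hence idx < 8 for vtbx1 and idx < 24 for vtbx3) and BSL-selects between
// the TBL result and the original destination. The same pattern for a
// hypothetical two-register table, as a sketch:
//
//   uint8x8_t tbx2_u8_sketch(uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx) {
//     uint8x8_t in_range  = vclt_u8(idx, vmov_n_u8(16)); // 2 regs = 16 bytes
//     uint8x8_t looked_up = vtbl2_u8(tab, idx);          // out of range -> 0
//     return vbsl_u8(in_range, looked_up, r);            // else keep r
//   }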
__tab, int8x8_t __idx) { int8x8_t result; int8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]); temp.val[1] = vcombine_s8 (__tab.val[2], __tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = __builtin_aarch64_tbx4v8qi (__r, __o, __idx); return result; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtbx4_u8 (uint8x8_t __r, uint8x8x4_t __tab, uint8x8_t __idx) { uint8x8_t result; uint8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]); temp.val[1] = vcombine_u8 (__tab.val[2], __tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (uint8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o, (int8x8_t)__idx); return result; } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtbx4_p8 (poly8x8_t __r, poly8x8x4_t __tab, uint8x8_t __idx) { poly8x8_t result; poly8x16x2_t temp; __builtin_aarch64_simd_oi __o; temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]); temp.val[1] = vcombine_p8 (__tab.val[2], __tab.val[3]); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0); __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1); result = (poly8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o, (int8x8_t)__idx); return result; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vtrn1_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtrn1_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vtrn1_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtrn1_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vtrn1_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vtrn1_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtrn1_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtrn1_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtrn1_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vtrn1q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vtrn1q_f64 
(float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vtrn1q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vtrn1q_p16 (poly16x8_t __a, poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vtrn1q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vtrn1q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vtrn1q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vtrn1q_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtrn1q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtrn1q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtrn1q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtrn1q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vtrn2_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vtrn2_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vtrn2_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vtrn2_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vtrn2_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vtrn2_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtrn2_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) 
vtrn2_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtrn2_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vtrn2q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vtrn2q_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vtrn2q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vtrn2q_p16 (poly16x8_t __a, poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vtrn2q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vtrn2q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vtrn2q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vtrn2q_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtrn2q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtrn2q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtrn2q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtrn2q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vtrn_f32 (float32x2_t a, float32x2_t b) { return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)}; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vtrn_p8 (poly8x8_t a, poly8x8_t b) { return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)}; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vtrn_p16 (poly16x4_t a, poly16x4_t b) { return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)}; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vtrn_s8 (int8x8_t a, int8x8_t b) { return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)}; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vtrn_s16 (int16x4_t a, int16x4_t b) { return 
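// The vtrn1*/vtrn2* shuffles above interleave the even (trn1) or odd (trn2)
// lanes of two vectors, and the combined vtrn_* forms pair both results in
// an x2 struct. With a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}:
//   vtrn1_s16(a, b) -> {a0, b0, a2, b2}   (shuffle mask {0,4,2,6})
//   vtrn2_s16(a, b) -> {a1, b1, a3, b3}   (shuffle mask {1,5,3,7})
// Chained at two element widths this gives the in-register matrix transpose
// that DCT/IDCT kernels such as the one this file was preprocessed from rely
// on. A sketch of a 4x4 transpose of int16x4_t rows r0..r3:
//
//   int16x4x2_t t01 = vtrn_s16(r0, r1);   // pair rows 0/1
//   int16x4x2_t t23 = vtrn_s16(r2, r3);   // pair rows 2/3
//   int32x2x2_t c02 = vtrn_s32(vreinterpret_s32_s16(t01.val[0]),
//                              vreinterpret_s32_s16(t23.val[0]));
//   int32x2x2_t c13 = vtrn_s32(vreinterpret_s32_s16(t01.val[1]),
//                              vreinterpret_s32_s16(t23.val[1]));
//   // transposed rows: c02.val[0], c13.val[0], c02.val[1], c13.val[1]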
(int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)}; } __extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__)) vtrn_s32 (int32x2_t a, int32x2_t b) { return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)}; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vtrn_u8 (uint8x8_t a, uint8x8_t b) { return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)}; } __extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__)) vtrn_u16 (uint16x4_t a, uint16x4_t b) { return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)}; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vtrn_u32 (uint32x2_t a, uint32x2_t b) { return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)}; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vtrnq_f32 (float32x4_t a, float32x4_t b) { return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)}; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vtrnq_p8 (poly8x16_t a, poly8x16_t b) { return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)}; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vtrnq_p16 (poly16x8_t a, poly16x8_t b) { return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)}; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vtrnq_s8 (int8x16_t a, int8x16_t b) { return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)}; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vtrnq_s16 (int16x8_t a, int16x8_t b) { return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)}; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vtrnq_s32 (int32x4_t a, int32x4_t b) { return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)}; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vtrnq_u8 (uint8x16_t a, uint8x16_t b) { return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)}; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vtrnq_u16 (uint16x8_t a, uint16x8_t b) { return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)}; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vtrnq_u32 (uint32x4_t a, uint32x4_t b) { return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)}; } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtst_s8 (int8x8_t __a, int8x8_t __b) { return (uint8x8_t) ((__a & __b) != 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtst_s16 (int16x4_t __a, int16x4_t __b) { return (uint16x4_t) ((__a & __b) != 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtst_s32 (int32x2_t __a, int32x2_t __b) { return (uint32x2_t) ((__a & __b) != 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) vtst_s64 (int64x1_t __a, int64x1_t __b) { return (uint64x1_t) ((__a & __b) != ((int64_t) 0)); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtst_u8 (uint8x8_t __a, uint8x8_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtst_u16 (uint16x4_t __a, uint16x4_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtst_u32 (uint32x2_t __a, uint32x2_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint64x1_t 
__attribute__ ((__always_inline__)) vtst_u64 (uint64x1_t __a, uint64x1_t __b) { return ((__a & __b) != ((uint64_t) 0)); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtstq_s8 (int8x16_t __a, int8x16_t __b) { return (uint8x16_t) ((__a & __b) != 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtstq_s16 (int16x8_t __a, int16x8_t __b) { return (uint16x8_t) ((__a & __b) != 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtstq_s32 (int32x4_t __a, int32x4_t __b) { return (uint32x4_t) ((__a & __b) != 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtstq_s64 (int64x2_t __a, int64x2_t __b) { return (uint64x2_t) ((__a & __b) != ((int64_t) 0)); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtstq_u8 (uint8x16_t __a, uint8x16_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtstq_u16 (uint16x8_t __a, uint16x8_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtstq_u32 (uint32x4_t __a, uint32x4_t __b) { return ((__a & __b) != 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtstq_u64 (uint64x2_t __a, uint64x2_t __b) { return ((__a & __b) != ((uint64_t) 0)); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vtstd_s64 (int64_t __a, int64_t __b) { return (__a & __b) ? -1ll : 0ll; } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) vtstd_u64 (uint64_t __a, uint64_t __b) { return (__a & __b) ? -1ll : 0ll; } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vuqadd_s8 (int8x8_t __a, uint8x8_t __b) { return __builtin_aarch64_suqaddv8qi_ssu (__a, __b); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vuqadd_s16 (int16x4_t __a, uint16x4_t __b) { return __builtin_aarch64_suqaddv4hi_ssu (__a, __b); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vuqadd_s32 (int32x2_t __a, uint32x2_t __b) { return __builtin_aarch64_suqaddv2si_ssu (__a, __b); } __extension__ static __inline int64x1_t __attribute__ ((__always_inline__)) vuqadd_s64 (int64x1_t __a, uint64x1_t __b) { return (int64x1_t) {__builtin_aarch64_suqadddi_ssu (__a[0], __b[0])}; } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vuqaddq_s8 (int8x16_t __a, uint8x16_t __b) { return __builtin_aarch64_suqaddv16qi_ssu (__a, __b); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vuqaddq_s16 (int16x8_t __a, uint16x8_t __b) { return __builtin_aarch64_suqaddv8hi_ssu (__a, __b); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vuqaddq_s32 (int32x4_t __a, uint32x4_t __b) { return __builtin_aarch64_suqaddv4si_ssu (__a, __b); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vuqaddq_s64 (int64x2_t __a, uint64x2_t __b) { return __builtin_aarch64_suqaddv2di_ssu (__a, __b); } __extension__ static __inline int8_t __attribute__ ((__always_inline__)) vuqaddb_s8 (int8_t __a, uint8_t __b) { return __builtin_aarch64_suqaddqi_ssu (__a, __b); } __extension__ static __inline int16_t __attribute__ ((__always_inline__)) vuqaddh_s16 (int16_t __a, uint16_t __b) { return __builtin_aarch64_suqaddhi_ssu (__a, __b); } __extension__ static __inline int32_t __attribute__ ((__always_inline__)) vuqadds_s32 (int32_t __a, 
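// Two more families close out here. vtst*/vtstq* ("test bits") reduce to a
// vector (a & b) != 0 comparison, which yields an all-ones lane wherever the
// operands share any set bit and an all-zeros lane otherwise -- i.e. a
// ready-made mask for vbsl; the scalar vtstd_* forms spell the same thing
// out with a ternary. vuqadd*/vuqaddq* map to SUQADD: add an unsigned value
// into a signed accumulator with signed saturation. As a quick worked
// example, every lane of
//
//   vuqadd_s8(vdup_n_s8(120), vdup_n_u8(10))
//
// is 127, since 120 + 10 = 130 saturates at the int8 maximum.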
uint32_t __b) { return __builtin_aarch64_suqaddsi_ssu (__a, __b); } __extension__ static __inline int64_t __attribute__ ((__always_inline__)) vuqaddd_s64 (int64_t __a, uint64_t __b) { return __builtin_aarch64_suqadddi_ssu (__a, __b); } # 24868 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vuzp1_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vuzp1_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vuzp1_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vuzp1_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vuzp1_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vuzp1_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vuzp1_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vuzp1_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vuzp1_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vuzp1q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vuzp1q_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vuzp1q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vuzp1q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vuzp1q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vuzp1q_s64 (int64x2_t __a, int64x2_t 
__b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vuzp2_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vuzp2_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vuzp2_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vuzp2_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vuzp2_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vuzp2_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vuzp2_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vuzp2_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vuzp2_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vuzp2q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vuzp2q_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vuzp2q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, 
(uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vuzp2q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vuzp2q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vuzp2q_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vuzp_f32 (float32x2_t a, float32x2_t b) { return (float32x2x2_t) {vuzp1_f32 (a, b), vuzp2_f32 (a, b)}; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vuzp_p8 (poly8x8_t a, poly8x8_t b) { return (poly8x8x2_t) {vuzp1_p8 (a, b), vuzp2_p8 (a, b)}; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vuzp_p16 (poly16x4_t a, poly16x4_t b) { return (poly16x4x2_t) {vuzp1_p16 (a, b), vuzp2_p16 (a, b)}; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vuzp_s8 (int8x8_t a, int8x8_t b) { return (int8x8x2_t) {vuzp1_s8 (a, b), vuzp2_s8 (a, b)}; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vuzp_s16 (int16x4_t a, int16x4_t b) { return (int16x4x2_t) {vuzp1_s16 (a, b), vuzp2_s16 (a, b)}; } __extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__)) vuzp_s32 (int32x2_t a, int32x2_t b) { return (int32x2x2_t) {vuzp1_s32 (a, b), vuzp2_s32 (a, b)}; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vuzp_u8 (uint8x8_t a, uint8x8_t b) { return (uint8x8x2_t) {vuzp1_u8 (a, b), vuzp2_u8 (a, b)}; } __extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__)) vuzp_u16 (uint16x4_t a, uint16x4_t b) { return (uint16x4x2_t) {vuzp1_u16 (a, b), vuzp2_u16 (a, b)}; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vuzp_u32 (uint32x2_t a, uint32x2_t b) { return (uint32x2x2_t) {vuzp1_u32 (a, b), vuzp2_u32 (a, b)}; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vuzpq_f32 (float32x4_t a, float32x4_t b) { return (float32x4x2_t) {vuzp1q_f32 (a, b), vuzp2q_f32 (a, b)}; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vuzpq_p8 (poly8x16_t a, poly8x16_t b) { return (poly8x16x2_t) {vuzp1q_p8 (a, b), vuzp2q_p8 (a, b)}; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vuzpq_p16 (poly16x8_t a, poly16x8_t b) { return 
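// The vuzp1*/vuzp2* shuffles above de-interleave: vuzp1 collects the
// even-indexed elements of the concatenation a:b and vuzp2 the odd-indexed
// ones, so with u8 lanes a = {0,1,2,3,4,5,6,7} and b = {8,...,15}:
//   vuzp1_u8(a, b) -> {0, 2, 4, 6, 8, 10, 12, 14}
//   vuzp2_u8(a, b) -> {1, 3, 5, 7, 9, 11, 13, 15}
// vuzp is the inverse of vzip further below: zipping the two unzipped halves
// reproduces the original pair. The typical use is splitting interleaved
// data, e.g. real/imaginary pairs or stereo samples, into planar form.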
(poly16x8x2_t) {vuzp1q_p16 (a, b), vuzp2q_p16 (a, b)}; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vuzpq_s8 (int8x16_t a, int8x16_t b) { return (int8x16x2_t) {vuzp1q_s8 (a, b), vuzp2q_s8 (a, b)}; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vuzpq_s16 (int16x8_t a, int16x8_t b) { return (int16x8x2_t) {vuzp1q_s16 (a, b), vuzp2q_s16 (a, b)}; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vuzpq_s32 (int32x4_t a, int32x4_t b) { return (int32x4x2_t) {vuzp1q_s32 (a, b), vuzp2q_s32 (a, b)}; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vuzpq_u8 (uint8x16_t a, uint8x16_t b) { return (uint8x16x2_t) {vuzp1q_u8 (a, b), vuzp2q_u8 (a, b)}; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vuzpq_u16 (uint16x8_t a, uint16x8_t b) { return (uint16x8x2_t) {vuzp1q_u16 (a, b), vuzp2q_u16 (a, b)}; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vuzpq_u32 (uint32x4_t a, uint32x4_t b) { return (uint32x4x2_t) {vuzp1q_u32 (a, b), vuzp2q_u32 (a, b)}; } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vzip1_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vzip1_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vzip1_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vzip1_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vzip1_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vzip1_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vzip1_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vzip1_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vzip1_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vzip1q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vzip1q_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vzip1q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vzip1q_p16 (poly16x8_t __a, 
poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vzip1q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vzip1q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vzip1q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vzip1q_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vzip1q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vzip1q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vzip1q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vzip1q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2}); } __extension__ static __inline float32x2_t __attribute__ ((__always_inline__)) vzip2_f32 (float32x2_t __a, float32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__)) vzip2_p8 (poly8x8_t __a, poly8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__)) vzip2_p16 (poly16x4_t __a, poly16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7}); } __extension__ static __inline int8x8_t __attribute__ ((__always_inline__)) vzip2_s8 (int8x8_t __a, int8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline int16x4_t __attribute__ ((__always_inline__)) vzip2_s16 (int16x4_t __a, int16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7}); } __extension__ static __inline int32x2_t __attribute__ ((__always_inline__)) vzip2_s32 (int32x2_t __a, int32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vzip2_u8 (uint8x8_t __a, uint8x8_t __b) { return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vzip2_u16 (uint16x4_t __a, uint16x4_t __b) { return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7}); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vzip2_u32 (uint32x2_t __a, uint32x2_t __b) { return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3}); } __extension__ static __inline float32x4_t __attribute__ ((__always_inline__)) vzip2q_f32 (float32x4_t __a, float32x4_t __b) { return __builtin_shuffle 
(__a, __b, (uint32x4_t) {2, 6, 3, 7}); } __extension__ static __inline float64x2_t __attribute__ ((__always_inline__)) vzip2q_f64 (float64x2_t __a, float64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__)) vzip2q_p8 (poly8x16_t __a, poly8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}); } __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__)) vzip2q_p16 (poly16x8_t __a, poly16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline int8x16_t __attribute__ ((__always_inline__)) vzip2q_s8 (int8x16_t __a, int8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}); } __extension__ static __inline int16x8_t __attribute__ ((__always_inline__)) vzip2q_s16 (int16x8_t __a, int16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline int32x4_t __attribute__ ((__always_inline__)) vzip2q_s32 (int32x4_t __a, int32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7}); } __extension__ static __inline int64x2_t __attribute__ ((__always_inline__)) vzip2q_s64 (int64x2_t __a, int64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vzip2q_u8 (uint8x16_t __a, uint8x16_t __b) { return __builtin_shuffle (__a, __b, (uint8x16_t) {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vzip2q_u16 (uint16x8_t __a, uint16x8_t __b) { return __builtin_shuffle (__a, __b, (uint16x8_t) {4, 12, 5, 13, 6, 14, 7, 15}); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vzip2q_u32 (uint32x4_t __a, uint32x4_t __b) { return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7}); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vzip2q_u64 (uint64x2_t __a, uint64x2_t __b) { return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3}); } __extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__)) vzip_f32 (float32x2_t a, float32x2_t b) { return (float32x2x2_t) {vzip1_f32 (a, b), vzip2_f32 (a, b)}; } __extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__)) vzip_p8 (poly8x8_t a, poly8x8_t b) { return (poly8x8x2_t) {vzip1_p8 (a, b), vzip2_p8 (a, b)}; } __extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__)) vzip_p16 (poly16x4_t a, poly16x4_t b) { return (poly16x4x2_t) {vzip1_p16 (a, b), vzip2_p16 (a, b)}; } __extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__)) vzip_s8 (int8x8_t a, int8x8_t b) { return (int8x8x2_t) {vzip1_s8 (a, b), vzip2_s8 (a, b)}; } __extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__)) vzip_s16 (int16x4_t a, int16x4_t b) { return (int16x4x2_t) {vzip1_s16 (a, b), vzip2_s16 (a, b)}; } __extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__)) vzip_s32 (int32x2_t a, int32x2_t b) { return (int32x2x2_t) {vzip1_s32 (a, b), vzip2_s32 (a, b)}; } __extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__)) vzip_u8 (uint8x8_t a, uint8x8_t b) { return (uint8x8x2_t) {vzip1_u8 (a, b), vzip2_u8 (a, b)}; } __extension__ static 
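// vzip1*/vzip2* above interleave the low (zip1) or high (zip2) halves of two
// vectors, and the vzip_* combiners package both results. With
// a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}:
//   vzip1_s16(a, b) -> {a0, b0, a1, b1}
//   vzip2_s16(a, b) -> {a2, b2, a3, b3}
// A sketch of the planar-to-interleaved use (function name illustrative):
//
//   // merge 8 left + 8 right 16-bit samples into L R L R ... order
//   int16x8x2_t interleave_stereo(int16x8_t left, int16x8_t right) {
//     return vzipq_s16(left, right);
//   }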
__inline uint16x4x2_t __attribute__ ((__always_inline__)) vzip_u16 (uint16x4_t a, uint16x4_t b) { return (uint16x4x2_t) {vzip1_u16 (a, b), vzip2_u16 (a, b)}; } __extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__)) vzip_u32 (uint32x2_t a, uint32x2_t b) { return (uint32x2x2_t) {vzip1_u32 (a, b), vzip2_u32 (a, b)}; } __extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__)) vzipq_f32 (float32x4_t a, float32x4_t b) { return (float32x4x2_t) {vzip1q_f32 (a, b), vzip2q_f32 (a, b)}; } __extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__)) vzipq_p8 (poly8x16_t a, poly8x16_t b) { return (poly8x16x2_t) {vzip1q_p8 (a, b), vzip2q_p8 (a, b)}; } __extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__)) vzipq_p16 (poly16x8_t a, poly16x8_t b) { return (poly16x8x2_t) {vzip1q_p16 (a, b), vzip2q_p16 (a, b)}; } __extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__)) vzipq_s8 (int8x16_t a, int8x16_t b) { return (int8x16x2_t) {vzip1q_s8 (a, b), vzip2q_s8 (a, b)}; } __extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__)) vzipq_s16 (int16x8_t a, int16x8_t b) { return (int16x8x2_t) {vzip1q_s16 (a, b), vzip2q_s16 (a, b)}; } __extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__)) vzipq_s32 (int32x4_t a, int32x4_t b) { return (int32x4x2_t) {vzip1q_s32 (a, b), vzip2q_s32 (a, b)}; } __extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__)) vzipq_u8 (uint8x16_t a, uint8x16_t b) { return (uint8x16x2_t) {vzip1q_u8 (a, b), vzip2q_u8 (a, b)}; } __extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__)) vzipq_u16 (uint16x8_t a, uint16x8_t b) { return (uint16x8x2_t) {vzip1q_u16 (a, b), vzip2q_u16 (a, b)}; } __extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__)) vzipq_u32 (uint32x4_t a, uint32x4_t b) { return (uint32x4x2_t) {vzip1q_u32 (a, b), vzip2q_u32 (a, b)}; } # 25801 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/arm_neon.h" 3 4 #pragma GCC pop_options # 12 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" 2 # 1 "../../third_party/libvpx/source/config/linux/arm64/./vpx_config.h" 1 # 14 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" 2 # 1 "../../third_party/libvpx/source/config/linux/arm64/./vpx_dsp_rtcd.h" 1 # 14 "../../third_party/libvpx/source/config/linux/arm64/./vpx_dsp_rtcd.h" # 1 "../../third_party/libvpx/source/libvpx/vpx/vpx_integer.h" 1 # 15 "../../third_party/libvpx/source/libvpx/vpx/vpx_integer.h" # 1 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stddef.h" 1 3 4 # 149 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stddef.h" 3 4 typedef long int ptrdiff_t; # 216 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stddef.h" 3 4 typedef long unsigned int size_t; # 328 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stddef.h" 3 4 typedef unsigned int wchar_t; # 426 "/usr/lib/gcc-cross/aarch64-linux-gnu/5/include/stddef.h" 3 4 typedef struct { long long __max_align_ll __attribute__((__aligned__(__alignof__(long long)))); long double __max_align_ld __attribute__((__aligned__(__alignof__(long double)))); } max_align_t; # 16 "../../third_party/libvpx/source/libvpx/vpx/vpx_integer.h" 2 # 60 "../../third_party/libvpx/source/libvpx/vpx/vpx_integer.h" # 1 "/usr/aarch64-linux-gnu/include/inttypes.h" 1 3 # 34 "/usr/aarch64-linux-gnu/include/inttypes.h" 3 typedef unsigned int __gwchar_t; # 266 "/usr/aarch64-linux-gnu/include/inttypes.h" 3 typedef 
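// At this point the arm_neon.h expansion ends (the pop_options pragma above)
// and preprocessing returns to the libvpx translation unit, pulling in
// vpx_config.h, vpx_dsp_rtcd.h, vpx_integer.h and, through the latter,
// glibc's inttypes.h. The declarations just below define imaxdiv_t and
// gnu_inline wrappers that forward strtoimax and friends to the internal
// non-grouping parsers (__strtol_internal with __group = 0), so e.g.
//
//   intmax_t v = strtoimax("-42", NULL, 10);   // v == -42
//
// behaves like strtol at intmax_t width.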
struct { long int quot; long int rem; } imaxdiv_t; # 290 "/usr/aarch64-linux-gnu/include/inttypes.h" 3 extern intmax_t imaxabs (intmax_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern imaxdiv_t imaxdiv (intmax_t __numer, intmax_t __denom) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__const__)); extern intmax_t strtoimax (const char *__restrict __nptr, char **__restrict __endptr, int __base) __attribute__ ((__nothrow__ , __leaf__)); extern uintmax_t strtoumax (const char *__restrict __nptr, char ** __restrict __endptr, int __base) __attribute__ ((__nothrow__ , __leaf__)); extern intmax_t wcstoimax (const __gwchar_t *__restrict __nptr, __gwchar_t **__restrict __endptr, int __base) __attribute__ ((__nothrow__ , __leaf__)); extern uintmax_t wcstoumax (const __gwchar_t *__restrict __nptr, __gwchar_t ** __restrict __endptr, int __base) __attribute__ ((__nothrow__ , __leaf__)); extern long int __strtol_internal (const char *__restrict __nptr, char **__restrict __endptr, int __base, int __group) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) __attribute__ ((__warn_unused_result__)); extern __inline __attribute__ ((__gnu_inline__)) intmax_t __attribute__ ((__nothrow__ , __leaf__)) strtoimax (const char *__restrict nptr, char **__restrict endptr, int base) { return __strtol_internal (nptr, endptr, base, 0); } extern unsigned long int __strtoul_internal (const char *__restrict __nptr, char ** __restrict __endptr, int __base, int __group) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) __attribute__ ((__warn_unused_result__)); extern __inline __attribute__ ((__gnu_inline__)) uintmax_t __attribute__ ((__nothrow__ , __leaf__)) strtoumax (const char *__restrict nptr, char **__restrict endptr, int base) { return __strtoul_internal (nptr, endptr, base, 0); } extern long int __wcstol_internal (const __gwchar_t * __restrict __nptr, __gwchar_t **__restrict __endptr, int __base, int __group) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) __attribute__ ((__warn_unused_result__)); extern __inline __attribute__ ((__gnu_inline__)) intmax_t __attribute__ ((__nothrow__ , __leaf__)) wcstoimax (const __gwchar_t *__restrict nptr, __gwchar_t **__restrict endptr, int base) { return __wcstol_internal (nptr, endptr, base, 0); } extern unsigned long int __wcstoul_internal (const __gwchar_t * __restrict __nptr, __gwchar_t ** __restrict __endptr, int __base, int __group) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1))) __attribute__ ((__warn_unused_result__)); extern __inline __attribute__ ((__gnu_inline__)) uintmax_t __attribute__ ((__nothrow__ , __leaf__)) wcstoumax (const __gwchar_t *__restrict nptr, __gwchar_t **__restrict endptr, int base) { return __wcstoul_internal (nptr, endptr, base, 0); } # 432 "/usr/aarch64-linux-gnu/include/inttypes.h" 3 # 61 "../../third_party/libvpx/source/libvpx/vpx/vpx_integer.h" 2 # 15 "../../third_party/libvpx/source/config/linux/arm64/./vpx_dsp_rtcd.h" 2 # 1 "../../third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_common.h" 1 # 16 "../../third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_common.h" # 1 "../../third_party/libvpx/source/libvpx/vpx_ports/mem.h" 1 # 14 "../../third_party/libvpx/source/libvpx/vpx_ports/mem.h" # 1 "../../third_party/libvpx/source/config/linux/arm64/vpx_config.h" 1 # 15 "../../third_party/libvpx/source/libvpx/vpx_ports/mem.h" 2 # 17 "../../third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_common.h" 2 # 42 
"../../third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_common.h" # 42 "../../third_party/libvpx/source/libvpx/vpx_dsp/vpx_dsp_common.h" typedef int32_t tran_high_t; typedef int16_t tran_low_t; static inline uint8_t clip_pixel(int val) { return (val > 255) ? 255 : (val < 0) ? 0 : val; } static inline int clamp(int value, int low, int high) { return value < low ? low : (value > high ? high : value); } static inline double fclamp(double value, double low, double high) { return value < low ? low : (value > high ? high : value); } # 16 "../../third_party/libvpx/source/config/linux/arm64/./vpx_dsp_rtcd.h" 2 unsigned int vpx_avg_4x4_c(const uint8_t *, int p); unsigned int vpx_avg_4x4_neon(const uint8_t *, int p); unsigned int vpx_avg_8x8_c(const uint8_t *, int p); unsigned int vpx_avg_8x8_neon(const uint8_t *, int p); void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride); void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, 
int h); void vpx_convolve_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_convolve_copy_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d135_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d207e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const 
uint8_t *left); void vpx_d45_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d45e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t 
*left); void vpx_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct32x32_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct8x8_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct8x8_neon(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride); void vpx_fdct8x8_1_neon(const int16_t *input, tran_low_t *output, int stride); void vpx_get16x16var_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum); void vpx_get16x16var_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum); unsigned int vpx_get4x4sse_cs_c(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride); unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride); void vpx_get8x8var_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum); void vpx_get8x8var_neon(const uint8_t 
*src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum); unsigned int vpx_get_mb_ss_c(const int16_t *); void vpx_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride, int16_t *coeff); void vpx_hadamard_16x16_neon(const int16_t *src_diff, int src_stride, int16_t *coeff); void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride, int16_t *coeff); void vpx_hadamard_8x8_neon(const int16_t *src_diff, int src_stride, int16_t *coeff); void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct16x16_1_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_1_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct32x32_34_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_1_add_neon(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, int stride); int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width); int16_t vpx_int_pro_col_neon(const uint8_t *ref, const int width); void 
vpx_int_pro_row_c(int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height); void vpx_int_pro_row_neon(int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height); void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int stride); void vpx_lpf_horizontal_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_16_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_16_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_4_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_8_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_vertical_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_16_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_4_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_8_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh); void vpx_lpf_vertical_8_dual_c(uint8_t *s, int 
pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1); void vpx_mbpost_proc_across_ip_c(unsigned char *dst, int pitch, int rows, int cols,int flimit); void vpx_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols,int flimit); void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max); void vpx_minmax_8x8_neon(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max); unsigned int vpx_mse16x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); unsigned int vpx_mse16x16_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); unsigned int vpx_mse16x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); unsigned int vpx_mse8x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); unsigned int vpx_mse8x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse); void vpx_plane_add_noise_c(uint8_t *start, const int8_t *noise, int blackclamp, int whiteclamp, int width, int height, int pitch); void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size); void vpx_post_proc_down_and_across_mb_row_neon(unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size); void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); unsigned int vpx_sad16x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad16x16_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad16x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad16x16x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad16x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad16x16x4d_neon(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad16x16x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad16x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad16x32_avg_c(const 
uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad16x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); unsigned int vpx_sad16x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad16x8_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad16x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad16x8x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad16x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad16x8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad32x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad32x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad32x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); unsigned int vpx_sad32x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad32x32_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad32x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad32x32x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad32x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad32x32x4d_neon(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad32x32x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad32x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad32x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad32x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); unsigned int vpx_sad4x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad4x4_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad4x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad4x4x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad4x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad4x4x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad4x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad4x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t 
*ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad4x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad4x8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad64x32_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad64x32_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad64x32x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); unsigned int vpx_sad64x64_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad64x64_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad64x64_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad64x64x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad64x64x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad64x64x4d_neon(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad64x64x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad8x16_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad8x16_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad8x16_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad8x16x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad8x16x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad8x16x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad8x4_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad8x4_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad8x4x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad8x4x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); unsigned int vpx_sad8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad8x8_neon(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride); unsigned int vpx_sad8x8_avg_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred); void vpx_sad8x8x3_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); void vpx_sad8x8x4d_c(const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array); void vpx_sad8x8x8_c(const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, uint32_t *sad_array); int 
vpx_satd_c(const int16_t *coeff, int length); int vpx_satd_neon(const int16_t *coeff, int length); void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); uint32_t vpx_sub_pixel_avg_variance16x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance16x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance16x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance32x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance32x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance32x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance4x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance4x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance64x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance64x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance8x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance8x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const 
uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_avg_variance8x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred); uint32_t vpx_sub_pixel_variance16x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance16x16_neon(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance16x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance16x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance32x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance32x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance32x32_neon(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance32x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance4x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance4x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance64x32_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance64x64_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance64x64_neon(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance8x16_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance8x4_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance8x8_c(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); uint32_t vpx_sub_pixel_variance8x8_neon(const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse); void vpx_subtract_block_c(int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride); void vpx_subtract_block_neon(int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride); uint64_t vpx_sum_squares_2d_i16_c(const int16_t *src, int stride, int size); void vpx_tm_predictor_16x16_c(uint8_t *dst, 
ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); unsigned int vpx_variance16x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance16x16_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance16x32_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance16x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance16x8_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance32x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance32x32_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance32x32_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance32x64_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance32x64_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance4x4_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance4x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance64x32_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance64x32_neon(const uint8_t *src_ptr, int source_stride, const uint8_t 
*ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance64x64_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance64x64_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance8x16_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance8x16_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance8x4_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance8x8_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); unsigned int vpx_variance8x8_neon(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse); void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left); int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl); int vpx_vector_var_neon(const int16_t *ref, const int16_t *src, const int bwl); void vpx_dsp_rtcd(void); # 1 "../../third_party/libvpx/source/config/linux/arm64/./vpx_config.h" 1 # 840 "../../third_party/libvpx/source/config/linux/arm64/./vpx_dsp_rtcd.h" 2 # 15 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct32x32_34_add_neon.c" 2 # 1 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct_neon.h" 1 # 17 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct_neon.h" # 1 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" 1 # 24 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" static inline int16x8x2_t vpx_vtrnq_s64(int32x4_t a0, int32x4_t a1) { int16x8x2_t b0; b0.val[0] = vcombine_s16(vreinterpret_s16_s32(vget_low_s32(a0)), vreinterpret_s16_s32(vget_low_s32(a1))); b0.val[1] = vcombine_s16(vreinterpret_s16_s32(vget_high_s32(a0)), vreinterpret_s16_s32(vget_high_s32(a1))); return b0; } static inline uint8x16x2_t vpx_vtrnq_u64(uint32x4_t a0, uint32x4_t a1) { uint8x16x2_t b0; b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)), vreinterpret_u8_u32(vget_low_u32(a1))); b0.val[1] = vcombine_u8(vreinterpret_u8_u32(vget_high_u32(a0)), vreinterpret_u8_u32(vget_high_u32(a1))); return b0; } static inline uint16x8x2_t vpx_vtrnq_u64_to_u16(uint32x4_t a0, uint32x4_t a1) { uint16x8x2_t b0; b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)), vreinterpret_u16_u32(vget_low_u32(a1))); b0.val[1] = vcombine_u16(vreinterpret_u16_u32(vget_high_u32(a0)), vreinterpret_u16_u32(vget_high_u32(a1))); return b0; } static inline void transpose_u8_4x4(uint8x8_t *a0, uint8x8_t *a1) { const uint16x4x2_t b0 = vtrn_u16(vreinterpret_u16_u8(*a0), vreinterpret_u16_u8(*a1)); const uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b0.val[0]), vreinterpret_u32_u16(b0.val[1])); const uint8x8x2_t d0 = vtrn_u8(vreinterpret_u8_u32(c0.val[0]), vreinterpret_u8_u32(c0.val[1])); *a0 = d0.val[0]; *a1 = d0.val[1]; } static inline void transpose_s16_4x4d(int16x4_t *a0, int16x4_t *a1, int16x4_t *a2, int16x4_t *a3) { # 93 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int16x4x2_t b0 = vtrn_s16(*a0, *a1); const int16x4x2_t b1 = vtrn_s16(*a2, *a3); const int32x2x2_t c0 = vtrn_s32(vreinterpret_s32_s16(b0.val[0]), vreinterpret_s32_s16(b1.val[0])); const int32x2x2_t c1 = 
vtrn_s32(vreinterpret_s32_s16(b0.val[1]), vreinterpret_s32_s16(b1.val[1])); *a0 = vreinterpret_s16_s32(c0.val[0]); *a1 = vreinterpret_s16_s32(c1.val[0]); *a2 = vreinterpret_s16_s32(c0.val[1]); *a3 = vreinterpret_s16_s32(c1.val[1]); } static inline void transpose_u16_4x4q(uint16x8_t *a0, uint16x8_t *a1) { const uint32x4x2_t b0 = vtrnq_u32(vreinterpretq_u32_u16(*a0), vreinterpretq_u32_u16(*a1)); const uint32x4_t c0 = vcombine_u32(vget_low_u32(b0.val[0]), vget_low_u32(b0.val[1])); const uint32x4_t c1 = vcombine_u32(vget_high_u32(b0.val[0]), vget_high_u32(b0.val[1])); const uint16x8x2_t d0 = vtrnq_u16(vreinterpretq_u16_u32(c0), vreinterpretq_u16_u32(c1)); *a0 = d0.val[0]; *a1 = d0.val[1]; } static inline void transpose_s16_4x8(const int16x4_t a0, const int16x4_t a1, const int16x4_t a2, const int16x4_t a3, const int16x4_t a4, const int16x4_t a5, const int16x4_t a6, const int16x4_t a7, int16x8_t *o0, int16x8_t *o1, int16x8_t *o2, int16x8_t *o3) { # 169 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int16x4x2_t b0 = vtrn_s16(a0, a1); const int16x4x2_t b1 = vtrn_s16(a2, a3); const int16x4x2_t b2 = vtrn_s16(a4, a5); const int16x4x2_t b3 = vtrn_s16(a6, a7); # 184 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int32x2x2_t c0 = vtrn_s32(vreinterpret_s32_s16(b0.val[0]), vreinterpret_s32_s16(b1.val[0])); const int32x2x2_t c1 = vtrn_s32(vreinterpret_s32_s16(b0.val[1]), vreinterpret_s32_s16(b1.val[1])); const int32x2x2_t c2 = vtrn_s32(vreinterpret_s32_s16(b2.val[0]), vreinterpret_s32_s16(b3.val[0])); const int32x2x2_t c3 = vtrn_s32(vreinterpret_s32_s16(b2.val[1]), vreinterpret_s32_s16(b3.val[1])); *o0 = vcombine_s16(vreinterpret_s16_s32(c0.val[0]), vreinterpret_s16_s32(c2.val[0])); *o1 = vcombine_s16(vreinterpret_s16_s32(c1.val[0]), vreinterpret_s16_s32(c3.val[0])); *o2 = vcombine_s16(vreinterpret_s16_s32(c0.val[1]), vreinterpret_s16_s32(c2.val[1])); *o3 = vcombine_s16(vreinterpret_s16_s32(c1.val[1]), vreinterpret_s16_s32(c3.val[1])); } static inline void transpose_u8_8x4(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2, uint8x8_t *a3) { # 222 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x8x2_t b0 = vtrn_u8(*a0, *a1); const uint8x8x2_t b1 = vtrn_u8(*a2, *a3); const uint16x4x2_t c0 = vtrn_u16(vreinterpret_u16_u8(b0.val[0]), vreinterpret_u16_u8(b1.val[0])); const uint16x4x2_t c1 = vtrn_u16(vreinterpret_u16_u8(b0.val[1]), vreinterpret_u16_u8(b1.val[1])); *a0 = vreinterpret_u8_u16(c0.val[0]); *a1 = vreinterpret_u8_u16(c1.val[0]); *a2 = vreinterpret_u8_u16(c0.val[1]); *a3 = vreinterpret_u8_u16(c1.val[1]); } static inline void transpose_u16_8x4(uint16x8_t *a0, uint16x8_t *a1, uint16x8_t *a2, uint16x8_t *a3) { # 255 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t b0 = vtrnq_u16(*a0, *a1); const uint16x8x2_t b1 = vtrnq_u16(*a2, *a3); const uint32x4x2_t c0 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[0]), vreinterpretq_u32_u16(b1.val[0])); const uint32x4x2_t c1 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[1]), vreinterpretq_u32_u16(b1.val[1])); *a0 = vreinterpretq_u16_u32(c0.val[0]); *a1 = vreinterpretq_u16_u32(c1.val[0]); *a2 = vreinterpretq_u16_u32(c0.val[1]); *a3 = vreinterpretq_u16_u32(c1.val[1]); } static inline void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2, uint8x8_t *a3, uint8x8_t *a4, uint8x8_t *a5, uint8x8_t *a6, uint8x8_t *a7) { # 295 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x16x2_t b0 = vtrnq_u8(vcombine_u8(*a0, *a4), 
vcombine_u8(*a1, *a5)); const uint8x16x2_t b1 = vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7)); const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), vreinterpretq_u16_u8(b1.val[0])); const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), vreinterpretq_u16_u8(b1.val[1])); const uint32x4x2_t d0 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[0]), vreinterpretq_u32_u16(c1.val[0])); const uint32x4x2_t d1 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[1]), vreinterpretq_u32_u16(c1.val[1])); *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0])); *a1 = vreinterpret_u8_u32(vget_high_u32(d0.val[0])); *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0])); *a3 = vreinterpret_u8_u32(vget_high_u32(d1.val[0])); *a4 = vreinterpret_u8_u32(vget_low_u32(d0.val[1])); *a5 = vreinterpret_u8_u32(vget_high_u32(d0.val[1])); *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1])); *a7 = vreinterpret_u8_u32(vget_high_u32(d1.val[1])); } static inline void transpose_s16_8x8(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2, int16x8_t *a3, int16x8_t *a4, int16x8_t *a5, int16x8_t *a6, int16x8_t *a7) { # 354 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int16x8x2_t b0 = vtrnq_s16(*a0, *a1); const int16x8x2_t b1 = vtrnq_s16(*a2, *a3); const int16x8x2_t b2 = vtrnq_s16(*a4, *a5); const int16x8x2_t b3 = vtrnq_s16(*a6, *a7); # 369 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]), vreinterpretq_s32_s16(b1.val[0])); const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]), vreinterpretq_s32_s16(b1.val[1])); const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]), vreinterpretq_s32_s16(b3.val[0])); const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]), vreinterpretq_s32_s16(b3.val[1])); # 387 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const int16x8x2_t d0 = vpx_vtrnq_s64(c0.val[0], c2.val[0]); const int16x8x2_t d1 = vpx_vtrnq_s64(c1.val[0], c3.val[0]); const int16x8x2_t d2 = vpx_vtrnq_s64(c0.val[1], c2.val[1]); const int16x8x2_t d3 = vpx_vtrnq_s64(c1.val[1], c3.val[1]); *a0 = d0.val[0]; *a1 = d1.val[0]; *a2 = d2.val[0]; *a3 = d3.val[0]; *a4 = d0.val[1]; *a5 = d1.val[1]; *a6 = d2.val[1]; *a7 = d3.val[1]; } static inline void transpose_u16_8x8(uint16x8_t *a0, uint16x8_t *a1, uint16x8_t *a2, uint16x8_t *a3, uint16x8_t *a4, uint16x8_t *a5, uint16x8_t *a6, uint16x8_t *a7) { # 425 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t b0 = vtrnq_u16(*a0, *a1); const uint16x8x2_t b1 = vtrnq_u16(*a2, *a3); const uint16x8x2_t b2 = vtrnq_u16(*a4, *a5); const uint16x8x2_t b3 = vtrnq_u16(*a6, *a7); # 440 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint32x4x2_t c0 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[0]), vreinterpretq_u32_u16(b1.val[0])); const uint32x4x2_t c1 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[1]), vreinterpretq_u32_u16(b1.val[1])); const uint32x4x2_t c2 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[0]), vreinterpretq_u32_u16(b3.val[0])); const uint32x4x2_t c3 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[1]), vreinterpretq_u32_u16(b3.val[1])); # 458 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t d0 = vpx_vtrnq_u64_to_u16(c0.val[0], c2.val[0]); const uint16x8x2_t d1 = vpx_vtrnq_u64_to_u16(c1.val[0], c3.val[0]); const uint16x8x2_t d2 = vpx_vtrnq_u64_to_u16(c0.val[1], c2.val[1]); const uint16x8x2_t d3 = vpx_vtrnq_u64_to_u16(c1.val[1], c3.val[1]); *a0 = 
d0.val[0]; *a1 = d1.val[0]; *a2 = d2.val[0]; *a3 = d3.val[0]; *a4 = d0.val[1]; *a5 = d1.val[1]; *a6 = d2.val[1]; *a7 = d3.val[1]; } static inline void transpose_u8_16x8( const uint8x16_t i0, const uint8x16_t i1, const uint8x16_t i2, const uint8x16_t i3, const uint8x16_t i4, const uint8x16_t i5, const uint8x16_t i6, const uint8x16_t i7, uint8x8_t *o0, uint8x8_t *o1, uint8x8_t *o2, uint8x8_t *o3, uint8x8_t *o4, uint8x8_t *o5, uint8x8_t *o6, uint8x8_t *o7, uint8x8_t *o8, uint8x8_t *o9, uint8x8_t *o10, uint8x8_t *o11, uint8x8_t *o12, uint8x8_t *o13, uint8x8_t *o14, uint8x8_t *o15) { # 498 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x16x2_t b0 = vtrnq_u8(i0, i1); const uint8x16x2_t b1 = vtrnq_u8(i2, i3); const uint8x16x2_t b2 = vtrnq_u8(i4, i5); const uint8x16x2_t b3 = vtrnq_u8(i6, i7); # 512 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), vreinterpretq_u16_u8(b1.val[0])); const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), vreinterpretq_u16_u8(b1.val[1])); const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), vreinterpretq_u16_u8(b3.val[0])); const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), vreinterpretq_u16_u8(b3.val[1])); # 530 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), vreinterpretq_u32_u16(c2.val[0])); const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), vreinterpretq_u32_u16(c2.val[1])); const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), vreinterpretq_u32_u16(c3.val[0])); const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), vreinterpretq_u32_u16(c3.val[1])); # 556 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" *o0 = vget_low_u8(vreinterpretq_u8_u32(d0.val[0])); *o1 = vget_low_u8(vreinterpretq_u8_u32(d2.val[0])); *o2 = vget_low_u8(vreinterpretq_u8_u32(d1.val[0])); *o3 = vget_low_u8(vreinterpretq_u8_u32(d3.val[0])); *o4 = vget_low_u8(vreinterpretq_u8_u32(d0.val[1])); *o5 = vget_low_u8(vreinterpretq_u8_u32(d2.val[1])); *o6 = vget_low_u8(vreinterpretq_u8_u32(d1.val[1])); *o7 = vget_low_u8(vreinterpretq_u8_u32(d3.val[1])); *o8 = vget_high_u8(vreinterpretq_u8_u32(d0.val[0])); *o9 = vget_high_u8(vreinterpretq_u8_u32(d2.val[0])); *o10 = vget_high_u8(vreinterpretq_u8_u32(d1.val[0])); *o11 = vget_high_u8(vreinterpretq_u8_u32(d3.val[0])); *o12 = vget_high_u8(vreinterpretq_u8_u32(d0.val[1])); *o13 = vget_high_u8(vreinterpretq_u8_u32(d2.val[1])); *o14 = vget_high_u8(vreinterpretq_u8_u32(d1.val[1])); *o15 = vget_high_u8(vreinterpretq_u8_u32(d3.val[1])); } static inline void transpose_u8_8x16( const uint8x8_t i0, const uint8x8_t i1, const uint8x8_t i2, const uint8x8_t i3, const uint8x8_t i4, const uint8x8_t i5, const uint8x8_t i6, const uint8x8_t i7, const uint8x8_t i8, const uint8x8_t i9, const uint8x8_t i10, const uint8x8_t i11, const uint8x8_t i12, const uint8x8_t i13, const uint8x8_t i14, const uint8x8_t i15, uint8x16_t *o0, uint8x16_t *o1, uint8x16_t *o2, uint8x16_t *o3, uint8x16_t *o4, uint8x16_t *o5, uint8x16_t *o6, uint8x16_t *o7) { # 609 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x16_t a0 = vcombine_u8(i0, i8); const uint8x16_t a1 = vcombine_u8(i1, i9); const uint8x16_t a2 = vcombine_u8(i2, i10); const uint8x16_t a3 = vcombine_u8(i3, i11); const uint8x16_t a4 = vcombine_u8(i4, i12); const uint8x16_t a5 = vcombine_u8(i5, i13); 
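/*
 * Editorial note (not from libvpx): transpose_u8_8x16 first pairs the sixteen
 * 8-byte input rows into eight 16-byte vectors (i0|i8, i1|i9, ...) so that the
 * vtrnq_u8 / vtrnq_u16 / vtrnq_u32 cascade that follows can transpose all
 * sixteen rows in a single pass over 128-bit registers.
 */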
const uint8x16_t a6 = vcombine_u8(i6, i14); const uint8x16_t a7 = vcombine_u8(i7, i15); # 627 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x16x2_t b0 = vtrnq_u8(a0, a1); const uint8x16x2_t b1 = vtrnq_u8(a2, a3); const uint8x16x2_t b2 = vtrnq_u8(a4, a5); const uint8x16x2_t b3 = vtrnq_u8(a6, a7); # 641 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), vreinterpretq_u16_u8(b1.val[0])); const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), vreinterpretq_u16_u8(b1.val[1])); const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), vreinterpretq_u16_u8(b3.val[0])); const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), vreinterpretq_u16_u8(b3.val[1])); # 659 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), vreinterpretq_u32_u16(c2.val[0])); const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), vreinterpretq_u32_u16(c2.val[1])); const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), vreinterpretq_u32_u16(c3.val[0])); const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), vreinterpretq_u32_u16(c3.val[1])); # 677 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" *o0 = vreinterpretq_u8_u32(d0.val[0]); *o1 = vreinterpretq_u8_u32(d2.val[0]); *o2 = vreinterpretq_u8_u32(d1.val[0]); *o3 = vreinterpretq_u8_u32(d3.val[0]); *o4 = vreinterpretq_u8_u32(d0.val[1]); *o5 = vreinterpretq_u8_u32(d2.val[1]); *o6 = vreinterpretq_u8_u32(d1.val[1]); *o7 = vreinterpretq_u8_u32(d3.val[1]); } static inline void transpose_u8_16x16( const uint8x16_t i0, const uint8x16_t i1, const uint8x16_t i2, const uint8x16_t i3, const uint8x16_t i4, const uint8x16_t i5, const uint8x16_t i6, const uint8x16_t i7, const uint8x16_t i8, const uint8x16_t i9, const uint8x16_t i10, const uint8x16_t i11, const uint8x16_t i12, const uint8x16_t i13, const uint8x16_t i14, const uint8x16_t i15, uint8x16_t *o0, uint8x16_t *o1, uint8x16_t *o2, uint8x16_t *o3, uint8x16_t *o4, uint8x16_t *o5, uint8x16_t *o6, uint8x16_t *o7, uint8x16_t *o8, uint8x16_t *o9, uint8x16_t *o10, uint8x16_t *o11, uint8x16_t *o12, uint8x16_t *o13, uint8x16_t *o14, uint8x16_t *o15) { # 732 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint8x16x2_t b0 = vtrnq_u8(i0, i1); const uint8x16x2_t b1 = vtrnq_u8(i2, i3); const uint8x16x2_t b2 = vtrnq_u8(i4, i5); const uint8x16x2_t b3 = vtrnq_u8(i6, i7); const uint8x16x2_t b4 = vtrnq_u8(i8, i9); const uint8x16x2_t b5 = vtrnq_u8(i10, i11); const uint8x16x2_t b6 = vtrnq_u8(i12, i13); const uint8x16x2_t b7 = vtrnq_u8(i14, i15); # 758 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/transpose_neon.h" const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), vreinterpretq_u16_u8(b1.val[0])); const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), vreinterpretq_u16_u8(b1.val[1])); const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), vreinterpretq_u16_u8(b3.val[0])); const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), vreinterpretq_u16_u8(b3.val[1])); const uint16x8x2_t c4 = vtrnq_u16(vreinterpretq_u16_u8(b4.val[0]), vreinterpretq_u16_u8(b5.val[0])); const uint16x8x2_t c5 = vtrnq_u16(vreinterpretq_u16_u8(b4.val[1]), vreinterpretq_u16_u8(b5.val[1])); const uint16x8x2_t c6 = vtrnq_u16(vreinterpretq_u16_u8(b6.val[0]), vreinterpretq_u16_u8(b7.val[0])); const uint16x8x2_t 
c7 = vtrnq_u16(vreinterpretq_u16_u8(b6.val[1]), vreinterpretq_u16_u8(b7.val[1]));
  const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), vreinterpretq_u32_u16(c2.val[0]));
  const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), vreinterpretq_u32_u16(c2.val[1]));
  const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), vreinterpretq_u32_u16(c3.val[0]));
  const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), vreinterpretq_u32_u16(c3.val[1]));
  const uint32x4x2_t d4 = vtrnq_u32(vreinterpretq_u32_u16(c4.val[0]), vreinterpretq_u32_u16(c6.val[0]));
  const uint32x4x2_t d5 = vtrnq_u32(vreinterpretq_u32_u16(c4.val[1]), vreinterpretq_u32_u16(c6.val[1]));
  const uint32x4x2_t d6 = vtrnq_u32(vreinterpretq_u32_u16(c5.val[0]), vreinterpretq_u32_u16(c7.val[0]));
  const uint32x4x2_t d7 = vtrnq_u32(vreinterpretq_u32_u16(c5.val[1]), vreinterpretq_u32_u16(c7.val[1]));
  const uint8x16x2_t e0 = vpx_vtrnq_u64(d0.val[0], d4.val[0]);
  const uint8x16x2_t e1 = vpx_vtrnq_u64(d2.val[0], d6.val[0]);
  const uint8x16x2_t e2 = vpx_vtrnq_u64(d1.val[0], d5.val[0]);
  const uint8x16x2_t e3 = vpx_vtrnq_u64(d3.val[0], d7.val[0]);
  const uint8x16x2_t e4 = vpx_vtrnq_u64(d0.val[1], d4.val[1]);
  const uint8x16x2_t e5 = vpx_vtrnq_u64(d2.val[1], d6.val[1]);
  const uint8x16x2_t e6 = vpx_vtrnq_u64(d1.val[1], d5.val[1]);
  const uint8x16x2_t e7 = vpx_vtrnq_u64(d3.val[1], d7.val[1]);
  *o0 = e0.val[0]; *o1 = e1.val[0]; *o2 = e2.val[0]; *o3 = e3.val[0];
  *o4 = e4.val[0]; *o5 = e5.val[0]; *o6 = e6.val[0]; *o7 = e7.val[0];
  *o8 = e0.val[1]; *o9 = e1.val[1]; *o10 = e2.val[1]; *o11 = e3.val[1];
  *o12 = e4.val[1]; *o13 = e5.val[1]; *o14 = e6.val[1]; *o15 = e7.val[1];
}
# 18 "../../third_party/libvpx/source/libvpx/vpx_dsp/arm/idct_neon.h" 2
static inline int16x8_t load_tran_low_to_s16(const tran_low_t *buf) {
  return vld1q_s16(buf);
}

static inline int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a, const int16_t a_const) {
  return vqrdmulhq_n_s16(a, a_const * 2);
}

static inline int16x8_t add_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  int32x4_t temp_low = vaddl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vaddl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}

static inline int16x8_t sub_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  int32x4_t temp_low = vsubl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vsubl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}

static inline int16x8_t multiply_accumulate_shift_and_narrow_s16(
    const int16x8_t a, const int16_t a_const, const int16x8_t b, const int16_t b_const) {
  int32x4_t temp_low = vmull_n_s16(vget_low_s16(a), a_const);
  int32x4_t temp_high = vmull_n_s16(vget_high_s16(a), a_const);
  temp_low = vmlal_n_s16(temp_low, vget_low_s16(b), b_const);
  temp_high = vmlal_n_s16(temp_high, vget_high_s16(b), b_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}
static inline void load_and_transpose_s16_8x8(const int16_t *a, int a_stride,
                                              int16x8_t *a0, int16x8_t *a1,
                                              int16x8_t *a2, int16x8_t *a3,
                                              int16x8_t *a4, int16x8_t *a5,
                                              int16x8_t *a6, int16x8_t *a7) {
  *a0 = vld1q_s16(a); a += a_stride;
  *a1 = vld1q_s16(a); a += a_stride;
  *a2 = vld1q_s16(a); a += a_stride;
  *a3 = vld1q_s16(a); a += a_stride;
  *a4 = vld1q_s16(a); a += a_stride;
  *a5 = vld1q_s16(a); a += a_stride;
  *a6 = vld1q_s16(a); a += a_stride;
  *a7 = vld1q_s16(a);
  transpose_s16_8x8(a0, a1, a2, a3, a4, a5, a6, a7);
}

static inline void add_and_store_u8_s16(const int16x8_t a0, const int16x8_t a1,
                                        const int16x8_t a2, const int16x8_t a3,
                                        const int16x8_t a4, const int16x8_t a5,
                                        const int16x8_t a6, const int16x8_t a7,
                                        uint8_t *b, const int b_stride) {
  uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7;
  int16x8_t c0, c1, c2, c3, c4, c5, c6, c7;
  b0 = vld1_u8(b); b += b_stride;
  b1 = vld1_u8(b); b += b_stride;
  b2 = vld1_u8(b); b += b_stride;
  b3 = vld1_u8(b); b += b_stride;
  b4 = vld1_u8(b); b += b_stride;
  b5 = vld1_u8(b); b += b_stride;
  b6 = vld1_u8(b); b += b_stride;
  b7 = vld1_u8(b);
  b -= (7 * b_stride);
  c0 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b0)), a0, 6);
  c1 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b1)), a1, 6);
  c2 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b2)), a2, 6);
  c3 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b3)), a3, 6);
  c4 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b4)), a4, 6);
  c5 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b5)), a5, 6);
  c6 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b6)), a6, 6);
  c7 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b7)), a7, 6);
  b0 = vqmovun_s16(c0);
  b1 = vqmovun_s16(c1);
  b2 = vqmovun_s16(c2);
  b3 = vqmovun_s16(c3);
  b4 = vqmovun_s16(c4);
  b5 = vqmovun_s16(c5);
  b6 = vqmovun_s16(c6);
  b7 = vqmovun_s16(c7);
  vst1_u8(b, b0); b += b_stride;
  vst1_u8(b, b1); b += b_stride;
  vst1_u8(b, b2); b += b_stride;
  vst1_u8(b, b3); b += b_stride;
  vst1_u8(b, b4); b += b_stride;
  vst1_u8(b, b5); b += b_stride;
  vst1_u8(b, b6); b += b_stride;
  vst1_u8(b, b7);
}
// First pass of the 34-coefficient shortcut: a 32-point IDCT over the
// top-left 8x8 corner of the input (row stride 32), writing a 32x8 int16
// intermediate to output, one 8-lane vector per row.
static void idct32_6_neon(const int16_t *input, int16_t *output) {
  int16x8_t in0, in1, in2, in3, in4, in5, in6, in7;
  int16x8_t s1_0, s1_1, s1_2, s1_3, s1_4, s1_5, s1_6, s1_7, s1_8, s1_9, s1_10,
      s1_11, s1_12, s1_13, s1_14, s1_15, s1_16, s1_17, s1_18, s1_19, s1_20,
      s1_21, s1_22, s1_23, s1_24, s1_25, s1_26, s1_27, s1_28, s1_29, s1_30,
      s1_31;
  int16x8_t s2_0, s2_1, s2_2, s2_3, s2_4, s2_5, s2_6, s2_7, s2_8, s2_9, s2_10,
      s2_11, s2_12, s2_13, s2_14, s2_15, s2_16, s2_17, s2_18, s2_19, s2_20,
      s2_21, s2_22, s2_23, s2_24, s2_25, s2_26, s2_27, s2_28, s2_29, s2_30,
      s2_31;
  int16x8_t s3_24, s3_25, s3_26, s3_27;

  load_and_transpose_s16_8x8(input, 32, &in0, &in1, &in2, &in3, &in4, &in5,
                             &in6, &in7);

  // After the transpose, only in0..in5 carry nonzero data; in6 and in7 are
  // never referenced below.
  s1_16 = multiply_shift_and_narrow_s16(in1, cospi_31_64);
  s1_31 = multiply_shift_and_narrow_s16(in1, cospi_1_64);

  s1_20 = multiply_shift_and_narrow_s16(in5, cospi_27_64);
  s1_27 = multiply_shift_and_narrow_s16(in5, cospi_5_64);

  s1_23 = multiply_shift_and_narrow_s16(in3, -cospi_29_64);
  s1_24 = multiply_shift_and_narrow_s16(in3, cospi_3_64);

  s2_8 = multiply_shift_and_narrow_s16(in2, cospi_30_64);
  s2_15 = multiply_shift_and_narrow_s16(in2, cospi_2_64);

  s1_4 = multiply_shift_and_narrow_s16(in4, cospi_28_64);
  s1_7 = multiply_shift_and_narrow_s16(in4, cospi_4_64);

  s1_17 = multiply_accumulate_shift_and_narrow_s16(s1_16, -cospi_4_64, s1_31,
                                                   cospi_28_64);
  s1_30 = multiply_accumulate_shift_and_narrow_s16(s1_16, cospi_28_64, s1_31,
                                                   cospi_4_64);

  s1_21 = multiply_accumulate_shift_and_narrow_s16(s1_20, -cospi_20_64, s1_27,
                                                   cospi_12_64);
  s1_26 = multiply_accumulate_shift_and_narrow_s16(s1_20, cospi_12_64, s1_27,
                                                   cospi_20_64);

  s1_22 = multiply_accumulate_shift_and_narrow_s16(s1_23, -cospi_12_64, s1_24,
                                                   -cospi_20_64);
  s1_25 = multiply_accumulate_shift_and_narrow_s16(s1_23, -cospi_20_64, s1_24,
                                                   cospi_12_64);

  s1_0 = multiply_shift_and_narrow_s16(in0, cospi_16_64);

  s2_9 = multiply_accumulate_shift_and_narrow_s16(s2_8, -cospi_8_64, s2_15,
                                                  cospi_24_64);
  s2_14 = multiply_accumulate_shift_and_narrow_s16(s2_8, cospi_24_64, s2_15,
                                                   cospi_8_64);

  s2_20 = vsubq_s16(s1_23, s1_20);
  s2_21 = vsubq_s16(s1_22, s1_21);
  s2_22 = vaddq_s16(s1_21, s1_22);
  s2_23 = vaddq_s16(s1_20, s1_23);
  s2_24 = vaddq_s16(s1_24, s1_27);
  s2_25 = vaddq_s16(s1_25, s1_26);
  s2_26 = vsubq_s16(s1_25, s1_26);
  s2_27 = vsubq_s16(s1_24, s1_27);

  s1_5 = sub_multiply_shift_and_narrow_s16(s1_7, s1_4, cospi_16_64);
  s1_6 = add_multiply_shift_and_narrow_s16(s1_4, s1_7, cospi_16_64);

  s1_18 = multiply_accumulate_shift_and_narrow_s16(s1_17, -cospi_8_64, s1_30,
                                                   cospi_24_64);
  s1_29 = multiply_accumulate_shift_and_narrow_s16(s1_17, cospi_24_64, s1_30,
                                                   cospi_8_64);

  s1_19 = multiply_accumulate_shift_and_narrow_s16(s1_16, -cospi_8_64, s1_31,
                                                   cospi_24_64);
  s1_28 = multiply_accumulate_shift_and_narrow_s16(s1_16, cospi_24_64, s1_31,
                                                   cospi_8_64);

  s1_20 = multiply_accumulate_shift_and_narrow_s16(s2_20, -cospi_24_64, s2_27,
                                                   -cospi_8_64);
  s1_27 = multiply_accumulate_shift_and_narrow_s16(s2_20, -cospi_8_64, s2_27,
                                                   cospi_24_64);

  s1_21 = multiply_accumulate_shift_and_narrow_s16(s2_21, -cospi_24_64, s2_26,
                                                   -cospi_8_64);
  s1_26 = multiply_accumulate_shift_and_narrow_s16(s2_21, -cospi_8_64, s2_26,
                                                   cospi_24_64);

  s2_0 = vaddq_s16(s1_0, s1_7);
  s2_1 = vaddq_s16(s1_0, s1_6);
  s2_2 = vaddq_s16(s1_0, s1_5);
  s2_3 = vaddq_s16(s1_0, s1_4);
  s2_4 = vsubq_s16(s1_0, s1_4);
  s2_5 = vsubq_s16(s1_0, s1_5);
  s2_6 = vsubq_s16(s1_0, s1_6);
  s2_7 = vsubq_s16(s1_0, s1_7);

  s2_10 = sub_multiply_shift_and_narrow_s16(s2_14, s2_9, cospi_16_64);
  s2_13 = add_multiply_shift_and_narrow_s16(s2_9, s2_14, cospi_16_64);

  s2_11 = sub_multiply_shift_and_narrow_s16(s2_15, s2_8, cospi_16_64);
  s2_12 = add_multiply_shift_and_narrow_s16(s2_8, s2_15, cospi_16_64);

  s2_16 = vaddq_s16(s1_16, s2_23);
  s2_17 = vaddq_s16(s1_17, s2_22);
  s2_18 = vaddq_s16(s1_18, s1_21);
  s2_19 = vaddq_s16(s1_19, s1_20);
  s2_20 = vsubq_s16(s1_19, s1_20);
  s2_21 = vsubq_s16(s1_18, s1_21);
  s2_22 = vsubq_s16(s1_17, s2_22);
  s2_23 = vsubq_s16(s1_16, s2_23);

  s3_24 = vsubq_s16(s1_31, s2_24);
  s3_25 = vsubq_s16(s1_30, s2_25);
  s3_26 = vsubq_s16(s1_29, s1_26);
  s3_27 = vsubq_s16(s1_28, s1_27);
  s2_28 = vaddq_s16(s1_27, s1_28);
  s2_29 = vaddq_s16(s1_26, s1_29);
  s2_30 = vaddq_s16(s2_25, s1_30);
  s2_31 = vaddq_s16(s2_24, s1_31);

  s1_0 = vaddq_s16(s2_0, s2_15);
  s1_1 = vaddq_s16(s2_1, s2_14);
  s1_2 = vaddq_s16(s2_2, s2_13);
  s1_3 = vaddq_s16(s2_3, s2_12);
  s1_4 = vaddq_s16(s2_4, s2_11);
  s1_5 = vaddq_s16(s2_5, s2_10);
  s1_6 = vaddq_s16(s2_6, s2_9);
  s1_7 = vaddq_s16(s2_7, s2_8);
  s1_8 = vsubq_s16(s2_7, s2_8);
  s1_9 = vsubq_s16(s2_6, s2_9);
  s1_10 = vsubq_s16(s2_5, s2_10);
  s1_11 = vsubq_s16(s2_4, s2_11);
  s1_12 = vsubq_s16(s2_3, s2_12);
  s1_13 = vsubq_s16(s2_2, s2_13);
  s1_14 = vsubq_s16(s2_1, s2_14);
  s1_15 = vsubq_s16(s2_0, s2_15);

  s1_20 = sub_multiply_shift_and_narrow_s16(s3_27, s2_20, cospi_16_64);
  s1_27 = add_multiply_shift_and_narrow_s16(s2_20, s3_27, cospi_16_64);

  s1_21 = sub_multiply_shift_and_narrow_s16(s3_26, s2_21, cospi_16_64);
  s1_26 = add_multiply_shift_and_narrow_s16(s2_21, s3_26, cospi_16_64);

  s1_22 = sub_multiply_shift_and_narrow_s16(s3_25, s2_22, cospi_16_64);
  s1_25 = add_multiply_shift_and_narrow_s16(s2_22, s3_25, cospi_16_64);

  s1_23 = sub_multiply_shift_and_narrow_s16(s3_24, s2_23, cospi_16_64);
  s1_24 = add_multiply_shift_and_narrow_s16(s2_23, s3_24, cospi_16_64);

  // Final butterfly: write out the 32x8 intermediate, 8 lanes per row.
  vst1q_s16(output, vaddq_s16(s1_0, s2_31)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_1, s2_30)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_2, s2_29)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_3, s2_28)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_4, s1_27)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_5, s1_26)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_6, s1_25)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_7, s1_24)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_8, s1_23)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_9, s1_22)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_10, s1_21)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_11, s1_20)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_12, s2_19)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_13, s2_18)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_14, s2_17)); output += 8;
  vst1q_s16(output, vaddq_s16(s1_15, s2_16)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_15, s2_16)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_14, s2_17)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_13, s2_18)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_12, s2_19)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_11, s1_20)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_10, s1_21)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_9, s1_22)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_8, s1_23)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_7, s1_24)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_6, s1_25)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_5, s1_26)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_4, s1_27)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_3, s2_28)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_2, s2_29)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_1, s2_30)); output += 8;
  vst1q_s16(output, vsubq_s16(s1_0, s2_31));
}
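/* idct32_6_neon above is the row (first) pass; idct32_8_neon below is the
 * column (second) pass. The split relies on the 34-coefficient guarantee:
 * every nonzero input lies in the top-left 8x8 of the 32x32 block, so the
 * first pass only reads that corner, and of the eight transposed vectors only
 * in0..in5 are consumed -- hence the "_6" suffix. Both passes evaluate the
 * standard 32-point inverse DCT,
 *
 *   x[n] = sum_{k=0..31} c_k * X[k] * cos((2n + 1) * k * pi / 64),
 *
 * in Q14 fixed point via the cospi_*_64 table (the normalization constants
 * c_k are folded into the table and the two-pass scaling).
 */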
// Second pass: a full 8-input 32-point IDCT on one 8x8 tile of the
// intermediate (row stride 8), adding the result into 32 rows x 8 columns of
// the destination.
static void idct32_8_neon(const int16_t *input, uint8_t *output, int stride) {
  int16x8_t in0, in1, in2, in3, in4, in5, in6, in7;
  int16x8_t out0, out1, out2, out3, out4, out5, out6, out7;
  int16x8_t s1_0, s1_1, s1_2, s1_3, s1_4, s1_5, s1_6, s1_7, s1_8, s1_9, s1_10,
      s1_11, s1_12, s1_13, s1_14, s1_15, s1_16, s1_17, s1_18, s1_19, s1_20,
      s1_21, s1_22, s1_23, s1_24, s1_25, s1_26, s1_27, s1_28, s1_29, s1_30,
      s1_31;
  int16x8_t s2_0, s2_1, s2_2, s2_3, s2_4, s2_5, s2_6, s2_7, s2_8, s2_9, s2_10,
      s2_11, s2_12, s2_13, s2_14, s2_15, s2_16, s2_17, s2_18, s2_19, s2_20,
      s2_21, s2_22, s2_23, s2_24, s2_25, s2_26, s2_27, s2_28, s2_29, s2_30,
      s2_31;
  int16x8_t s3_24, s3_25, s3_26, s3_27;

  load_and_transpose_s16_8x8(input, 8, &in0, &in1, &in2, &in3, &in4, &in5,
                             &in6, &in7);

  s1_16 = multiply_shift_and_narrow_s16(in1, cospi_31_64);
  s1_31 = multiply_shift_and_narrow_s16(in1, cospi_1_64);

  s1_19 = multiply_shift_and_narrow_s16(in7, -cospi_25_64);
  s1_28 = multiply_shift_and_narrow_s16(in7, cospi_7_64);

  s1_20 = multiply_shift_and_narrow_s16(in5, cospi_27_64);
  s1_27 = multiply_shift_and_narrow_s16(in5, cospi_5_64);

  s1_23 = multiply_shift_and_narrow_s16(in3, -cospi_29_64);
  s1_24 = multiply_shift_and_narrow_s16(in3, cospi_3_64);

  s2_8 = multiply_shift_and_narrow_s16(in2, cospi_30_64);
  s2_15 = multiply_shift_and_narrow_s16(in2, cospi_2_64);

  s2_11 = multiply_shift_and_narrow_s16(in6, -cospi_26_64);
  s2_12 = multiply_shift_and_narrow_s16(in6, cospi_6_64);

  s1_4 = multiply_shift_and_narrow_s16(in4, cospi_28_64);
  s1_7 = multiply_shift_and_narrow_s16(in4, cospi_4_64);

  s1_17 = multiply_accumulate_shift_and_narrow_s16(s1_16, -cospi_4_64, s1_31,
                                                   cospi_28_64);
  s1_30 = multiply_accumulate_shift_and_narrow_s16(s1_16, cospi_28_64, s1_31,
                                                   cospi_4_64);

  s1_18 = multiply_accumulate_shift_and_narrow_s16(s1_19, -cospi_28_64, s1_28,
                                                   -cospi_4_64);
  s1_29 = multiply_accumulate_shift_and_narrow_s16(s1_19, -cospi_4_64, s1_28,
                                                   cospi_28_64);

  s1_21 = multiply_accumulate_shift_and_narrow_s16(s1_20, -cospi_20_64, s1_27,
                                                   cospi_12_64);
  s1_26 = multiply_accumulate_shift_and_narrow_s16(s1_20, cospi_12_64, s1_27,
                                                   cospi_20_64);

  s1_22 = multiply_accumulate_shift_and_narrow_s16(s1_23, -cospi_12_64, s1_24,
                                                   -cospi_20_64);
  s1_25 = multiply_accumulate_shift_and_narrow_s16(s1_23, -cospi_20_64, s1_24,
                                                   cospi_12_64);

  s1_0 = multiply_shift_and_narrow_s16(in0, cospi_16_64);

  s2_9 = multiply_accumulate_shift_and_narrow_s16(s2_8, -cospi_8_64, s2_15,
                                                  cospi_24_64);
  s2_14 = multiply_accumulate_shift_and_narrow_s16(s2_8, cospi_24_64, s2_15,
                                                   cospi_8_64);

  s2_10 = multiply_accumulate_shift_and_narrow_s16(s2_11, -cospi_24_64, s2_12,
                                                   -cospi_8_64);
  s2_13 = multiply_accumulate_shift_and_narrow_s16(s2_11, -cospi_8_64, s2_12,
                                                   cospi_24_64);

  s2_16 = vaddq_s16(s1_16, s1_19);
  s2_17 = vaddq_s16(s1_17, s1_18);
  s2_18 = vsubq_s16(s1_17, s1_18);
  s2_19 = vsubq_s16(s1_16, s1_19);
  s2_20 = vsubq_s16(s1_23, s1_20);
  s2_21 = vsubq_s16(s1_22, s1_21);
  s2_22 = vaddq_s16(s1_21, s1_22);
  s2_23 = vaddq_s16(s1_20, s1_23);
  s2_24 = vaddq_s16(s1_24, s1_27);
  s2_25 = vaddq_s16(s1_25, s1_26);
  s2_26 = vsubq_s16(s1_25, s1_26);
  s2_27 = vsubq_s16(s1_24, s1_27);
  s2_28 = vsubq_s16(s1_31, s1_28);
  s2_29 = vsubq_s16(s1_30, s1_29);
  s2_30 = vaddq_s16(s1_29, s1_30);
  s2_31 = vaddq_s16(s1_28, s1_31);

  s1_5 = sub_multiply_shift_and_narrow_s16(s1_7, s1_4, cospi_16_64);
  s1_6 = add_multiply_shift_and_narrow_s16(s1_4, s1_7, cospi_16_64);

  s1_8 = vaddq_s16(s2_8, s2_11);
  s1_9 = vaddq_s16(s2_9, s2_10);
  s1_10 = vsubq_s16(s2_9, s2_10);
  s1_11 = vsubq_s16(s2_8, s2_11);
  s1_12 = vsubq_s16(s2_15, s2_12);
  s1_13 = vsubq_s16(s2_14, s2_13);
  s1_14 = vaddq_s16(s2_13, s2_14);
  s1_15 = vaddq_s16(s2_12, s2_15);

  s1_18 = multiply_accumulate_shift_and_narrow_s16(s2_18, -cospi_8_64, s2_29,
                                                   cospi_24_64);
  s1_29 = multiply_accumulate_shift_and_narrow_s16(s2_18, cospi_24_64, s2_29,
                                                   cospi_8_64);

  s1_19 = multiply_accumulate_shift_and_narrow_s16(s2_19, -cospi_8_64, s2_28,
                                                   cospi_24_64);
  s1_28 = multiply_accumulate_shift_and_narrow_s16(s2_19, cospi_24_64, s2_28,
                                                   cospi_8_64);

  s1_20 = multiply_accumulate_shift_and_narrow_s16(s2_20, -cospi_24_64, s2_27,
                                                   -cospi_8_64);
  s1_27 = multiply_accumulate_shift_and_narrow_s16(s2_20, -cospi_8_64, s2_27,
                                                   cospi_24_64);

  s1_21 = multiply_accumulate_shift_and_narrow_s16(s2_21, -cospi_24_64, s2_26,
                                                   -cospi_8_64);
  s1_26 = multiply_accumulate_shift_and_narrow_s16(s2_21, -cospi_8_64, s2_26,
                                                   cospi_24_64);

  s2_0 = vaddq_s16(s1_0, s1_7);
  s2_1 = vaddq_s16(s1_0, s1_6);
  s2_2 = vaddq_s16(s1_0, s1_5);
  s2_3 = vaddq_s16(s1_0, s1_4);
  s2_4 = vsubq_s16(s1_0, s1_4);
  s2_5 = vsubq_s16(s1_0, s1_5);
  s2_6 = vsubq_s16(s1_0, s1_6);
  s2_7 = vsubq_s16(s1_0, s1_7);

  s2_10 = sub_multiply_shift_and_narrow_s16(s1_13, s1_10, cospi_16_64);
  s2_13 = add_multiply_shift_and_narrow_s16(s1_10, s1_13, cospi_16_64);

  s2_11 = sub_multiply_shift_and_narrow_s16(s1_12, s1_11, cospi_16_64);
  s2_12 = add_multiply_shift_and_narrow_s16(s1_11, s1_12, cospi_16_64);

  s1_16 = vaddq_s16(s2_16, s2_23);
  s1_17 = vaddq_s16(s2_17, s2_22);
  s2_18 = vaddq_s16(s1_18, s1_21);
  s2_19 = vaddq_s16(s1_19, s1_20);
  s2_20 = vsubq_s16(s1_19, s1_20);
  s2_21 = vsubq_s16(s1_18, s1_21);
  s1_22 = vsubq_s16(s2_17, s2_22);
  s1_23 = vsubq_s16(s2_16, s2_23);

  s3_24 = vsubq_s16(s2_31, s2_24);
  s3_25 = vsubq_s16(s2_30, s2_25);
  s3_26 = vsubq_s16(s1_29, s1_26);
  s3_27 = vsubq_s16(s1_28, s1_27);
  s2_28 = vaddq_s16(s1_27, s1_28);
  s2_29 = vaddq_s16(s1_26, s1_29);
  s2_30 = vaddq_s16(s2_25, s2_30);
  s2_31 = vaddq_s16(s2_24, s2_31);

  s1_0 = vaddq_s16(s2_0, s1_15);
  s1_1 = vaddq_s16(s2_1, s1_14);
  s1_2 = vaddq_s16(s2_2, s2_13);
  s1_3 = vaddq_s16(s2_3, s2_12);
  s1_4 = vaddq_s16(s2_4, s2_11);
  s1_5 = vaddq_s16(s2_5, s2_10);
  s1_6 = vaddq_s16(s2_6, s1_9);
  s1_7 = vaddq_s16(s2_7, s1_8);
  s1_8 = vsubq_s16(s2_7, s1_8);
  s1_9 = vsubq_s16(s2_6, s1_9);
  s1_10 = vsubq_s16(s2_5, s2_10);
  s1_11 = vsubq_s16(s2_4, s2_11);
  s1_12 = vsubq_s16(s2_3, s2_12);
  s1_13 = vsubq_s16(s2_2, s2_13);
  s1_14 = vsubq_s16(s2_1, s1_14);
  s1_15 = vsubq_s16(s2_0, s1_15);

  s1_20 = sub_multiply_shift_and_narrow_s16(s3_27, s2_20, cospi_16_64);
  s1_27 = add_multiply_shift_and_narrow_s16(s2_20, s3_27, cospi_16_64);

  s1_21 = sub_multiply_shift_and_narrow_s16(s3_26, s2_21, cospi_16_64);
  s1_26 = add_multiply_shift_and_narrow_s16(s2_21, s3_26, cospi_16_64);

  s2_22 = sub_multiply_shift_and_narrow_s16(s3_25, s1_22, cospi_16_64);
  s1_25 = add_multiply_shift_and_narrow_s16(s1_22, s3_25, cospi_16_64);

  s2_23 = sub_multiply_shift_and_narrow_s16(s3_24, s1_23, cospi_16_64);
  s1_24 = add_multiply_shift_and_narrow_s16(s1_23, s3_24, cospi_16_64);

  // Final butterfly, 8 output rows at a time, added into the destination.
  out0 = vaddq_s16(s1_0, s2_31);
  out1 = vaddq_s16(s1_1, s2_30);
  out2 = vaddq_s16(s1_2, s2_29);
  out3 = vaddq_s16(s1_3, s2_28);
  out4 = vaddq_s16(s1_4, s1_27);
  out5 = vaddq_s16(s1_5, s1_26);
  out6 = vaddq_s16(s1_6, s1_25);
  out7 = vaddq_s16(s1_7, s1_24);
  add_and_store_u8_s16(out0, out1, out2, out3, out4, out5, out6, out7, output,
                       stride);

  out0 = vaddq_s16(s1_8, s2_23);
  out1 = vaddq_s16(s1_9, s2_22);
  out2 = vaddq_s16(s1_10, s1_21);
  out3 = vaddq_s16(s1_11, s1_20);
  out4 = vaddq_s16(s1_12, s2_19);
  out5 = vaddq_s16(s1_13, s2_18);
  out6 = vaddq_s16(s1_14, s1_17);
  out7 = vaddq_s16(s1_15, s1_16);
  add_and_store_u8_s16(out0, out1, out2, out3, out4, out5, out6, out7,
                       output + (8 * stride), stride);

  out0 = vsubq_s16(s1_15, s1_16);
  out1 = vsubq_s16(s1_14, s1_17);
  out2 = vsubq_s16(s1_13, s2_18);
  out3 = vsubq_s16(s1_12, s2_19);
  out4 = vsubq_s16(s1_11, s1_20);
  out5 = vsubq_s16(s1_10, s1_21);
  out6 = vsubq_s16(s1_9, s2_22);
  out7 = vsubq_s16(s1_8, s2_23);
  add_and_store_u8_s16(out0, out1, out2, out3, out4, out5, out6, out7,
                       output + (16 * stride), stride);

  out0 = vsubq_s16(s1_7, s1_24);
  out1 = vsubq_s16(s1_6, s1_25);
  out2 = vsubq_s16(s1_5, s1_26);
  out3 = vsubq_s16(s1_4, s1_27);
  out4 = vsubq_s16(s1_3, s2_28);
  out5 = vsubq_s16(s1_2, s2_29);
  out6 = vsubq_s16(s1_1, s2_30);
  out7 = vsubq_s16(s1_0, s2_31);
  add_and_store_u8_s16(out0, out1, out2, out3, out4, out5, out6, out7,
                       output + (24 * stride), stride);
}
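/* Per-pixel scalar equivalent of the add_and_store_u8_s16 path used above.
 * The 2-D transform output is scaled by 2^6, hence the rounding shift by 6;
 * the helper name is hypothetical, only the arithmetic is what the NEON
 * intrinsics compute:
 *
 *   static inline uint8_t sketch_clip_add(uint8_t pred, int16_t resid) {
 *     const int v = (int)pred + ((resid + 32) >> 6);    // vrsraq_n_s16(.., 6)
 *     return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);  // vqmovun_s16
 *   }
 */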
void vpx_idct32x32_34_add_neon(const int16_t *input, uint8_t *dest,
                               int stride) {
  int i;
  int16_t temp[32 * 8];
  int16_t *t = temp;

  // Row pass over the nonzero 8x8 corner -> 32x8 intermediate in temp.
  idct32_6_neon(input, t);

  // Column pass: four 8-column slices cover the full 32x32 output.
  for (i = 0; i < 32; i += 8) {
    idct32_8_neon(t, dest, stride);
    t += (8 * 8);
    dest += 8;
  }
}
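/* Usage sketch (hypothetical caller, not part of this file): the VP9 decoder
 * selects this variant when the block's end-of-block index is at most 34,
 * which confines all nonzero dequantized coefficients to the top-left 8x8 of
 * the 32x32 block. Buffer names here are illustrative:
 *
 *   int16_t coeffs[32 * 32];   // dequantized coefficients, eob <= 34
 *   uint8_t *recon = frame + y * frame_stride + x;  // 32x32 predictor block
 *   vpx_idct32x32_34_add_neon(coeffs, recon, frame_stride);
 *
 * The residual is added in place on top of the predictor held in recon.
 */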