x86_64: Fix svml_s_asinf16_core_avx512.S code formatting

This commit contains the following formatting changes:

1. Instructions preceded by a tab.
2. Instructions less than 8 characters in length have a tab
   between them and the first operand (see the short example
   after this list).
3. Instructions greater than 7 characters in length have a
   space between them and the first operand.
4. Tabs after `#define`d names and their values.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Indent comments with code.
7. Remove redundant .text section.
8. 1 space between line content and line comment.
9. Space after all commas.
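
A minimal illustration of rules 2 and 3 (not part of the original commit
message), using two instruction shapes that appear in this file: a tab
separates the short `jne` mnemonic from its operand, while a single space
follows the longer `vfnmadd231ps`. The trailing comments here are
illustrative annotations, not lines from the file.

	jne	L(SPECIAL_VALUES_BRANCH)                   /* < 8 chars: tab before operand */
	vfnmadd231ps {rn-sae}, %zmm2, %zmm6, %zmm9 /* > 7 chars: space before operand */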

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
Author: Sunil K Pandey
Date:   2022-03-07 10:47:09 -08:00
parent 7a5806ce1c
commit 62871830a4

@@ -28,233 +28,232 @@
/* Offsets for data table __svml_sasin_data_internal
*/
#define AbsMask 0
#define OneHalf 64
#define SmallNorm 128
#define One 192
#define Two 256
#define sqrt_coeff_1 320
#define sqrt_coeff_2 384
#define poly_coeff_1 448
#define poly_coeff_2 512
#define poly_coeff_3 576
#define poly_coeff_4 640
#define poly_coeff_5 704
#define Pi2H 768
#include <sysdep.h>
.section .text.exex512, "ax", @progbits
ENTRY(_ZGVeN16v_asinf_skx)
pushq %rbp
cfi_def_cfa_offset(16)
movq %rsp, %rbp
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
andq $-64, %rsp
subq $192, %rsp
vmovups __svml_sasin_data_internal(%rip), %zmm4
vmovups OneHalf+__svml_sasin_data_internal(%rip), %zmm6
/* SQ ~ -2*sqrt(Y) */
vmovups SmallNorm+__svml_sasin_data_internal(%rip), %zmm8
vmovups Two+__svml_sasin_data_internal(%rip), %zmm12
vmovups sqrt_coeff_1+__svml_sasin_data_internal(%rip), %zmm13
vmovups One+__svml_sasin_data_internal(%rip), %zmm7
vmovaps %zmm0, %zmm3
/* x = |arg| */
vandps %zmm3, %zmm4, %zmm2
vandnps %zmm3, %zmm4, %zmm1
/* x^2 */
vmulps {rn-sae}, %zmm2, %zmm2, %zmm5
vcmpps $17, {sae}, %zmm2, %zmm7, %k0
vcmpps $21, {sae}, %zmm6, %zmm2, %k2
vmovups poly_coeff_2+__svml_sasin_data_internal(%rip), %zmm7
kmovw %k0, %edx
/* Y = 0.5 - 0.5*x */
vmovaps %zmm6, %zmm9
vfnmadd231ps {rn-sae}, %zmm2, %zmm6, %zmm9
vmovups poly_coeff_5+__svml_sasin_data_internal(%rip), %zmm6
vrsqrt14ps %zmm9, %zmm10
vcmpps $17, {sae}, %zmm8, %zmm9, %k1
vminps {sae}, %zmm9, %zmm5, %zmm0
vmovups sqrt_coeff_2+__svml_sasin_data_internal(%rip), %zmm8
vmovups poly_coeff_4+__svml_sasin_data_internal(%rip), %zmm5
vxorps %zmm10, %zmm10, %zmm10{%k1}
vaddps {rn-sae}, %zmm9, %zmm9, %zmm14
vmulps {rn-sae}, %zmm10, %zmm10, %zmm11
vmulps {rn-sae}, %zmm10, %zmm14, %zmm4
vfmsub213ps {rn-sae}, %zmm12, %zmm11, %zmm14
vmulps {rn-sae}, %zmm14, %zmm4, %zmm15
vfmadd231ps {rn-sae}, %zmm14, %zmm13, %zmm8
vmovups poly_coeff_3+__svml_sasin_data_internal(%rip), %zmm14
/* polynomial */
vmovups poly_coeff_1+__svml_sasin_data_internal(%rip), %zmm13
vfmsub213ps {rn-sae}, %zmm4, %zmm15, %zmm8
vfmadd231ps {rn-sae}, %zmm0, %zmm14, %zmm5
vfmadd231ps {rn-sae}, %zmm0, %zmm13, %zmm7
vmulps {rn-sae}, %zmm0, %zmm0, %zmm15
vblendmps %zmm8, %zmm2, %zmm2{%k2}
vfmadd213ps {rn-sae}, %zmm5, %zmm15, %zmm7
vfmadd213ps {rn-sae}, %zmm6, %zmm0, %zmm7
vmulps {rn-sae}, %zmm0, %zmm7, %zmm9
vmovups Pi2H+__svml_sasin_data_internal(%rip), %zmm0
vfmadd213ps {rn-sae}, %zmm2, %zmm2, %zmm9
vaddps {rn-sae}, %zmm0, %zmm9, %zmm9{%k2}
vxorps %zmm1, %zmm9, %zmm0
testl %edx, %edx
/* Go to special inputs processing branch */
jne L(SPECIAL_VALUES_BRANCH)
# LOE rbx r12 r13 r14 r15 edx zmm0 zmm3
/* Restore registers
* and exit the function
*/
L(EXIT):
movq %rbp, %rsp
popq %rbp
cfi_def_cfa(7, 8)
cfi_restore(6)
ret
cfi_def_cfa(6, 16)
cfi_offset(6, -16)
/* Branch to process
* special inputs
*/
L(SPECIAL_VALUES_BRANCH):
vmovups %zmm3, 64(%rsp)
vmovups %zmm0, 128(%rsp)
# LOE rbx r12 r13 r14 r15 edx zmm0
xorl %eax, %eax
# LOE rbx r12 r13 r14 r15 eax edx
vzeroupper
movq %r12, 16(%rsp)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
movl %eax, %r12d
movq %r13, 8(%rsp)
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
movl %edx, %r13d
movq %r14, (%rsp)
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r15 r12d r13d
/* Range mask
* bits check
*/
L(RANGEMASK_CHECK):
btl %r12d, %r13d
/* Call scalar math function */
jc L(SCALAR_MATH_CALL)
# LOE rbx r15 r12d r13d
/* Special inputs
* processing loop
*/
L(SPECIAL_VALUES_LOOP):
incl %r12d
cmpl $16, %r12d
/* Check bits in range mask */
jl L(RANGEMASK_CHECK)
# LOE rbx r15 r12d r13d
movq 16(%rsp), %r12
cfi_restore(12)
movq 8(%rsp), %r13
cfi_restore(13)
movq (%rsp), %r14
cfi_restore(14)
vmovups 128(%rsp), %zmm0
/* Go to exit */
jmp L(EXIT)
/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
# LOE rbx r12 r13 r14 r15 zmm0
/* Scalar math function call
* to process special input
*/
L(SCALAR_MATH_CALL):
movl %r12d, %r14d
movss 64(%rsp, %r14, 4), %xmm0
call asinf@PLT
# LOE rbx r14 r15 r12d r13d xmm0
movss %xmm0, 128(%rsp, %r14, 4)
/* Process special inputs in loop */
jmp L(SPECIAL_VALUES_LOOP)
# LOE rbx r15 r12d r13d
END(_ZGVeN16v_asinf_skx)
.section .rodata, "a"
.align 64
#ifdef __svml_sasin_data_internal_typedef
typedef unsigned int VUINT32;
typedef struct {
__declspec(align(64)) VUINT32 AbsMask[16][1];
__declspec(align(64)) VUINT32 OneHalf[16][1];
__declspec(align(64)) VUINT32 SmallNorm[16][1];
__declspec(align(64)) VUINT32 One[16][1];
__declspec(align(64)) VUINT32 Two[16][1];
__declspec(align(64)) VUINT32 sqrt_coeff[2][16][1];
__declspec(align(64)) VUINT32 poly_coeff[5][16][1];
__declspec(align(64)) VUINT32 Pi2H[16][1];
} __svml_sasin_data_internal;
#endif
__svml_sasin_data_internal:
/* AbsMask */
.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff
/* OneHalf */
.align 64
.long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000
/* SmallNorm */
.align 64
.long 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000, 0x2f800000
/* One */
.align 64
.long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
/* Two */
.align 64
.long 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000
/* sqrt_coeff[2] */
.align 64
.long 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004, 0xbdC00004 /* sqrt_coeff2 */
.long 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001, 0x3e800001 /* sqrt_coeff1 */
/* poly_coeff[5] */
.align 64
.long 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07, 0x3d2EDC07 /* poly_coeff5 */
.long 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B, 0x3CC32A6B /* poly_coeff4 */
.long 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4, 0x3d3A9AB4 /* poly_coeff3 */
.long 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12, 0x3d997C12 /* poly_coeff2 */
.long 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF, 0x3e2AAAFF /* poly_coeff1 */
/* Pi2H */
.align 64
.long 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB, 0x3fc90FDB
.align 64
.type __svml_sasin_data_internal, @object
.size __svml_sasin_data_internal, .-__svml_sasin_data_internal