re PR target/68263 (Vector "*mov<mode>_internal" fails to handle misaligned load/store from reload)

	PR target/68263
	* config/i386/i386.h (BIGGEST_ALIGNMENT): Always define
	to 32 for IAMCU.
	* config/i386/sse.md (*mov<mode>_internal): Always enable
	AVX and SSE unaligned moves for IAMCU.

From-SVN: r230456
Author: Uros Bizjak
Date:   2015-11-17 10:45:35 +01:00
Commit: 0076c82f76
Parent: b6eab8196c

2 changed files with 7 additions and 7 deletions
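The IAMCU psABI caps object and stack alignment at 32 bits, so vector operands reaching *mov<mode>_internal (including spill slots created by reload) cannot be assumed to be 16-byte aligned. A minimal sketch of the kind of code affected, assuming SSE is enabled on top of -miamcu (e.g. -miamcu -msse2 -O2); the function name and flags are chosen for this illustration, not taken from the PR:

typedef float v4sf __attribute__ ((vector_size (16)));

/* Under -miamcu, *src and *dst are only guaranteed 4-byte alignment, so the
   vector moves emitted for these accesses must use the unaligned forms.  */
v4sf
copy_v4sf (const v4sf *src, v4sf *dst)
{
  v4sf tmp = *src;   /* potentially 4-byte-aligned load */
  *dst = tmp;        /* potentially 4-byte-aligned store */
  return tmp;
}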

gcc/config/i386/i386.h

@@ -814,7 +814,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
    TARGET_ABSOLUTE_BIGGEST_ALIGNMENT.  */
 #define BIGGEST_ALIGNMENT \
-  (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : (TARGET_IAMCU ? 32 : 128)))
+  (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128)))
 
 /* Maximum stack alignment.  */
 #define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT
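The only change here is the ordering of the checks: previously, enabling AVX or AVX-512 together with -miamcu raised BIGGEST_ALIGNMENT above the 32-bit cap the IAMCU ABI requires. A small stand-alone sketch of how the two definitions evaluate, with hypothetical stand-in values for the target macros:

#include <stdio.h>

/* Stand-in values for illustration only: an IAMCU target with AVX enabled.  */
#define TARGET_IAMCU   1
#define TARGET_AVX512F 0
#define TARGET_AVX     1

#define OLD_BIGGEST_ALIGNMENT \
  (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : (TARGET_IAMCU ? 32 : 128)))
#define NEW_BIGGEST_ALIGNMENT \
  (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128)))

int
main (void)
{
  /* The old ordering lets -mavx override the IAMCU cap; the new one does not.  */
  printf ("old: %d  new: %d\n", OLD_BIGGEST_ALIGNMENT, NEW_BIGGEST_ALIGNMENT);
  return 0;   /* prints "old: 256  new: 32" */
}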

gcc/config/i386/sse.md

@@ -892,30 +892,30 @@
     case MODE_V16SF:
     case MODE_V8SF:
     case MODE_V4SF:
-      if (TARGET_AVX
+      if ((TARGET_AVX || TARGET_IAMCU)
           && (misaligned_operand (operands[0], <MODE>mode)
               || misaligned_operand (operands[1], <MODE>mode)))
-        return "vmovups\t{%1, %0|%0, %1}";
+        return "%vmovups\t{%1, %0|%0, %1}";
       else
         return "%vmovaps\t{%1, %0|%0, %1}";
 
     case MODE_V8DF:
     case MODE_V4DF:
     case MODE_V2DF:
-      if (TARGET_AVX
+      if ((TARGET_AVX || TARGET_IAMCU)
           && (misaligned_operand (operands[0], <MODE>mode)
               || misaligned_operand (operands[1], <MODE>mode)))
-        return "vmovupd\t{%1, %0|%0, %1}";
+        return "%vmovupd\t{%1, %0|%0, %1}";
       else
         return "%vmovapd\t{%1, %0|%0, %1}";
 
     case MODE_OI:
     case MODE_TI:
-      if (TARGET_AVX
+      if ((TARGET_AVX || TARGET_IAMCU)
           && (misaligned_operand (operands[0], <MODE>mode)
               || misaligned_operand (operands[1], <MODE>mode)))
         return TARGET_AVX512VL ? "vmovdqu64\t{%1, %0|%0, %1}"
-                               : "vmovdqu\t{%1, %0|%0, %1}";
+                               : "%vmovdqu\t{%1, %0|%0, %1}";
       else
         return TARGET_AVX512VL ? "vmovdqa64\t{%1, %0|%0, %1}"
                                : "%vmovdqa\t{%1, %0|%0, %1}";