re PR target/50305 (Inline asm reload failure when building Linux kernel)
gcc/
	PR target/50305
	* config/arm/arm.c (arm_legitimize_reload_address): Recognize
	output of a previous pass through legitimize_reload_address.
	Do not attempt to optimize addresses if the base register is
	equivalent to a constant.

gcc/testsuite/
	PR target/50305
	* gcc.target/arm/pr50305.c: New test.

From-SVN: r179603
commit 4da6de81bc
parent fa7fd586c3
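To see the failure this commit addresses, one way (a sketch only; the cross-compiler name is just an example, and any GCC configured for an ARMv7-A target should behave the same) is to build the new testcase added below with the options from its dg-options line:

	arm-linux-gnueabi-gcc -O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3 -S pr50305.c

Without the fix, reload is unable to satisfy the 64-bit ldrexd/strexd inline asm when the frame pointer is kept, which is the "inline asm reload failure" reported from the Linux kernel build in PR target/50305.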
gcc/ChangeLog

@@ -1,3 +1,11 @@
+2011-10-06  Ulrich Weigand  <ulrich.weigand@linaro.org>
+
+	PR target/50305
+	* config/arm/arm.c (arm_legitimize_reload_address): Recognize
+	output of a previous pass through legitimize_reload_address.
+	Do not attempt to optimize addresses if the base register is
+	equivalent to a constant.
+
 2011-10-06  Andreas Krebbel  <Andreas.Krebbel@de.ibm.com>
 
 	* function.c (thread_prologue_and_epilogue_insns): Mark
gcc/config/arm/arm.c

@@ -6550,9 +6550,26 @@ arm_legitimize_reload_address (rtx *p,
 			       int opnum, int type,
 			       int ind_levels ATTRIBUTE_UNUSED)
 {
+  /* We must recognize output that we have already generated ourselves.  */
+  if (GET_CODE (*p) == PLUS
+      && GET_CODE (XEXP (*p, 0)) == PLUS
+      && GET_CODE (XEXP (XEXP (*p, 0), 0)) == REG
+      && GET_CODE (XEXP (XEXP (*p, 0), 1)) == CONST_INT
+      && GET_CODE (XEXP (*p, 1)) == CONST_INT)
+    {
+      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
+		   MODE_BASE_REG_CLASS (mode), GET_MODE (*p),
+		   VOIDmode, 0, 0, opnum, (enum reload_type) type);
+      return true;
+    }
+
   if (GET_CODE (*p) == PLUS
       && GET_CODE (XEXP (*p, 0)) == REG
       && ARM_REGNO_OK_FOR_BASE_P (REGNO (XEXP (*p, 0)))
+      /* If the base register is equivalent to a constant, let the generic
+	 code handle it.  Otherwise we will run into problems if a future
+	 reload pass decides to rematerialize the constant.  */
+      && !reg_equiv_constant (ORIGINAL_REGNO (XEXP (*p, 0)))
       && GET_CODE (XEXP (*p, 1)) == CONST_INT)
     {
       HOST_WIDE_INT val = INTVAL (XEXP (*p, 1));
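As a reading aid for the hunk above (an illustrative sketch in GCC's RTL notation, not text from the commit; Rn, HIGH and LOW are placeholders): the first added block accepts addresses of the shape that arm_legitimize_reload_address itself produces when it splits an out-of-range displacement,

	(plus (plus (reg Rn) (const_int HIGH))	; inner sum, reloaded into a base register
	      (const_int LOW))			; small remainder left in the address

and reloads only the inner PLUS via push_reload, so a later reload pass recognizes its own earlier output instead of falling through to the generic code and failing.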
gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
+2011-10-06  Ulrich Weigand  <ulrich.weigand@linaro.org>
+
+	PR target/50305
+	* gcc.target/arm/pr50305.c: New test.
+
 2011-10-06  Richard Guenther  <rguenther@suse.de>
 
 	PR tree-optimization/38884
gcc/testsuite/gcc.target/arm/pr50305.c (new file, 60 lines)
@@ -0,0 +1,60 @@
+/* { dg-do compile } */
+/* { dg-skip-if "incompatible options" { arm*-*-* } { "-march=*" } { "-march=armv7-a" } } */
+/* { dg-options "-O2 -fno-omit-frame-pointer -marm -march=armv7-a -mfpu=vfp3" } */
+
+struct event {
+  unsigned long long id;
+  unsigned int flag;
+};
+
+void dummy(void)
+{
+  /* This is here to ensure that the offset of perf_event_id below
+     relative to the LANCHOR symbol exceeds the allowed displacement.  */
+  static int __warned[300];
+  __warned[0] = 1;
+}
+
+extern void *kmem_cache_alloc_trace (void *cachep);
+extern void *cs_cachep;
+extern int nr_cpu_ids;
+
+struct event *
+event_alloc (int cpu)
+{
+  static unsigned long long __attribute__((aligned(8))) perf_event_id;
+  struct event *event;
+  unsigned long long result;
+  unsigned long tmp;
+
+  if (cpu >= nr_cpu_ids)
+    return 0;
+
+  event = kmem_cache_alloc_trace (cs_cachep);
+
+  __asm__ __volatile__ ("dmb" : : : "memory");
+
+  __asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (perf_event_id)
+	: "r" (&perf_event_id), "r" (1LL)
+	: "cc");
+
+  __asm__ __volatile__ ("dmb" : : : "memory");
+
+  event->id = result;
+
+  if (cpu)
+    event->flag = 1;
+
+  for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+    kmem_cache_alloc_trace (cs_cachep);
+
+  return event;
+}
+
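Assuming a configured GCC build tree targeting ARM, the new test can be run on its own through the usual DejaGnu entry point for this directory, for example:

	make check-gcc RUNTESTFLAGS="arm.exp=pr50305.c"

The dg-skip-if line keeps the test from being cycled through multilib option sets that already pass an -march value other than armv7-a.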