Refactor gdb.reverse/insn-reverse.c

Changes in v2:

- Renamed arch-specific files to insn-reverse-<arch>.c.
- Adjusted according to reviews.

This patch prepares things for an upcoming testcase for record/replay support
on x86. As is, gdb.reverse/insn-reverse.c is divided into sections guarded by
a few #if blocks, and right now it only handles arm/aarch64.

If we move forward with requiring more tests for record/replay on different
architectures, I think this has the potential to become cluttered with a lot
of differing arch-specific code in the same file.

I've broken up the main file into other files with arch-specific bits
(insn-reverse-<arch>.c). The main file will hold the generic pieces that will
take care of calling the tests.

The arch-specific c files are then included at the top of the generic c file.

I've also added a generic initialize function since we need to run pre-test
checks on x86 to make sure the rdrand/rdseed instructions are supported,
otherwise we will run into a SIGILL.

The arch-specific files will implement their own initialize function with
whatever makes sense. Right now the aarch64 and arm files have an empty
initialization function.

Does this look reasonable?

gdb/testsuite/ChangeLog:

2017-01-26  Luis Machado  <lgustavo@codesourcery.com>

	* gdb.reverse/insn-reverse.c: Move arm and aarch64 code to their own
	files.
	(initialize): New function conditionally defined.
	(testcases): Move within conditional block.
	(main): Call initialize.
	* gdb.reverse/insn-reverse-aarch64.c: New file, based on aarch64 bits
	of gdb.reverse/insn-reverse.c.
	* gdb.reverse/insn-reverse-arm.c: New file, based on arm bits of
	gdb.reverse/insn-reverse.c.
This commit is contained in:
Luis Machado 2017-01-26 10:34:42 -06:00
parent 874a1c8c32
commit 8b00c17616

View File

@ -15,141 +15,32 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#if (defined __aarch64__)
#include <arm_neon.h>
#endif
#if (defined __aarch64__)
/* Exercise AArch64 Advanced SIMD LD1 multiple-structure loads (one-, two-
   and three-register forms) so GDB's record/replay support can be tested
   against their decodings.  The loaded values are irrelevant; only the
   instruction encodings matter.  The widest load reads 24 bytes, within
   the 32-byte BUF.  */
static void
load (void)
{
int buf[8];
asm ("ld1 { v1.8b }, [%[buf]]\n"
"ld1 { v2.8b, v3.8b }, [%[buf]]\n"
"ld1 { v3.8b, v4.8b, v5.8b }, [%[buf]]\n"
:
: [buf] "r" (buf)
: /* No clobbers */);
}
/* Exercise AArch64 INS (element from general register) into 32-bit and
   64-bit vector lanes, for record/replay instruction decoding.  The
   Neon intrinsics only set up register operands of the right types.  */
static void
move (void)
{
float32x2_t b1_ = vdup_n_f32(123.0f);
float32_t a1_ = 0;
float64x1_t b2_ = vdup_n_f64(456.0f);
float64_t a2_ = 0;
/* Insert a 32-bit GP value into lane 0 of a vector register.  */
asm ("ins %0.s[0], %w1\n"
: "=w"(b1_)
: "r"(a1_), "0"(b1_)
: /* No clobbers */);
/* Insert a 64-bit GP value into lane 1 of a vector register.  */
asm ("ins %0.d[1], %x1\n"
: "=w"(b2_)
: "r"(a2_), "0"(b2_)
: /* No clobbers */);
}
/* Exercise AArch64 Advanced SIMD modified-immediate instructions (BIC
   with an immediate, with and without a shift), for record/replay
   instruction decoding.  */
static void
adv_simd_mod_imm (void)
{
float32x2_t a1 = {2.0, 4.0};
asm ("bic %0.2s, #1\n"
"bic %0.2s, #1, lsl #8\n"
: "=w"(a1)
: "0"(a1)
: /* No clobbers */);
}
/* Exercise an AArch64 Advanced SIMD scalar-by-indexed-element
   instruction (FMLA with a vector element operand), for record/replay
   instruction decoding.  RESULT is intentionally unused afterwards.  */
static void
adv_simd_scalar_index (void)
{
float64x2_t b_ = {0.0, 0.0};
float64_t a_ = 1.0;
float64_t result;
asm ("fmla %d0,%d1,%2.d[1]"
: "=w"(result)
: "w"(a_), "w"(b_)
: /* No clobbers */);
}
/* Exercise an AArch64 Advanced SIMD widening multiply-accumulate
   (SMLAL), for record/replay instruction decoding.  Registers are
   named directly, so no operands or clobbers are declared; the
   resulting values are irrelevant to the test.  */
static void
adv_simd_smlal (void)
{
asm ("smlal v13.2d, v8.2s, v0.2s");
}
/* Exercise an AArch64 Advanced SIMD fixed-point convert with a shift
   amount (FCVTZS), for record/replay instruction decoding.  */
static void
adv_simd_vect_shift (void)
{
asm ("fcvtzs s0, s0, #1");
}
#elif (defined __arm__)
/* Exercise ARM VFP extension-register load instructions (VLDR and VLDM,
   single and double precision), for record/replay instruction decoding.
   NOTE(review): IN is only 8 bytes, but "vldm {d3-d4}" reads 16 bytes
   and "vldm {s9-s11}" reads 12 — these over-read past the buffer.
   Harmless for decoding purposes since the values are discarded, but
   technically out-of-bounds; consider enlarging IN.  */
static void
ext_reg_load (void)
{
char in[8];
asm ("vldr d0, [%0]" : : "r" (in));
asm ("vldr s3, [%0]" : : "r" (in));
asm ("vldm %0, {d3-d4}" : : "r" (in));
asm ("vldm %0, {s9-s11}" : : "r" (in));
}
/* Exercise ARM VFP register-transfer instructions (VMOV between core
   registers and single/double extension registers, both directions),
   for record/replay instruction decoding.
   NOTE(review): D is read uninitialized by the "w" (d) operand below —
   presumably deliberate since only the encodings are under test, but it
   is technically an uninitialized read; confirm or initialize it.  */
static void
ext_reg_mov (void)
{
int i, j;
double d;
i = 1;
j = 2;
asm ("vmov s4, s5, %0, %1" : "=r" (i), "=r" (j): );
asm ("vmov s7, s8, %0, %1" : "=r" (i), "=r" (j): );
asm ("vmov %0, %1, s10, s11" : : "r" (i), "r" (j));
asm ("vmov %0, %1, s1, s2" : : "r" (i), "r" (j));
asm ("vmov %P2, %0, %1" : "=r" (i), "=r" (j): "w" (d));
asm ("vmov %1, %2, %P0" : "=w" (d) : "r" (i), "r" (j));
}
/* Exercise ARM VFP extension-register push/pop (VPUSH/VPOP), for
   record/replay instruction decoding.
   NOTE(review): D is read uninitialized by the VPUSH operand — likely
   intentional since only the encoding matters, but worth confirming.  */
static void
ext_reg_push_pop (void)
{
double d;
asm ("vpush {%P0}" : : "w" (d));
asm ("vpop {%P0}" : : "w" (d));
}
#endif
typedef void (*testcase_ftype) (void);
/* Functions testing instruction decodings. GDB will read n_testcases
to know how many functions to test. */
/* The arch-specific files need to implement both the initialize function
and define the testcases array. */
/* Table of test functions; GDB reads its length (via n_testcases) to
   know how many to step through in reverse.
   NOTE(review): this span is a rendered diff with the +/- markers lost:
   the named entries are the OLD (removed) contents, while the
   '#include "insn-reverse-aarch64.c"' line belongs to the NEW version,
   where each arch file defines its own testcases array.  Do not read
   this as a single coherent definition.  */
static testcase_ftype testcases[] =
{
#if (defined __aarch64__)
load,
move,
adv_simd_mod_imm,
adv_simd_scalar_index,
adv_simd_smlal,
adv_simd_vect_shift,
#include "insn-reverse-aarch64.c"
#elif (defined __arm__)
ext_reg_load,
ext_reg_mov,
ext_reg_push_pop,
#endif
};
#include "insn-reverse-arm.c"
#else
/* We get here if the current architecture being tested doesn't have any
record/replay instruction decoding tests implemented. */
static testcase_ftype testcases[] = {};
/* Dummy implementation in case this target doesn't have any record/replay
instruction decoding tests implemented. */
/* Fallback no-op initializer used when the target architecture has no
   record/replay decoding tests; keeps the unconditional initialize ()
   call in main () linkable.  Arch-specific files (e.g. the upcoming
   x86 one, which must probe rdrand/rdseed support) provide their own
   real implementation.  */
static void
initialize (void)
{
}
#endif
/* GDB will read n_testcases to know how many functions to test. The
   functions are implemented in arch-specific files and the testcases
   array is defined together with them. */
/* Element count of testcases[]; valid because testcases is a true
   array in this translation unit, not a pointer.  */
static int n_testcases = (sizeof (testcases) / sizeof (testcase_ftype));
int
@ -157,6 +48,9 @@ main ()
{
int i = 0;
/* Initialize any required arch-specific bits. */
initialize ();
for (i = 0; i < n_testcases; i++)
testcases[i] ();