/* This file is automatically generated by aarch64-gen. Do not edit! */
/* Copyright (C) 2012-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "aarch64-opc.h"

const struct aarch64_operand aarch64_operands[] =
{
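  /* Each entry gives an operand's class, its name in the opcode table, a set
     of flags, the instruction fields it occupies, and a short description
     used in diagnostics.  Editorial note (not generator output): the
     OPD_F_HAS_INSERTER and OPD_F_HAS_EXTRACTOR flags appear to mark operands
     that have dedicated aarch64_ins_* / aarch64_ext_* encode and decode
     helpers, and OPD_F_MAYBE_SP appears to mark register operands for which
     register number 31 is read as SP rather than the zero register.  */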
  {AARCH64_OPND_CLASS_NIL, "", 0, {0}, "<none>"},
  {AARCH64_OPND_CLASS_INT_REG, "Rd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rt", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rt}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rt2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rt2}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rs", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rs}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Ra", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Ra}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rt_SYS", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rt}, "an integer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rd_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "an integer or stack pointer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rn_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an integer or stack pointer register"},
  {AARCH64_OPND_CLASS_INT_REG, "Rm_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "an integer or stack pointer register"},
  {AARCH64_OPND_CLASS_INT_REG, "PAIRREG", OPD_F_HAS_EXTRACTOR, {}, "the second reg of a pair"},
  {AARCH64_OPND_CLASS_MODIFIED_REG, "Rm_EXT", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an integer register with optional extension"},
  {AARCH64_OPND_CLASS_MODIFIED_REG, "Rm_SFT", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an integer register with optional shift"},
  {AARCH64_OPND_CLASS_FP_REG, "Fd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "a floating-point register"},
  {AARCH64_OPND_CLASS_FP_REG, "Fn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "a floating-point register"},
  {AARCH64_OPND_CLASS_FP_REG, "Fm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "a floating-point register"},
  {AARCH64_OPND_CLASS_FP_REG, "Fa", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Ra}, "a floating-point register"},
  {AARCH64_OPND_CLASS_FP_REG, "Ft", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rt}, "a floating-point register"},
  {AARCH64_OPND_CLASS_FP_REG, "Ft2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rt2}, "a floating-point register"},
  {AARCH64_OPND_CLASS_SISD_REG, "Sd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "a SIMD scalar register"},
  {AARCH64_OPND_CLASS_SISD_REG, "Sn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "a SIMD scalar register"},
  {AARCH64_OPND_CLASS_SISD_REG, "Sm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "a SIMD scalar register"},
  {AARCH64_OPND_CLASS_SIMD_REG, "Vd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "a SIMD vector register"},
  {AARCH64_OPND_CLASS_SIMD_REG, "Vn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "a SIMD vector register"},
  {AARCH64_OPND_CLASS_SIMD_REG, "Vm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "a SIMD vector register"},
  {AARCH64_OPND_CLASS_FP_REG, "VdD1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "the top half of a 128-bit FP/SIMD register"},
  {AARCH64_OPND_CLASS_FP_REG, "VnD1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "the top half of a 128-bit FP/SIMD register"},
  {AARCH64_OPND_CLASS_SIMD_ELEMENT, "Ed", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rd}, "a SIMD vector element"},
  {AARCH64_OPND_CLASS_SIMD_ELEMENT, "En", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "a SIMD vector element"},
  {AARCH64_OPND_CLASS_SIMD_ELEMENT, "Em", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rm}, "a SIMD vector element"},
  {AARCH64_OPND_CLASS_SIMD_REGLIST, "LVn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "a SIMD vector register list"},
  {AARCH64_OPND_CLASS_SIMD_REGLIST, "LVt", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a SIMD vector register list"},
  {AARCH64_OPND_CLASS_SIMD_REGLIST, "LVt_AL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a SIMD vector register list"},
  {AARCH64_OPND_CLASS_SIMD_REGLIST, "LEt", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a SIMD vector element list"},
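  /* Editorial note: operands whose field list is empty ({}) do not map onto a
     single named bitfield; their inserter and extractor routines are assumed
     to locate the relevant bits themselves.  */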
  {AARCH64_OPND_CLASS_IMMEDIATE, "CRn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CRn}, "a 4-bit opcode field named for historical reasons C0 - C15"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "CRm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CRm}, "a 4-bit opcode field named for historical reasons C0 - C15"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IDX", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm4}, "an immediate as the index of the least significant byte"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_VLSL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a left shift amount for an AdvSIMD register"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_VLSR", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a right shift amount for an AdvSIMD register"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SIMD_IMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SIMD_IMM_SFT", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an 8-bit unsigned immediate with optional shift"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SIMD_FPIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an 8-bit floating-point constant"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SHLL_IMM", OPD_F_HAS_EXTRACTOR, {}, "an immediate shift amount of 8, 16 or 32"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM0", 0, {}, "0"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "FPIMM0", 0, {}, "0.0"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "FPIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm8}, "an 8-bit floating-point constant"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMMR", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_immr}, "the right rotate amount"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMMS", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm6}, "the leftmost bit number to be moved from the source"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "WIDTH", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm6}, "the width of the bit-field"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm6}, "an immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "UIMM3_OP1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_op1}, "a 3-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "UIMM3_OP2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_op2}, "a 3-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "UIMM4", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CRm}, "a 4-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "UIMM7", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_CRm, FLD_op2}, "a 7-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "BIT_NUM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_b5, FLD_b40}, "the bit number to be tested"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "EXCEPTION", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm16}, "a 16-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "CCMP_IMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm5}, "a 5-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SIMM5", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm5}, "a 5-bit signed immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "NZCV", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_nzcv}, "a flag bit specifier giving an alternative value for each flag"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "LIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_N, FLD_immr, FLD_imms}, "Logical immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "AIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_shift, FLD_imm12}, "a 12-bit unsigned immediate with optional left shift of 12 bits"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "HALF", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm16}, "a 16-bit immediate with optional left shift"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "FBITS", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_scale}, "the number of bits after the binary point in the fixed-point value"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_MOV", 0, {}, "an immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_ROT1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_rotate1}, "a 2-bit rotation specifier for complex arithmetic operations"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_ROT2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_rotate2}, "a 2-bit rotation specifier for complex arithmetic operations"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "IMM_ROT3", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_rotate3}, "a 1-bit rotation specifier for complex arithmetic operations"},
  {AARCH64_OPND_CLASS_COND, "COND", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a condition"},
  {AARCH64_OPND_CLASS_COND, "COND1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "one of the standard conditions, excluding AL and NV."},
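  /* Editorial note: for the PC-relative address operands below, OPD_F_SEXT
     appears to mean that the encoded offset is sign-extended, and
     OPD_F_SHIFT_BY_2 that the stored value is word-scaled, i.e. it is
     shifted left by 2 to give the byte offset.  */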
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_ADRP", OPD_F_SEXT | OPD_F_HAS_EXTRACTOR, {FLD_immhi, FLD_immlo}, "21-bit PC-relative address of a 4KB page"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_PCREL14", OPD_F_SEXT | OPD_F_SHIFT_BY_2 | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm14}, "14-bit PC-relative address"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_PCREL19", OPD_F_SEXT | OPD_F_SHIFT_BY_2 | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm19}, "19-bit PC-relative address"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_PCREL21", OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_immhi, FLD_immlo}, "21-bit PC-relative address"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_PCREL26", OPD_F_SEXT | OPD_F_SHIFT_BY_2 | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm26}, "26-bit PC-relative address"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_SIMPLE", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an address with base register (no offset)"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_REGOFF", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an address with register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_SIMM7", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm7, FLD_index2}, "an address with 7-bit signed immediate offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_SIMM9", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm9, FLD_index}, "an address with 9-bit signed immediate offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_SIMM9_2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_imm9, FLD_index}, "an address with 9-bit negative or unaligned immediate offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_SIMM10", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_S_imm10, FLD_imm9, FLD_index}, "an address with 10-bit scaled, signed immediate offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "ADDR_UIMM12", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_imm12}, "an address with scaled, unsigned immediate offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SIMD_ADDR_SIMPLE", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an address with base register (no offset)"},
  {AARCH64_OPND_CLASS_ADDRESS, "SIMD_ADDR_POST", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a post-indexed address with immediate or register increment"},
  {AARCH64_OPND_CLASS_SYSTEM, "SYSREG", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a system register"},
  {AARCH64_OPND_CLASS_SYSTEM, "PSTATEFIELD", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a PSTATE field name"},
  {AARCH64_OPND_CLASS_SYSTEM, "SYSREG_AT", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an address translation operation specifier"},
  {AARCH64_OPND_CLASS_SYSTEM, "SYSREG_DC", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a data cache maintenance operation specifier"},
  {AARCH64_OPND_CLASS_SYSTEM, "SYSREG_IC", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "an instruction cache maintenance operation specifier"},
  {AARCH64_OPND_CLASS_SYSTEM, "SYSREG_TLBI", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a TLB invalidation operation specifier"},
  {AARCH64_OPND_CLASS_SYSTEM, "BARRIER", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a barrier option name"},
  {AARCH64_OPND_CLASS_SYSTEM, "BARRIER_ISB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the ISB option name SY or an optional 4-bit unsigned immediate"},
  {AARCH64_OPND_CLASS_SYSTEM, "PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a prefetch operation specifier"},
  {AARCH64_OPND_CLASS_SYSTEM, "BARRIER_PSB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the PSB option name CSYNC"},
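  /* Editorial note: the SVE address operands that follow pack a small
     operand-specific value into the flags word at OPD_F_OD_LSB (for example
     the LSL amount or the offset multiplier), which is why their flag
     expressions begin with "n << OPD_F_OD_LSB".  OPD_F_NO_ZR is assumed to
     mark forms whose offset register may not be the zero register.  */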
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x16", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 16"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x2xVL", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 2*VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x3xVL", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 3*VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x4xVL", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 4*VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S6xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit signed offset, multiplied by VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S9xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 9-bit signed offset, multiplied by VL"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 2"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 4"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x8", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 8"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL3", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX", (0 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL1", (1 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL2", (2 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL3", (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_Rm}, "an address with a scalar register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL3", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW_14", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_14}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW_22", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_22}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW1_14", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_14}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW1_22", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_22}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW2_14", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_14}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW2_22", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_22}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW3_14", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_14}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW3_22", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn, FLD_SVE_Zm_16, FLD_SVE_xs_22}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 2"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 4"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x8", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 8"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_LSL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_SXTW", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
  {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_UXTW", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn, FLD_SVE_Zm_16}, "an address with a vector register offset"},
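  /* Editorial note: SVE_AIMM and SVE_ASIMM below are understood to encode an
     8-bit immediate with an optional "LSL #8", packed together into the
     9-bit FLD_SVE_imm9 field; SVE_AIMM treats the value as unsigned and
     SVE_ASIMM as signed.  */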
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_AIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imm9}, "a 9-bit unsigned arithmetic operand"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_ASIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imm9}, "a 9-bit signed arithmetic operand"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_FPIMM8", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_imm8}, "an 8-bit floating-point immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_I1_HALF_ONE", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i1}, "either 0.5 or 1.0"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_I1_HALF_TWO", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i1}, "either 0.5 or 2.0"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_I1_ZERO_ONE", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_i1}, "either 0.0 or 1.0"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_IMM_ROT1", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_rot1}, "a 1-bit rotation specifier for complex arithmetic operations"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_IMM_ROT2", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_rot2}, "a 2-bit rotation specifier for complex arithmetic operations"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_INV_LIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N, FLD_SVE_immr, FLD_SVE_imms}, "an inverted 13-bit logical immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_LIMM", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N, FLD_SVE_immr, FLD_SVE_imms}, "a 13-bit logical immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_LIMM_MOV", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_N, FLD_SVE_immr, FLD_SVE_imms}, "a 13-bit logical move immediate"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PATTERN", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_pattern}, "an enumeration value such as POW2"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PATTERN_SCALED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_pattern}, "an enumeration value such as POW2"},
  {AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_prfop}, "an enumeration value such as PLDL1KEEP"},
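  /* Editorial note: SVE_Pg3 is a 3-bit governing-predicate field, so only
     P0-P7 can be encoded there; the SVE_Pg4_5, SVE_Pg4_10 and SVE_Pg4_16
     variants are assumed to be 4-bit predicate fields whose least
     significant bit sits at instruction bit 5, 10 and 16 respectively.  */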
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pd", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pd}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pg3", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pg3}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pg4_5", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pg4_5}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pg4_10", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pg4_10}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pg4_16", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pg4_16}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pm}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pn", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pn}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_PRED_REG, "SVE_Pt", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Pt}, "an SVE predicate register"},
  {AARCH64_OPND_CLASS_INT_REG, "SVE_Rm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Rm}, "an integer register or zero"},
  {AARCH64_OPND_CLASS_INT_REG, "SVE_Rn_SP", OPD_F_MAYBE_SP | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Rn}, "an integer register or SP"},
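  /* Editorial note: the SVE shift-immediate operands below are assumed to
     encode the element size and the shift amount together in the
     FLD_SVE_tszh / imm5 fields, in the same spirit as the AdvSIMD
     shift-by-immediate encodings.  */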
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SHLIMM_PRED " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_tszh , FLD_SVE_imm5 } , " a shift-left immediate operand " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SHLIMM_UNPRED " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_tszh , FLD_imm5 } , " a shift-left immediate operand " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SHRIMM_PRED " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_tszh , FLD_SVE_imm5 } , " a shift-right immediate operand " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SHRIMM_UNPRED " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_tszh , FLD_imm5 } , " a shift-right immediate operand " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SIMM5 " , OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm5 } , " a 5-bit signed immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SIMM5B " , OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm5b } , " a 5-bit signed immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SIMM6 " , OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imms } , " a 6-bit signed immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_SIMM8 " , OPD_F_SEXT | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm8 } , " an 8-bit signed immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_UIMM3 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm3 } , " a 3-bit unsigned immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_UIMM7 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm7 } , " a 7-bit unsigned immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_UIMM8 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_imm8 } , " an 8-bit unsigned immediate " } ,
{ AARCH64_OPND_CLASS_IMMEDIATE , " SVE_UIMM8_53 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_imm5 , FLD_imm3 } , " an 8-bit unsigned immediate " } ,
2016-09-21 23:57:43 +08:00
{ AARCH64_OPND_CLASS_SIMD_REG , " SVE_VZn " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zn } , " a SIMD register " } ,
{ AARCH64_OPND_CLASS_SIMD_REG , " SVE_Vd " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Vd } , " a SIMD register " } ,
{ AARCH64_OPND_CLASS_SIMD_REG , " SVE_Vm " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Vm } , " a SIMD register " } ,
{ AARCH64_OPND_CLASS_SIMD_REG , " SVE_Vn " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Vn } , " a SIMD register " } ,
2016-09-21 23:53:54 +08:00
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Za_5 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Za_5 } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Za_16 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Za_16 } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zd " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zd } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zm_5 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zm_5 } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zm_16 " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zm_16 } , " an SVE vector register " } ,
2017-02-25 02:29:00 +08:00
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zm3_INDEX " , 3 < < OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zm_16 } , " an indexed SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zm3_22_INDEX " , 3 < < OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_i3h , FLD_SVE_Zm_16 } , " an indexed SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zm4_INDEX " , 4 < < OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zm_16 } , " an indexed SVE vector register " } ,
2016-09-21 23:53:54 +08:00
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zn " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zn } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zn_INDEX " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zn } , " an indexed SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_ZnxN " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zn } , " a list of SVE vector registers " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_Zt " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zt } , " an SVE vector register " } ,
{ AARCH64_OPND_CLASS_SVE_REG , " SVE_ZtxN " , OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR , { FLD_SVE_Zt } , " a list of SVE vector registers " } ,
2012-08-13 22:52:54 +08:00
{ AARCH64_OPND_CLASS_NIL , " " , 0 , { 0 } , " DUMMY " } ,
} ;
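/* Usage sketch (illustration only, not part of the generated table):
   aarch64_operands[] is indexed by the aarch64_opnd enumerators from
   include/opcode/aarch64.h, so client code can query an operand's class,
   flags and printable description.  The struct member name `flags' and the
   helper below are assumptions for illustration, not part of this file.  */
#if 0
static int
aarch64_operand_has_extractor_p (enum aarch64_opnd type)
{
  /* Index by the operand enumerator and test the extractor flag; e.g. the
     SVE_Zn entry above sets OPD_F_HAS_EXTRACTOR, so this would be non-zero
     for AARCH64_OPND_SVE_Zn.  */
  return (aarch64_operands[type].flags & OPD_F_HAS_EXTRACTOR) != 0;
}
#endif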
/* Indexed by an enum aarch64_op enumerator, the value is the offset of
the corresponding aarch64_opcode entry in the aarch64_opcode_table . */
static const unsigned op_enum_table [ ] =
{
0 ,
2016-11-18 18:02:16 +08:00
863 ,
864 ,
2016-11-11 18:43:15 +08:00
865 ,
868 ,
869 ,
870 ,
2016-11-18 17:53:45 +08:00
871 ,
2016-11-18 18:02:16 +08:00
872 ,
866 ,
867 ,
873 ,
874 ,
896 ,
897 ,
2016-11-11 18:43:15 +08:00
898 ,
901 ,
902 ,
903 ,
2016-11-18 17:53:45 +08:00
904 ,
2016-11-18 18:02:16 +08:00
905 ,
899 ,
900 ,
906 ,
907 ,
2016-11-18 17:58:38 +08:00
955 ,
2016-11-18 18:02:16 +08:00
956 ,
957 ,
958 ,
2015-12-15 01:35:47 +08:00
12 ,
2016-11-18 18:02:16 +08:00
630 ,
631 ,
2016-11-18 17:58:38 +08:00
1150 ,
2016-11-18 18:02:16 +08:00
1152 ,
1154 ,
962 ,
1153 ,
1151 ,
312 ,
618 ,
629 ,
628 ,
960 ,
2015-12-15 01:46:21 +08:00
625 ,
622 ,
2016-11-18 18:02:16 +08:00
614 ,
613 ,
620 ,
2015-12-15 01:46:21 +08:00
621 ,
624 ,
2016-11-18 18:02:16 +08:00
626 ,
627 ,
970 ,
2016-11-11 18:43:15 +08:00
658 ,
661 ,
2016-11-18 18:02:16 +08:00
664 ,
2016-11-11 18:43:15 +08:00
659 ,
2016-11-18 18:02:16 +08:00
662 ,
807 ,
2015-12-15 01:16:50 +08:00
172 ,
173 ,
2015-12-15 01:22:36 +08:00
174 ,
2016-11-18 18:02:16 +08:00
175 ,
510 ,
747 ,
383 ,
385 ,
407 ,
409 ,
1215 ,
2016-11-18 17:49:06 +08:00
1220 ,
2016-11-18 18:02:16 +08:00
1213 ,
1212 ,
1216 ,
2016-11-18 17:58:38 +08:00
1223 ,
1225 ,
2016-11-18 18:02:16 +08:00
1226 ,
1222 ,
1228 ,
1227 ,
129 ,
2012-08-13 22:52:54 +08:00
} ;
/* Given the opcode enumerator OP, return the pointer to the corresponding
opcode entry . */
const aarch64_opcode *
aarch64_get_opcode ( enum aarch64_op op )
{
return aarch64_opcode_table + op_enum_table [ op ] ;
}
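/* Usage sketch (illustration only): a consumer holding an aarch64_op
   enumerator can recover the full opcode record through aarch64_get_opcode;
   op_enum_table supplies the offset into aarch64_opcode_table.  The `name'
   member of aarch64_opcode is assumed from include/opcode/aarch64.h, and the
   helper below is hypothetical.  */
#if 0
static const char *
opcode_mnemonic (enum aarch64_op op)
{
  /* Look up the table entry for OP and return its mnemonic string.  */
  const aarch64_opcode *opc = aarch64_get_opcode (op);
  return opc->name;
}
#endif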