diff --git a/x86/iflags.ph b/x86/iflags.ph
index a2c35c11..cc638138 100644
--- a/x86/iflags.ph
+++ b/x86/iflags.ph
@@ -104,6 +104,9 @@ if_("CMPCCXADD", "CMPccXADD instructions");
 if_("PREFETCHI", "PREFETCHI0 and PREFETCHI1");
 if_("WRMSRNS", "WRMSRNS");
 if_("MSRLIST", "RDMSRLIST and WRMSRLIST");
+if_("AVXNECONVERT", "AVX exceptionless floating-point conversions");
+if_("AVXVNNIINT8", "AVX Vector Neural Network 8-bit integer instructions");
+if_("AVXIFMA", "AVX integer multiply and add");
 
 # Put these last to minimize their relevance
 if_("OBSOLETE", "Instruction removed from architecture");
diff --git a/x86/insns.dat b/x86/insns.dat
index 75ae9e36..3ec13a03 100644
--- a/x86/insns.dat
+++ b/x86/insns.dat
@@ -6307,6 +6307,42 @@ VSUBPH zmmreg|mask|z,zmmreg*,zmmrm512|b16|er [rvm:fv: evex.nds.512.np.map5.w0 5
 VSUBSH xmmreg|mask|z,xmmreg*,xmmrm16|er [rvm:t1s: evex.nds.lig.f3.map5.w0 5c /r] AVX512FP16,FUTURE
 VUCOMISH xmmreg,xmmrm16|sae [rm:t1s: evex.lig.np.map5.w0 2e /r] AVX512FP16,FUTURE
 
+;# AVX no exception conversions
+VBCSTNEBF16PS xmmreg,mem16 [rm: vex.128.f3.0f38.w0 b1 /r] AVXNECONVERT,FUTURE,SW
+VBCSTNEBF16PS ymmreg,mem16 [rm: vex.256.f3.0f38.w0 b1 /r] AVXNECONVERT,FUTURE,SW
+VBCSTNESH2PS xmmreg,mem16 [rm: vex.128.66.0f38.w0 b1 /r] AVXNECONVERT,FUTURE,SW
+VBCSTNESH2PS ymmreg,mem16 [rm: vex.256.66.0f38.w0 b1 /r] AVXNECONVERT,FUTURE,SW
+VCVTNEEBF162PS xmmreg,mem128 [rm: vex.128.f3.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SX
+VCVTNEEBF162PS ymmreg,mem256 [rm: vex.256.f3.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SY
+VCVTNEEPH2PS xmmreg,mem128 [rm: vex.128.66.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SX
+VCVTNEEPH2PS ymmreg,mem256 [rm: vex.256.66.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SY
+VCVTNEOBF162PS xmmreg,mem128 [rm: vex.128.f2.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SX
+VCVTNEOBF162PS ymmreg,mem256 [rm: vex.256.f2.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SY
+VCVTNEOPH2PS xmmreg,mem128 [rm: vex.128.np.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SX
+VCVTNEOPH2PS ymmreg,mem256 [rm: vex.256.np.0f38.w0 b0 /r] AVXNECONVERT,FUTURE,SY
+VCVTNEPS2BF16 xmmreg,xmmrm128 [rm: vex.128.f3.0f38.w0 72 /r] AVXNECONVERT,FUTURE,SX
+VCVTNEPS2BF16 ymmreg,ymmrm256 [rm: vex.256.f3.0f38.w0 72 /r] AVXNECONVERT,FUTURE,SY
+
+;# AVX Vector Neural Network Instructions
+VPDPBSSD xmmreg,xmmreg,xmmrm128 [rvm: vex.128.f2.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBSSD ymmreg,ymmreg,ymmrm256 [rvm: vex.256.f2.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SY
+VPDPBSSDS xmmreg,xmmreg,xmmrm128 [rvm: vex.128.f2.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBSSDS ymmreg,ymmreg,ymmrm256 [rvm: vex.256.f2.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SY
+VPDPBSUD xmmreg,xmmreg,xmmrm128 [rvm: vex.128.f3.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBSUD ymmreg,ymmreg,ymmrm256 [rvm: vex.256.f3.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SY
+VPDPBSUDS xmmreg,xmmreg,xmmrm128 [rvm: vex.128.f3.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBSUDS ymmreg,ymmreg,ymmrm256 [rvm: vex.256.f3.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SY
+VPDPBUUD xmmreg,xmmreg,xmmrm128 [rvm: vex.128.np.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBUUD ymmreg,ymmreg,ymmrm256 [rvm: vex.256.np.0f38.w0 50 /r] AVXVNNIINT8,FUTURE,SY
+VPDPBUUDS xmmreg,xmmreg,xmmrm128 [rvm: vex.128.np.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SX
+VPDPBUUDS ymmreg,ymmreg,ymmrm256 [rvm: vex.256.np.0f38.w0 51 /r] AVXVNNIINT8,FUTURE,SY
+
+;# AVX Integer Fused Multiply-Add
+VPMADD52HUQ xmmreg,xmmreg,xmmrm128 [rvm: vex.128.66.0f38.w1 b5 /r] AVXIFMA,FUTURE,SX
+VPMADD52HUQ ymmreg,ymmreg,ymmrm256 [rvm: vex.256.66.0f38.w1 b5 /r] AVXIFMA,FUTURE,SY
+VPMADD52LUQ xmmreg,xmmreg,xmmrm128 [rvm: vex.128.66.0f38.w1 b4 /r] AVXIFMA,FUTURE,SX
+VPMADD52LUQ ymmreg,ymmreg,ymmrm256 [rvm: vex.256.66.0f38.w1 b4 /r] AVXIFMA,FUTURE,SY
+
 ;# RAO-INT weakly ordered atomic operations
 AADD mem32,reg32 [mr: norexw np 0f 38 fc /r ] RAOINT,FUTURE,SD
 AADD mem64,reg64 [mr: o64 np 0f 38 fc /r ] RAOINT,FUTURE,SQ,LONG
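
For context (not part of the patch): a minimal, illustrative NASM fragment showing how source code might exercise a few of the newly added forms, assuming a NASM build that includes these table entries. The register and memory operands are arbitrary examples, and the {vex} hint on VPMADD52LUQ is shown only because that mnemonic also has AVX512-IFMA (EVEX) forms elsewhere in insns.dat.

        bits 64

        vbcstnebf16ps xmm0, [rsi]           ; AVXNECONVERT: load one BF16 word, convert and broadcast to packed single
        vcvtneeph2ps  ymm1, [rsi]           ; AVXNECONVERT: convert even FP16 elements from memory to FP32
        vpdpbssd      xmm2, xmm3, xmm4      ; AVXVNNIINT8: signed byte x signed byte dot product, dword accumulate
        vpdpbuud      ymm5, ymm6, [rdi]     ; AVXVNNIINT8: unsigned byte x unsigned byte dot product, dword accumulate
        {vex} vpmadd52luq xmm7, xmm8, xmm9  ; AVXIFMA: {vex} requests the VEX-encoded form rather than the EVEX one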