author      Cyrille Bagard <nocbos@gmail.com>    2018-05-28 22:24:57 (GMT)
committer   Cyrille Bagard <nocbos@gmail.com>    2018-05-28 22:24:57 (GMT)
commit      85f8cfc150a9cbb07136863bc1ec379b29c723ab (patch)
tree        e6c25f51e2fdcb547b1f80847fa3bcc6edf80889 /plugins/arm
parent      5311a943dffcc410739509b9215ca464f6d1e54c (diff)
Added support for some ARMv7 SIMD instructions.
Diffstat (limited to 'plugins/arm')
-rw-r--r--  plugins/arm/v7/helpers.h | 52
-rw-r--r--  plugins/arm/v7/opdefs/A88277_vaba.d | 909
-rw-r--r--  plugins/arm/v7/opdefs/A88278_vabd.d | 909
-rw-r--r--  plugins/arm/v7/opdefs/A88279_vabd.d | 137
-rw-r--r--  plugins/arm/v7/opdefs/A88280_vabs.d | 317
-rw-r--r--  plugins/arm/v7/opdefs/A88281_vac.d | 229
-rw-r--r--  plugins/arm/v7/opdefs/A88282_vadd.d | 221
-rw-r--r--  plugins/arm/v7/opdefs/A88283_vadd.d | 237
-rw-r--r--  plugins/arm/v7/opdefs/A88284_vaddhn.d | 177
-rw-r--r--  plugins/arm/v7/opdefs/A88285_vadd.d | 621
-rw-r--r--  plugins/arm/v7/opdefs/A88287_vand.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88288_vbic.d | 1277
-rw-r--r--  plugins/arm/v7/opdefs/A88289_vbic.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88290_vb.d | 321
-rw-r--r--  plugins/arm/v7/opdefs/A88291_vceq.d | 425
-rw-r--r--  plugins/arm/v7/opdefs/A88292_vceq.d | 429
-rw-r--r--  plugins/arm/v7/opdefs/A88293_vcge.d | 725
-rw-r--r--  plugins/arm/v7/opdefs/A88294_vcge.d | 429
-rw-r--r--  plugins/arm/v7/opdefs/A88295_vcgt.d | 725
-rw-r--r--  plugins/arm/v7/opdefs/A88296_vcgt.d | 429
-rw-r--r--  plugins/arm/v7/opdefs/A88298_vcle.d | 429
-rw-r--r--  plugins/arm/v7/opdefs/A88299_vcls.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88301_vclt.d | 429
-rw-r--r--  plugins/arm/v7/opdefs/A88302_vclz.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88303_vcmp.d | 409
-rw-r--r--  plugins/arm/v7/opdefs/A88304_vcnt.d | 129
-rw-r--r--  plugins/arm/v7/opdefs/A88305_vcvt.d | 413
-rw-r--r--  plugins/arm/v7/opdefs/A88312_vdiv.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88314_vdup.d | 321
-rw-r--r--  plugins/arm/v7/opdefs/A88315_veor.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88317_vfm.d | 433
-rw-r--r--  plugins/arm/v7/opdefs/A88318_vfnm.d | 229
-rw-r--r--  plugins/arm/v7/opdefs/A88319_vh.d | 1245
-rw-r--r--  plugins/arm/v7/opdefs/A88334_vmax.d | 1245
-rw-r--r--  plugins/arm/v7/opdefs/A88335_vmax.d | 229
-rw-r--r--  plugins/arm/v7/opdefs/A88337_vmla.d | 433
-rw-r--r--  plugins/arm/v7/opdefs/A88345_vmov.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88346_vmovl.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88347_vmovn.d | 171
-rw-r--r--  plugins/arm/v7/opdefs/A88351_vmul.d | 237
-rw-r--r--  plugins/arm/v7/opdefs/A88354_vmvn.d | 75
-rw-r--r--  plugins/arm/v7/opdefs/A88355_vneg.d | 317
-rw-r--r--  plugins/arm/v7/opdefs/A88356_vnm.d | 329
-rw-r--r--  plugins/arm/v7/opdefs/A88358_vorn.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88359_vorr.d | 1277
-rw-r--r--  plugins/arm/v7/opdefs/A88360_vorr.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88361_vpadal.d | 597
-rw-r--r--  plugins/arm/v7/opdefs/A88362_vpadd.d | 183
-rw-r--r--  plugins/arm/v7/opdefs/A88363_vpadd.d | 91
-rw-r--r--  plugins/arm/v7/opdefs/A88364_vpaddl.d | 597
-rw-r--r--  plugins/arm/v7/opdefs/A88365_vpmax.d | 645
-rw-r--r--  plugins/arm/v7/opdefs/A88366_vpmax.d | 141
-rw-r--r--  plugins/arm/v7/opdefs/A88369_vqabs.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88375_vqneg.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88377_vqrshl.d | 813
-rw-r--r--  plugins/arm/v7/opdefs/A88379_vqshl.d | 813
-rw-r--r--  plugins/arm/v7/opdefs/A88382_vqsub.d | 813
-rw-r--r--  plugins/arm/v7/opdefs/A88383_vraddhn.d | 177
-rw-r--r--  plugins/arm/v7/opdefs/A88384_vrecpe.d | 229
-rw-r--r--  plugins/arm/v7/opdefs/A88385_vrecps.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88386_vrev.d | 873
-rw-r--r--  plugins/arm/v7/opdefs/A88387_vrhadd.d | 621
-rw-r--r--  plugins/arm/v7/opdefs/A88388_vrshl.d | 813
-rw-r--r--  plugins/arm/v7/opdefs/A88391_vrsqrte.d | 229
-rw-r--r--  plugins/arm/v7/opdefs/A88392_vrsqrts.d | 137
-rw-r--r--  plugins/arm/v7/opdefs/A88394_vrsubhn.d | 177
-rw-r--r--  plugins/arm/v7/opdefs/A88396_vshl.d | 813
-rw-r--r--  plugins/arm/v7/opdefs/A88401_vsqrt.d | 129
-rw-r--r--  plugins/arm/v7/opdefs/A88414_vsub.d | 221
-rw-r--r--  plugins/arm/v7/opdefs/A88415_vsub.d | 237
-rw-r--r--  plugins/arm/v7/opdefs/A88416_vsubhn.d | 177
-rw-r--r--  plugins/arm/v7/opdefs/A88417_vsub.d | 621
-rw-r--r--  plugins/arm/v7/opdefs/A88418_vswp.d | 133
-rw-r--r--  plugins/arm/v7/opdefs/A88420_vtrn.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88421_vtst.d | 321
-rw-r--r--  plugins/arm/v7/opdefs/A88422_vuzp.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/A88423_vzip.d | 309
-rw-r--r--  plugins/arm/v7/opdefs/Makefile.am | 76
-rw-r--r--  plugins/arm/v7/pseudo.c | 187
-rw-r--r--  plugins/arm/v7/pseudo.h | 16
80 files changed, 31335 insertions, 0 deletions
diff --git a/plugins/arm/v7/helpers.h b/plugins/arm/v7/helpers.h
index ad23bc7..968dbf4 100644
--- a/plugins/arm/v7/helpers.h
+++ b/plugins/arm/v7/helpers.h
@@ -41,6 +41,7 @@
#include "registers/banked.h"
#include "registers/basic.h"
#include "registers/coproc.h"
+#include "registers/simd.h"
#include "registers/special.h"
@@ -50,6 +51,18 @@
*/
+#define AdvSIMDExpandImm(op, cmode, imm8) \
+ ({ \
+ GArchOperand *__result; \
+ uint64_t __val; \
+ if (armv7_advanced_simd_expand_imm(op, cmode, imm8, &__val)) \
+ __result = g_imm_operand_new_from_value(MDS_64_BITS_UNSIGNED, __val); \
+ else \
+ __result = NULL; \
+ __result; \
+ })
+
+
#define ARMExpandImm(imm12) \
({ \
GArchOperand *__result; \
@@ -151,6 +164,19 @@
})
+#define DoubleWordVector(idx) \
+ ({ \
+ GArchOperand *__result; \
+ GArchRegister *__reg; \
+ __reg = g_armv7_simd_register_new(SRM_DOUBLE_WORD, idx); \
+ if (__reg == NULL) \
+ __result = NULL; \
+ else \
+ __result = g_armv7_register_operand_new(G_ARMV7_REGISTER(__reg)); \
+ __result; \
+ })
+
+
#define Endian(big) \
({ \
GArchOperand *__result; \
@@ -254,6 +280,19 @@
})
+#define QuadWordVector(idx) \
+ ({ \
+ GArchOperand *__result; \
+ GArchRegister *__reg; \
+ __reg = g_armv7_simd_register_new(SRM_QUAD_WORD, idx); \
+ if (__reg == NULL) \
+ __result = NULL; \
+ else \
+ __result = g_armv7_register_operand_new(G_ARMV7_REGISTER(__reg)); \
+ __result; \
+ })
+
+
#define SignExtend(val, size, top) \
({ \
GArchOperand *__result; \
@@ -274,6 +313,19 @@
})
+#define SingleWordVector(idx) \
+ ({ \
+ GArchOperand *__result; \
+ GArchRegister *__reg; \
+ __reg = g_armv7_simd_register_new(SRM_SINGLE_WORD, idx); \
+ if (__reg == NULL) \
+ __result = NULL; \
+ else \
+ __result = g_armv7_register_operand_new(G_ARMV7_REGISTER(__reg)); \
+ __result; \
+ })
+
+
#define Register(idx) \
({ \
GArchOperand *__result; \
diff --git a/plugins/arm/v7/opdefs/A88277_vaba.d b/plugins/arm/v7/opdefs/A88277_vaba.d
new file mode 100644
index 0000000..32bbb27
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88277_vaba.d
@@ -0,0 +1,909 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VABA, VABAL
+
+@id 269
+
+@desc {
+
+ Vector Absolute Difference and Accumulate {Long} subtracts the elements of one vector from the corresponding elements of another vector, and accumulates the absolute values of the results into the elements of the destination vector. Operand and result elements are either all integers of the same length, or optionally the results can be double the length of the operands. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction that is not also available as a VFP instruction, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 799
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s8 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 800
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s16 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 801
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 802
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u8 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 803
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u16 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 804
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 805
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s8 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 806
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s16 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 807
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 808
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u8 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 809
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u16 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 810
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 811
+
+ @assert {
+
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 812
+
+ @assert {
+
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 813
+
+ @assert {
+
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 814
+
+ @assert {
+
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 815
+
+ @assert {
+
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 816
+
+ @assert {
+
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 817
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s8 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 818
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s16 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 819
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 820
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u8 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 821
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u16 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 822
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 823
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s8 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 824
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s16 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 825
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.s32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 826
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u8 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 827
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u16 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 828
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaba.u32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 829
+
+ @assert {
+
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 830
+
+ @assert {
+
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 831
+
+ @assert {
+
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 832
+
+ @assert {
+
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 833
+
+ @assert {
+
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 834
+
+ @assert {
+
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabal.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
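Note: the per-lane behaviour described by the @desc block above — accumulate the absolute difference of two source vectors into the destination, optionally widening for the Long form — can be summarised by the following illustrative C model of the signed-byte forms. This is a sketch of the instruction semantics only, not code taken from the plugin.

    #include <stdint.h>
    #include <stdlib.h>

    /* VABA.S8 Dd, Dn, Dm: Dd[i] += |Dn[i] - Dm[i]| over 8 signed byte lanes;
     * the accumulation simply wraps, as the result is truncated to 8 bits. */
    static void vaba_s8(int8_t d[8], const int8_t n[8], const int8_t m[8])
    {
        int i;

        for (i = 0; i < 8; i++)
            d[i] = (int8_t)(d[i] + abs(n[i] - m[i]));
    }

    /* VABAL.S8 Qd, Dn, Dm: same operation, but the 16-bit destination lanes
     * accumulate the differences of the 8-bit source lanes (the Long form). */
    static void vabal_s8(int16_t q[8], const int8_t n[8], const int8_t m[8])
    {
        int i;

        for (i = 0; i < 8; i++)
            q[i] = (int16_t)(q[i] + abs(n[i] - m[i]));
    }
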
diff --git a/plugins/arm/v7/opdefs/A88278_vabd.d b/plugins/arm/v7/opdefs/A88278_vabd.d
new file mode 100644
index 0000000..966c960
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88278_vabd.d
@@ -0,0 +1,909 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VABD, VABDL (integer)
+
+@id 270
+
+@desc {
+
+ Vector Absolute Difference {Long} (integer) subtracts the elements of one vector from the corresponding elements of another vector, and places the absolute values of the results in the elements of the destination vector. Operand and result elements are either all integers of the same length, or optionally the results can be double the length of the operands. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction that is not also available as a VFP instruction, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 835
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 836
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 837
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 838
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 839
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 840
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 841
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 842
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 843
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 844
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 845
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 846
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 847
+
+ @assert {
+
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 848
+
+ @assert {
+
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 849
+
+ @assert {
+
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 850
+
+ @assert {
+
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 851
+
+ @assert {
+
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 852
+
+ @assert {
+
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 853
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 854
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 855
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 856
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 857
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 858
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 859
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 860
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 861
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 862
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 863
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 864
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 1 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 865
+
+ @assert {
+
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 866
+
+ @assert {
+
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 867
+
+ @assert {
+
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 868
+
+ @assert {
+
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 869
+
+ @assert {
+
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 870
+
+ @assert {
+
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabdl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
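Note: VABD/VABDL as described above behave like VABA/VABAL without the accumulation — the absolute difference is written straight to the destination, widened for the Long form. A short illustrative C model of the unsigned-byte Long variant (sketch only, names hypothetical):

    #include <stdint.h>

    /* VABDL.U8 Qd, Dn, Dm: each 16-bit destination lane receives the absolute
     * difference of the corresponding unsigned byte lanes of Dn and Dm. */
    static void vabdl_u8(uint16_t q[8], const uint8_t n[8], const uint8_t m[8])
    {
        int i;

        for (i = 0; i < 8; i++)
            q[i] = (uint16_t)(n[i] > m[i] ? n[i] - m[i] : m[i] - n[i]);
    }
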
diff --git a/plugins/arm/v7/opdefs/A88279_vabd.d b/plugins/arm/v7/opdefs/A88279_vabd.d
new file mode 100644
index 0000000..564176c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88279_vabd.d
@@ -0,0 +1,137 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VABD (floating-point)
+
+@id 271
+
+@desc {
+
+ Vector Absolute Difference (floating-point) subtracts the elements of one vector from the corresponding elements of another vector, and places the absolute values of the results in the elements of the destination vector. Operand and result elements are all single-precision floating-point numbers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction that is not also available as a VFP instruction, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 871
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 872
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 873
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabd.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 874
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88280_vabs.d b/plugins/arm/v7/opdefs/A88280_vabs.d
new file mode 100644
index 0000000..0badc22
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88280_vabs.d
@@ -0,0 +1,317 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VABS
+
+@id 272
+
+@desc {
+
+ Vector Absolute takes the absolute value of each element in a vector, and places the results in a second vector. The floating-point version only clears the sign bit. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction that is not also available as a VFP instruction, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 875
+
+ @assert {
+
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 876
+
+ @assert {
+
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 877
+
+ @assert {
+
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 878
+
+ @assert {
+
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.f32 qwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 0 Vd(4) 1 0 1 sz(1) 1 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 879
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vabs.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 880
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabs.f64 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 881
+
+ @assert {
+
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 882
+
+ @assert {
+
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 883
+
+ @assert {
+
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 884
+
+ @assert {
+
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vabs.f32 qwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 0 Vd(4) 1 0 1 sz(1) 1 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 885
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vabs.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 886
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vabs.f64 dwvec_D dwvec_M
+
+ }
+
+}
+
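Note: the @desc above points out that the floating-point VABS form only clears the sign bit, whereas the integer forms take a two's-complement absolute value. A tiny C illustration of that difference on doubleword operands (sketch only):

    #include <stdint.h>

    /* VABS.S32 Dd, Dm: two's-complement absolute value per 32-bit lane;
     * the most negative value wraps back onto itself. */
    static void vabs_s32(int32_t d[2], const int32_t m[2])
    {
        int i;

        for (i = 0; i < 2; i++)
            d[i] = m[i] < 0 ? (int32_t)(0u - (uint32_t)m[i]) : m[i];
    }

    /* VABS.F32 Dd, Dm: only bit 31 of each lane is cleared. */
    static void vabs_f32(uint32_t d[2], const uint32_t m[2])
    {
        d[0] = m[0] & 0x7fffffff;
        d[1] = m[1] & 0x7fffffff;
    }
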
diff --git a/plugins/arm/v7/opdefs/A88281_vac.d b/plugins/arm/v7/opdefs/A88281_vac.d
new file mode 100644
index 0000000..30ee91f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88281_vac.d
@@ -0,0 +1,229 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VACGE, VACGT, VACLE, VACLT
+
+@id 273
+
+@desc {
+
+ VACGE (Vector Absolute Compare Greater Than or Equal) and VACGT (Vector Absolute Compare Greater Than) take the absolute value of each element in a vector, and compare it with the absolute value of the corresponding element of a second vector. If the condition is true, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. VACLE (Vector Absolute Compare Less Than or Equal) is a pseudo-instruction, equivalent to a VACGE instruction with the operands reversed. Disassembly produces the VACGE instruction. VACLT (Vector Absolute Compare Less Than) is a pseudo-instruction, equivalent to a VACGT instruction with the operands reversed. Disassembly produces the VACGT instruction. The operands and result can be quadword or doubleword vectors. They must all be the same size. The operand vector elements must be 32-bit floating-point numbers. The result vector elements are 32-bit fields. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction that is not also available as a VFP instruction, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 887
+
+ @assert {
+
+ Q == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vacge.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 888
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vacgt.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 889
+
+ @assert {
+
+ Q == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vacge.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 890
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vacgt.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 891
+
+ @assert {
+
+ Q == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vacge.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 892
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vacgt.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 893
+
+ @assert {
+
+ Q == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vacge.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 894
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vacgt.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
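Note: the absolute-compare instructions described above produce an all-ones or all-zeros mask per 32-bit lane, and VACLE/VACLT are only pseudo-instructions that swap the source operands of VACGE/VACGT. An illustrative C model of VACGE.F32 on a doubleword vector (sketch only):

    #include <math.h>
    #include <stdint.h>

    /* VACGE.F32 Dd, Dn, Dm: Dd[i] = (|Dn[i]| >= |Dm[i]|) ? all ones : all zeros;
     * VACGT uses a strict comparison instead. */
    static void vacge_f32(uint32_t d[2], const float n[2], const float m[2])
    {
        int i;

        for (i = 0; i < 2; i++)
            d[i] = fabsf(n[i]) >= fabsf(m[i]) ? 0xffffffff : 0x00000000;
    }
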
diff --git a/plugins/arm/v7/opdefs/A88282_vadd.d b/plugins/arm/v7/opdefs/A88282_vadd.d
new file mode 100644
index 0000000..9045d95
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88282_vadd.d
@@ -0,0 +1,221 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VADD (integer)
+
+@id 274
+
+@desc {
+
+ Vector Add adds corresponding elements in two vectors, and places the results in the destination vector. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 895
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 896
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 897
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 898
+
+ @assert {
+
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 899
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 900
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 901
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 902
+
+ @assert {
+
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.i64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88283_vadd.d b/plugins/arm/v7/opdefs/A88283_vadd.d
new file mode 100644
index 0000000..096dbc0
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88283_vadd.d
@@ -0,0 +1,237 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VADD (floating-point)
+
+@id 275
+
+@desc {
+
+ Vector Add adds corresponding elements in two vectors, and places the results in the destination vector. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 903
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 904
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 905
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vadd.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 906
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 907
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 908
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 909
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vadd.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 910
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vadd.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88284_vaddhn.d b/plugins/arm/v7/opdefs/A88284_vaddhn.d
new file mode 100644
index 0000000..7b7c9b9
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88284_vaddhn.d
@@ -0,0 +1,177 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VADDHN
+
+@id 276
+
+@desc {
+
+ Vector Add and Narrow, returning High Half adds corresponding elements in two quadword vectors, and places the most significant half of each result in a doubleword vector. The results are truncated. (For rounded results, see VRADDHN on page A8-1022). The operand elements can be 16-bit, 32-bit, or 64-bit integers. There is no distinction between signed and unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 911
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 912
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 913
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 914
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 915
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 916
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vaddhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
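Note: to make the narrowing described above concrete, each wide addition keeps only the most significant half of its result, truncated rather than rounded. A short C model of the .i32 case (sketch only):

    #include <stdint.h>

    /* VADDHN.I32 Dd, Qn, Qm: add 32-bit lanes and keep bits 31:16 of each sum,
     * e.g. 0x7fffffff + 0x00000002 = 0x80000001, which narrows to 0x8000. */
    static void vaddhn_i32(uint16_t d[4], const uint32_t n[4], const uint32_t m[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            d[i] = (uint16_t)((n[i] + m[i]) >> 16);
    }
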
diff --git a/plugins/arm/v7/opdefs/A88285_vadd.d b/plugins/arm/v7/opdefs/A88285_vadd.d
new file mode 100644
index 0000000..4137719
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88285_vadd.d
@@ -0,0 +1,621 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VADDL, VADDW
+
+@id 277
+
+@desc {
+
+ VADDL (Vector Add Long) adds corresponding elements in two doubleword vectors, and places the results in a quadword vector. Before adding, it sign-extends or zero-extends the elements of both operands. VADDW (Vector Add Wide) adds corresponding elements in one quadword and one doubleword vector, and places the results in a quadword vector. Before adding, it sign-extends or zero-extends the elements of the doubleword operand. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 0 0 op(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 917
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 918
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 919
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 920
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 921
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 922
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 923
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 924
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 925
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 926
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 927
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 928
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 0 0 op(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 929
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 930
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 931
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 932
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 933
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 934
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 935
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 936
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 937
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.s32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 938
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 939
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 940
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vaddw.u32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+}
+
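The two syntax families above differ only in which operands are widened before the addition. The following minimal C sketch restates that behaviour for the .s8 lane size as a reading aid; the array lengths, helper names and the use of plain C arrays are illustrative assumptions for this note, not code taken from Chrysalide or from the generated opdefs.

    #include <stdint.h>
    #include <stdio.h>

    /* VADDL.S8: widen both doubleword operands to 16 bits, then add. */
    static void vaddl_s8(int16_t qd[8], const int8_t dn[8], const int8_t dm[8])
    {
        for (int i = 0; i < 8; i++)
            qd[i] = (int16_t)dn[i] + (int16_t)dm[i];   /* sign-extend, then add */
    }

    /* VADDW.S8: widen only the doubleword operand and add it to the quadword one. */
    static void vaddw_s8(int16_t qd[8], const int16_t qn[8], const int8_t dm[8])
    {
        for (int i = 0; i < 8; i++)
            qd[i] = qn[i] + (int16_t)dm[i];
    }

    int main(void)
    {
        int8_t  n[8] = { 100, -100, 127, -128, 1, 2, 3, 4 };
        int8_t  m[8] = {  50,  -50,   1,   -1, 1, 2, 3, 4 };
        int16_t q[8];

        vaddl_s8(q, n, m);
        printf("vaddl.s8 lane 2: %d\n", q[2]);   /* 128: no wrap-around thanks to widening */

        return 0;
    }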
diff --git a/plugins/arm/v7/opdefs/A88287_vand.d b/plugins/arm/v7/opdefs/A88287_vand.d
new file mode 100644
index 0000000..4bc6229
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88287_vand.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VAND (register)
+
+@id 279
+
+@desc {
+
+ This instruction performs a bitwise AND operation between two registers, and places the result in the destination register. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 941
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vand ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 942
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vand ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 943
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vand ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 944
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vand ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
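Throughout these bitwise opdefs the Q bit only selects the register shape (quadword versus doubleword); the operation itself is identical per bit. A rough C illustration of the two VAND forms follows, with the 128-bit Q register modeled as two 64-bit halves purely for the sake of the sketch; this is not how Chrysalide represents operands.

    #include <stdint.h>

    /* Doubleword (Q == 0) form: a single 64-bit AND. */
    static uint64_t vand_d(uint64_t n, uint64_t m)
    {
        return n & m;
    }

    /* Quadword (Q == 1) form, modeled here as two 64-bit halves. */
    static void vand_q(uint64_t d[2], const uint64_t n[2], const uint64_t m[2])
    {
        d[0] = n[0] & m[0];
        d[1] = n[1] & m[1];
    }

    int main(void)
    {
        uint64_t n[2] = { 0xff00ff00ff00ff00ull, 0x0123456789abcdefull };
        uint64_t m[2] = { 0x0ff00ff00ff00ff0ull, 0xffffffff00000000ull };
        uint64_t d[2];

        vand_q(d, n, m);

        /* Each half behaves exactly like the doubleword form. */
        return (d[0] == vand_d(n[0], m[0])) ? 0 : 1;
    }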
diff --git a/plugins/arm/v7/opdefs/A88288_vbic.d b/plugins/arm/v7/opdefs/A88288_vbic.d
new file mode 100644
index 0000000..4961fc9
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88288_vbic.d
@@ -0,0 +1,1277 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VBIC (immediate)
+
+@id 280
+
+@desc {
+
+ Vector Bitwise Bit Clear (immediate) performs a bitwise AND between a register value and the complement of an immediate value, and returns the result into the destination vector. For the range of constants available, see One register and a modified immediate value on page A7-269. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 i(1) 1 1 1 1 1 D(1) 0 0 0 imm3(3) Vd(4) cmode(4) 0 Q(1) 1 1 imm4(4)
+
+ @syntax {
+
+ @subid 945
+
+ @assert {
+
+ Q == 1
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 946
+
+ @assert {
+
+ Q == 1
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 947
+
+ @assert {
+
+ Q == 1
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 948
+
+ @assert {
+
+ Q == 1
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 949
+
+ @assert {
+
+ Q == 1
+ cmode == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 950
+
+ @assert {
+
+ Q == 1
+ cmode == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 951
+
+ @assert {
+
+ Q == 1
+ cmode == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 952
+
+ @assert {
+
+ Q == 1
+ cmode == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 953
+
+ @assert {
+
+ Q == 1
+ cmode == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 954
+
+ @assert {
+
+ Q == 1
+ cmode == 101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 955
+
+ @assert {
+
+ Q == 1
+ cmode == 110
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 956
+
+ @assert {
+
+ Q == 1
+ cmode == 111
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 957
+
+ @assert {
+
+ Q == 1
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 958
+
+ @assert {
+
+ Q == 1
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 959
+
+ @assert {
+
+ Q == 0
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 960
+
+ @assert {
+
+ Q == 0
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 961
+
+ @assert {
+
+ Q == 0
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 962
+
+ @assert {
+
+ Q == 0
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 963
+
+ @assert {
+
+ Q == 0
+ cmode == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 964
+
+ @assert {
+
+ Q == 0
+ cmode == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 965
+
+ @assert {
+
+ Q == 0
+ cmode == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 966
+
+ @assert {
+
+ Q == 0
+ cmode == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 967
+
+ @assert {
+
+ Q == 0
+ cmode == 100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 968
+
+ @assert {
+
+ Q == 0
+ cmode == 101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 969
+
+ @assert {
+
+ Q == 0
+ cmode == 110
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 970
+
+ @assert {
+
+ Q == 0
+ cmode == 111
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 971
+
+ @assert {
+
+ Q == 0
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 972
+
+ @assert {
+
+ Q == 0
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 i(1) 1 1 1 1 1 D(1) 0 0 0 imm3(3) Vd(4) cmode(4) 0 Q(1) 1 1 imm4(4)
+
+ @syntax {
+
+ @subid 973
+
+ @assert {
+
+ Q == 1
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 974
+
+ @assert {
+
+ Q == 1
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 975
+
+ @assert {
+
+ Q == 1
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 976
+
+ @assert {
+
+ Q == 1
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 977
+
+ @assert {
+
+ Q == 1
+ cmode == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 978
+
+ @assert {
+
+ Q == 1
+ cmode == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 979
+
+ @assert {
+
+ Q == 1
+ cmode == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 980
+
+ @assert {
+
+ Q == 1
+ cmode == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 981
+
+ @assert {
+
+ Q == 1
+ cmode == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 982
+
+ @assert {
+
+ Q == 1
+ cmode == 101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 983
+
+ @assert {
+
+ Q == 1
+ cmode == 110
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 984
+
+ @assert {
+
+ Q == 1
+ cmode == 111
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 985
+
+ @assert {
+
+ Q == 1
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 986
+
+ @assert {
+
+ Q == 1
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 987
+
+ @assert {
+
+ Q == 0
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 988
+
+ @assert {
+
+ Q == 0
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 989
+
+ @assert {
+
+ Q == 0
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 990
+
+ @assert {
+
+ Q == 0
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 991
+
+ @assert {
+
+ Q == 0
+ cmode == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 992
+
+ @assert {
+
+ Q == 0
+ cmode == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 993
+
+ @assert {
+
+ Q == 0
+ cmode == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 994
+
+ @assert {
+
+ Q == 0
+ cmode == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 995
+
+ @assert {
+
+ Q == 0
+ cmode == 100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 996
+
+ @assert {
+
+ Q == 0
+ cmode == 101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 997
+
+ @assert {
+
+ Q == 0
+ cmode == 110
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 998
+
+ @assert {
+
+ Q == 0
+ cmode == 111
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 999
+
+ @assert {
+
+ Q == 0
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1000
+
+ @assert {
+
+ Q == 0
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('1', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vbic.i32 dwvec_D imm64
+
+ }
+
+}
+
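The conversions above all funnel through AdvSIMDExpandImm('1', cmode, i:imm3:imm4), which is referenced here but not defined. As an orientation aid, the condensed C sketch below approximates how the 8-bit immediate expands into the 64-bit constant for the cmode values these VBIC forms use; it is one reading of the ARM ARM pseudocode, not Chrysalide's helper, and the manual remains authoritative.

    #include <stdint.h>
    #include <stdio.h>

    /* Approximate AdvSIMDExpandImm() for the cmode values used above:
     * the 8-bit immediate lands in one byte of each 32-bit word (or each
     * 16-bit halfword) of a replicated 64-bit constant. */
    static uint64_t expand_imm(uint8_t cmode, uint8_t imm8)
    {
        uint64_t chunk;

        switch (cmode >> 1)
        {
            case 0: chunk = (uint64_t)imm8;       break;  /* 000000ab per word */
            case 1: chunk = (uint64_t)imm8 << 8;  break;  /* 0000ab00 per word */
            case 2: chunk = (uint64_t)imm8 << 16; break;  /* 00ab0000 per word */
            case 3: chunk = (uint64_t)imm8 << 24; break;  /* ab000000 per word */
            case 4: /* 16-bit lanes: 00ab */
                return 0x0001000100010001ull * imm8;
            case 5: /* 16-bit lanes: ab00 */
                return 0x0001000100010001ull * ((uint64_t)imm8 << 8);
            case 6: /* "ones-extended" 32-bit forms (cmode 1100 / 1101) */
                chunk = (cmode & 1) ? (((uint64_t)imm8 << 16) | 0xffff)
                                    : (((uint64_t)imm8 << 8)  | 0xff);
                break;
            default:
                return 0; /* cmode 111x is not used by VBIC (immediate) */
        }

        return chunk | (chunk << 32);   /* replicate the 32-bit pattern */
    }

    int main(void)
    {
        /* vbic.i32 with cmode == 0b0010 and imm8 == 0x45: the immediate sits
         * in byte 1 of each word, so VBIC clears those bits there. */
        printf("%016llx\n", (unsigned long long)expand_imm(0x2, 0x45));
        return 0;
    }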
diff --git a/plugins/arm/v7/opdefs/A88289_vbic.d b/plugins/arm/v7/opdefs/A88289_vbic.d
new file mode 100644
index 0000000..1756fd7
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88289_vbic.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VBIC (register)
+
+@id 281
+
+@desc {
+
+ Vector Bitwise Bit Clear (register) performs a bitwise AND between a register value and the complement of a register value, and places the result in the destination register. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 1 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1001
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbic ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1002
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbic ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 1 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1003
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbic ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1004
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbic ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88290_vb.d b/plugins/arm/v7/opdefs/A88290_vb.d
new file mode 100644
index 0000000..c4673c0
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88290_vb.d
@@ -0,0 +1,321 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VBIF, VBIT, VBSL
+
+@id 282
+
+@desc {
+
+ VBIF (Vector Bitwise Insert if False), VBIT (Vector Bitwise Insert if True), and VBSL (Vector Bitwise Select) perform bitwise selection under the control of a mask, and place the results in the destination register. The registers can be either quadword or doubleword, and must all be the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1005
+
+ @assert {
+
+ Q == 1
+ op == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1006
+
+ @assert {
+
+ Q == 1
+ op == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1007
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1008
+
+ @assert {
+
+ Q == 0
+ op == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1009
+
+ @assert {
+
+ Q == 0
+ op == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1010
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1011
+
+ @assert {
+
+ Q == 1
+ op == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1012
+
+ @assert {
+
+ Q == 1
+ op == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1013
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1014
+
+ @assert {
+
+ Q == 0
+ op == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1015
+
+ @assert {
+
+ Q == 0
+ op == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1016
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
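The three mnemonics encode the same 2:1 bitwise multiplexer and differ only in which operand acts as the selection mask. A small C sketch of the identities, written on a single 64-bit lane for brevity (the lane-splitting and helper names are assumptions of the sketch, not Chrysalide code):

    #include <stdint.h>

    static uint64_t vbsl(uint64_t d, uint64_t n, uint64_t m)
    {
        return (d & n) | (~d & m);          /* d is the selection mask */
    }

    static uint64_t vbit(uint64_t d, uint64_t n, uint64_t m)
    {
        return (n & m) | (d & ~m);          /* copy n's bits where m is set */
    }

    static uint64_t vbif(uint64_t d, uint64_t n, uint64_t m)
    {
        return (d & m) | (n & ~m);          /* copy n's bits where m is clear */
    }

    int main(void)
    {
        uint64_t mask = 0x00000000ffffffffull;

        /* With d as the mask, VBSL keeps n's low half and m's high half. */
        return vbsl(mask, 0x1111111122222222ull, 0x3333333344444444ull)
                   == 0x3333333322222222ull ? 0 : 1;
    }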
diff --git a/plugins/arm/v7/opdefs/A88291_vceq.d b/plugins/arm/v7/opdefs/A88291_vceq.d
new file mode 100644
index 0000000..18a4700
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88291_vceq.d
@@ -0,0 +1,425 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCEQ (register)
+
+@id 283
+
+@desc {
+
+ VCEQ (Vector Compare Equal) takes each element in a vector, and compares it with the corresponding element of a second vector. If they are equal, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: • 8-bit, 16-bit, or 32-bit integers. There is no distinction between signed and unsigned integers. • 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1017
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1018
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1019
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1020
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1021
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1022
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1023
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1024
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1025
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1026
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1027
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1028
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1029
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1030
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.i32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1031
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vceq.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1032
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vceq.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
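For readers new to the SIMD compares: each lane of the result is an all-ones or all-zeros mask rather than a flag update. A minimal C sketch of the .i16 doubleword form as a reading aid (lane count, types and helper name are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* VCEQ.I16 on a doubleword register: each 16-bit result lane becomes
     * all ones when the operand lanes are equal, all zeros otherwise. */
    static void vceq_i16(uint16_t d[4], const uint16_t n[4], const uint16_t m[4])
    {
        for (int i = 0; i < 4; i++)
            d[i] = (n[i] == m[i]) ? 0xffff : 0x0000;
    }

    int main(void)
    {
        uint16_t n[4] = { 1, 2, 3, 4 };
        uint16_t m[4] = { 1, 0, 3, 0 };
        uint16_t d[4];

        vceq_i16(d, n, m);
        printf("%04x %04x %04x %04x\n", d[0], d[1], d[2], d[3]); /* ffff 0000 ffff 0000 */
        return 0;
    }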
diff --git a/plugins/arm/v7/opdefs/A88292_vceq.d b/plugins/arm/v7/opdefs/A88292_vceq.d
new file mode 100644
index 0000000..dce989f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88292_vceq.d
@@ -0,0 +1,429 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCEQ (immediate #0)
+
+@id 284
+
+@desc {
+
+ VCEQ #0 (Vector Compare Equal to zero) takes each element in a vector, and compares it with zero. If it is equal to zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: • 8-bit, 16-bit, or 32-bit integers. There is no distinction between signed and unsigned integers. • 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1033
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1034
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1035
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1036
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1037
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1038
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1039
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1040
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1041
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1042
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1043
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1044
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1045
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1046
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1047
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.i32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1048
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vceq.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88293_vcge.d b/plugins/arm/v7/opdefs/A88293_vcge.d
new file mode 100644
index 0000000..44909a0
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88293_vcge.d
@@ -0,0 +1,725 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCGE (register)
+
+@id 285
+
+@desc {
+
+ VCGE (Vector Compare Greater Than or Equal) takes each element in a vector, and compares it with the corresponding element of a second vector. If the first is greater than or equal to the second, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: • 8-bit, 16-bit, or 32-bit signed integers • 8-bit, 16-bit, or 32-bit unsigned integers • 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1049
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1050
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1051
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1052
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1053
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1054
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1055
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1056
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1057
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1058
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1059
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1060
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1061
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1062
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1063
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1064
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1065
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1066
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1067
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1068
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1069
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1070
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1071
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1072
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1073
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1074
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1075
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcge.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1076
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcge.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
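The only difference between the .sN and .uN syntaxes above is the signedness implied by the U bit; the mask-producing behaviour is otherwise the same as for VCEQ. A small C sketch of one 8-bit lane, using hypothetical helper names chosen for this note:

    #include <stdint.h>

    /* VCGE.S8 versus VCGE.U8 on a single lane: only the interpretation of
     * the operand bits changes. */
    static uint8_t vcge_s8_lane(int8_t n, int8_t m)
    {
        return (n >= m) ? 0xff : 0x00;
    }

    static uint8_t vcge_u8_lane(uint8_t n, uint8_t m)
    {
        return (n >= m) ? 0xff : 0x00;
    }

    int main(void)
    {
        /* 0x80 is -128 when signed but 128 when unsigned, so the forms differ. */
        return (vcge_s8_lane((int8_t)0x80, 1) == 0x00 &&
                vcge_u8_lane(0x80, 1) == 0xff) ? 0 : 1;
    }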
diff --git a/plugins/arm/v7/opdefs/A88294_vcge.d b/plugins/arm/v7/opdefs/A88294_vcge.d
new file mode 100644
index 0000000..f389298
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88294_vcge.d
@@ -0,0 +1,429 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCGE (immediate #0)
+
+@id 286
+
+@desc {
+
+    VCGE #0 (Vector Compare Greater Than or Equal to Zero) takes each element in a vector, and compares it with zero. If it is greater than or equal to zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: • 8-bit, 16-bit, or 32-bit signed integers • 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1077
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1078
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1079
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1080
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1081
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1082
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1083
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1084
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1085
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1086
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1087
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1088
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1089
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1090
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1091
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1092
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcge.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88295_vcgt.d b/plugins/arm/v7/opdefs/A88295_vcgt.d
new file mode 100644
index 0000000..cb95340
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88295_vcgt.d
@@ -0,0 +1,725 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCGT (register)
+
+@id 287
+
+@desc {
+
+ VCGT (Vector Compare Greater Than) takes each element in a vector, and compares it with the corresponding element of a second vector. If the first is greater than the second, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: 8-bit, 16-bit, or 32-bit signed integers; 8-bit, 16-bit, or 32-bit unsigned integers; or 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
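
The two-operand compare is easiest to see as a loop over lanes. The scalar C sketch below is illustrative only and is not part of the patch; it shows VCGT.S32 on a doubleword register (Q == 0, size == 10), with an invented helper name.

    #include <stdint.h>

    /* VCGT.S32 on a doubleword (64-bit) register: each destination lane is
       set to all ones when the first operand lane is greater than the second. */
    static void vcgt_s32_dword(uint32_t d[2], const int32_t n[2], const int32_t m[2])
    {
        for (int i = 0; i < 2; i++)
            d[i] = (n[i] > m[i]) ? UINT32_MAX : 0;
    }
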
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1093
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1094
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1095
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1096
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1097
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1098
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1099
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1100
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1101
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1102
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1103
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1104
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1105
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1106
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1107
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1108
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1109
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1110
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1111
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1112
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1113
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1114
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1115
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1116
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1117
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1118
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 1 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1119
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1120
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcgt.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88296_vcgt.d b/plugins/arm/v7/opdefs/A88296_vcgt.d
new file mode 100644
index 0000000..ea7b1e8
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88296_vcgt.d
@@ -0,0 +1,429 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCGT (immediate #0)
+
+@id 288
+
+@desc {
+
+ VCGT #0 (Vector Compare Greater Than Zero) takes each element in a vector, and compares it with zero. If it is greater than zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: 8-bit, 16-bit, or 32-bit signed integers, or 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
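
The floating-point form (F == 1) follows the same all-ones/all-zeros convention. A minimal scalar C sketch of one VCGT.F32 #0 lane, illustrative only and not part of the patch, with an invented helper name:

    #include <stdint.h>

    /* One lane of VCGT.F32 #0: all ones when the single-precision element is
       greater than +0.0, all zeros otherwise (a NaN input therefore gives zeros). */
    static uint32_t vcgt_zero_lane_f32(float elem)
    {
        return (elem > 0.0f) ? UINT32_MAX : 0;
    }
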
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1121
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1122
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1123
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1124
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1125
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1126
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1127
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1128
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1129
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1130
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1131
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1132
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1133
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1134
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1135
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1136
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcgt.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88298_vcle.d b/plugins/arm/v7/opdefs/A88298_vcle.d
new file mode 100644
index 0000000..e3215e1
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88298_vcle.d
@@ -0,0 +1,429 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCLE (immediate #0)
+
+@id 290
+
+@desc {
+
+ VCLE #0 (Vector Compare Less Than or Equal to Zero) takes each element in a vector, and compares it with zero. If it is less than or equal to zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: 8-bit, 16-bit, or 32-bit signed integers, or 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
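
Written out for a doubleword register with 16-bit lanes (Q == 0, size == 1), the operation looks as follows. The scalar C sketch is illustrative only and not part of the patch; the helper name is invented here.

    #include <stdint.h>

    /* VCLE.S16 #0 over a doubleword register: four 16-bit lanes, each set to
       all ones when the element is less than or equal to zero. */
    static void vcle_zero_s16_dword(uint16_t d[4], const int16_t m[4])
    {
        for (int i = 0; i < 4; i++)
            d[i] = (m[i] <= 0) ? UINT16_MAX : 0;
    }
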
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1137
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1138
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1139
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1140
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1141
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1142
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1143
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1144
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1145
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1146
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1147
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1148
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1149
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1150
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1151
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1152
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcle.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88299_vcls.d b/plugins/arm/v7/opdefs/A88299_vcls.d
new file mode 100644
index 0000000..67464c2
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88299_vcls.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCLS
+
+@id 291
+
+@desc {
+
+ Vector Count Leading Sign Bits counts the number of consecutive bits following the topmost bit, that are the same as the topmost bit, in each element in a vector, and places the results in a second vector. The count does not include the topmost bit itself. The operand vector elements can be any one of 8-bit, 16-bit, or 32-bit signed integers. The result vector elements are the same data type as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
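
The counting rule ("consecutive bits following the topmost bit that are the same as the topmost bit") can be stated precisely as a small C function. The sketch below is illustrative only and not part of the patch; it shows the 32-bit case with an invented helper name.

    #include <stdint.h>

    /* VCLS on one 32-bit lane: count the bits below the sign bit that match it,
       stopping at the first bit that differs.  VCLS of 0 or -1 is therefore 31. */
    static int32_t vcls_lane_s32(int32_t elem)
    {
        uint32_t u = (uint32_t) elem;
        uint32_t sign = u >> 31;
        int32_t count = 0;

        for (int bit = 30; bit >= 0 && ((u >> bit) & 1) == sign; bit--)
            count++;

        return count;
    }
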
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1153
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1154
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1155
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1156
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1157
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1158
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1159
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1160
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1161
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1162
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1163
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1164
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcls.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88301_vclt.d b/plugins/arm/v7/opdefs/A88301_vclt.d
new file mode 100644
index 0000000..caf768a
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88301_vclt.d
@@ -0,0 +1,429 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCLT (immediate #0)
+
+@id 293
+
+@desc {
+
+ VCLT #0 (Vector Compare Less Than Zero) takes each element in a vector, and compares it with zero. If it is less than zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be any one of: 8-bit, 16-bit, or 32-bit signed integers, or 32-bit floating-point numbers. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
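
One more variant of the same pattern, with 8-bit lanes this time, to make the point that the result field has the same width as the operand element. Illustrative only, not part of the patch, invented helper name.

    #include <stdint.h>

    /* One lane of VCLT.S8 #0: an 8-bit element produces an 8-bit all-ones or
       all-zeros result field. */
    static uint8_t vclt_zero_lane_s8(int8_t elem)
    {
        return (elem < 0) ? UINT8_MAX : 0;
    }
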
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1165
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1166
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1167
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1168
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1169
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1170
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1171
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1172
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1173
+
+ @assert {
+
+ Q == 1
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s8 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1174
+
+ @assert {
+
+ Q == 1
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s16 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1175
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1176
+
+ @assert {
+
+ Q == 1
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.f32 ?qwvec_D qwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1177
+
+ @assert {
+
+ Q == 0
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s8 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1178
+
+ @assert {
+
+ Q == 0
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s16 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1179
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.s32 ?dwvec_D dwvec_M zero
+
+ }
+
+ @syntax {
+
+ @subid 1180
+
+ @assert {
+
+ Q == 0
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ zero = Zeros(8)
+
+ }
+
+ @asm vclt.f32 ?dwvec_D dwvec_M zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88302_vclz.d b/plugins/arm/v7/opdefs/A88302_vclz.d
new file mode 100644
index 0000000..c65d663
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88302_vclz.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCLZ
+
+@id 294
+
+@desc {
+
+ Vector Count Leading Zeros counts the number of consecutive zeros, starting from the most significant bit, in each element in a vector, and places the results in a second vector. The operand vector elements can be any one of 8-bit, 16-bit, or 32-bit integers. There is no distinction between signed and unsigned integers. The result vector elements are the same data type as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
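
A scalar C model of one lane, illustrative only and not part of the patch (invented helper name). Note that, unlike VCLS, the count starts at the most significant bit itself, so VCLZ of 0 is the full element width.

    #include <stdint.h>

    /* VCLZ on one 32-bit lane: number of consecutive zero bits starting from
       the most significant bit; 0 gives 32, a value with bit 31 set gives 0. */
    static int32_t vclz_lane_i32(uint32_t elem)
    {
        int32_t count = 0;

        for (int bit = 31; bit >= 0 && ((elem >> bit) & 1) == 0; bit--)
            count++;

        return count;
    }
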
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1181
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1182
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1183
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1184
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1185
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1186
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1187
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1188
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1189
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1190
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1191
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1192
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vclz.i32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88303_vcmp.d b/plugins/arm/v7/opdefs/A88303_vcmp.d
new file mode 100644
index 0000000..dc6803d
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88303_vcmp.d
@@ -0,0 +1,409 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCMP, VCMPE
+
+@id 295
+
+@desc {
+
+ This instruction compares two floating-point registers, or one floating-point register and zero. It writes the result to the FPSCR flags. These are normally transferred to the ARM flags by a subsequent VMRS instruction. It can optionally raise an Invalid Operation exception if either operand is any type of NaN. It always raises an Invalid Operation exception if either operand is a signaling NaN. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 summarizes these controls.
+
+}
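
The FPSCR flag pattern that a VCMP produces is worth spelling out, since the @asm forms below only show the operands. The C sketch is an approximation of the conventional ARM outcome-to-NZCV mapping (less than, equal, greater than, unordered); it is illustrative only and not part of the patch, and the helper name is invented here.

    #include <stdbool.h>

    /* Approximate N/Z/C/V result of VCMP.F64, using the conventional mapping:
       less than -> N, equal -> Z and C, greater than -> C, unordered -> C and V. */
    static void vcmp_f64_flags(double a, double b, bool nzcv[4])
    {
        bool lt = a < b, eq = a == b, gt = a > b;
        bool unordered = !lt && !eq && !gt;   /* at least one operand is a NaN */

        nzcv[0] = lt;                         /* N */
        nzcv[1] = eq;                         /* Z */
        nzcv[2] = gt || eq || unordered;      /* C */
        nzcv[3] = unordered;                  /* V */
    }
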
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 1 0 0 Vd(4) 1 0 1 sz(1) E(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1193
+
+ @assert {
+
+ sz == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcmp.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1194
+
+ @assert {
+
+ sz == 1
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcmpe.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1195
+
+ @assert {
+
+ sz == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vcmp.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1196
+
+ @assert {
+
+ sz == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vcmpe.f32 swvec_D swvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 1 0 1 Vd(4) 1 0 1 sz(1) E(1) 1 0 0 0 0 0 0
+
+ @syntax {
+
+ @subid 1197
+
+ @assert {
+
+ sz == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmp.f64 dwvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1198
+
+ @assert {
+
+ sz == 1
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmpe.f64 dwvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1199
+
+ @assert {
+
+ sz == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmp.f32 swvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1200
+
+ @assert {
+
+ sz == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmpe.f32 swvec_D zero
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 1 0 0 Vd(4) 1 0 1 sz(1) E(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1201
+
+ @assert {
+
+ sz == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcmp.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1202
+
+ @assert {
+
+ sz == 1
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcmpe.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1203
+
+ @assert {
+
+ sz == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vcmp.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1204
+
+ @assert {
+
+ sz == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vcmpe.f32 swvec_D swvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 1 0 1 Vd(4) 1 0 1 sz(1) E(1) 1 0 0 0 0 0 0
+
+ @syntax {
+
+ @subid 1205
+
+ @assert {
+
+ sz == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmp.f64 dwvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1206
+
+ @assert {
+
+ sz == 1
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmpe.f64 dwvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1207
+
+ @assert {
+
+ sz == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmp.f32 swvec_D zero
+
+ }
+
+ @syntax {
+
+ @subid 1208
+
+ @assert {
+
+ sz == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ zero = Zeros(8)
+
+ }
+
+ @asm vcmpe.f32 swvec_D zero
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88304_vcnt.d b/plugins/arm/v7/opdefs/A88304_vcnt.d
new file mode 100644
index 0000000..e3535bb
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88304_vcnt.d
@@ -0,0 +1,129 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCNT
+
+@id 296
+
+@desc {
+
+ This instruction counts the number of bits that are one in each element in a vector, and places the results in a second vector. The operand vector elements must be 8-bit fields. The result vector elements are 8-bit integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
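
The per-lane operation is a plain population count over an 8-bit field. A scalar C sketch, illustrative only and not part of the patch, with an invented helper name:

    #include <stdint.h>

    /* VCNT.8 on one lane: number of bits set in an 8-bit field. */
    static uint8_t vcnt_lane_8(uint8_t elem)
    {
        uint8_t count = 0;

        for (; elem != 0; elem >>= 1)
            count += elem & 1;

        return count;
    }
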
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1209
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcnt.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1210
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcnt.8 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1211
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcnt.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1212
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcnt.8 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88305_vcvt.d b/plugins/arm/v7/opdefs/A88305_vcvt.d
new file mode 100644
index 0000000..d2699d3
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88305_vcvt.d
@@ -0,0 +1,413 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VCVT (between floating-point and integer, Advanced SIMD)
+
+@id 297
+
+@desc {
+
+ This instruction converts each element in a vector from floating-point to integer, or from integer to floating-point, and places the results in a second vector. The vector elements must be 32-bit floating-point numbers, or 32-bit integers. Signed and unsigned integers are distinct. The floating-point to integer operation uses the Round towards Zero rounding mode. The integer to floating-point operation uses the Round to Nearest rounding mode. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
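
The float-to-integer direction is the one with the subtle rules (truncation towards zero, saturation of out-of-range values, NaN handling). A scalar C approximation of one VCVT.S32.F32 lane, illustrative only and not part of the patch, with an invented helper name:

    #include <stdint.h>

    /* One lane of VCVT.S32.F32: round towards zero, with out-of-range values
       saturated and NaN converted to zero (approximating the hardware rules). */
    static int32_t vcvt_s32_f32_lane(float elem)
    {
        if (elem != elem)           return 0;          /* NaN */
        if (elem >= 2147483648.0f)  return INT32_MAX;  /* saturate high */
        if (elem < -2147483648.0f)  return INT32_MIN;  /* saturate low */

        return (int32_t) elem;                         /* C casts truncate towards zero */
    }
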
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 1 op(2) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1213
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.s32.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1214
+
+ @assert {
+
+ Q == 1
+ op == 11
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.u32.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1215
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1216
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1217
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.s32.f32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1218
+
+ @assert {
+
+ Q == 0
+ op == 11
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.u32.f32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1219
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1220
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 1 op(2) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1221
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.s32.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1222
+
+ @assert {
+
+ Q == 1
+ op == 11
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.u32.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1223
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1224
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1225
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.s32.f32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1226
+
+ @assert {
+
+ Q == 0
+ op == 11
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.u32.f32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1227
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1228
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vcvt.f32.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88312_vdiv.d b/plugins/arm/v7/opdefs/A88312_vdiv.d
new file mode 100644
index 0000000..75a39a4
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88312_vdiv.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VDIV
+
+@id 298
+
+@desc {
+
+ This instruction divides one floating-point value by another floating-point value and writes the result to a third floating-point register. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 summarizes these controls.
+
+}
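
Unlike the Advanced SIMD instructions above, this is a scalar VFP operation: one division per instruction, no lanes. In C terms it is nothing more than the following; shown only for symmetry with the other sketches, not part of the patch, invented helper name.

    /* VDIV.F64: IEEE division of two double-precision registers. */
    static double vdiv_f64_model(double n, double m)
    {
        return n / m;
    }
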
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 0 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1229
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vdiv.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1230
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vdiv.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 0 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1231
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vdiv.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1232
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vdiv.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88314_vdup.d b/plugins/arm/v7/opdefs/A88314_vdup.d
new file mode 100644
index 0000000..0496d92
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88314_vdup.d
@@ -0,0 +1,321 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VDUP (ARM core register)
+
+@id 299
+
+@desc {
+
+ This instruction duplicates an element from an ARM core register into every element of the destination vector. The destination vector elements can be 8-bit, 16-bit, or 32-bit fields. The source element is the least significant 8, 16, or 32 bits of the ARM core register. There is no distinction between data types. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
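
A scalar C sketch of the 8-bit form over a doubleword register, illustrative only and not part of the patch (invented helper name). The key point is that only the low bits of the ARM core register are used.

    #include <stdint.h>

    /* VDUP.8 into a doubleword register: bits [7:0] of the core register are
       replicated into each of the eight byte lanes. */
    static void vdup_8_dword(uint8_t d[8], uint32_t rt)
    {
        for (int i = 0; i < 8; i++)
            d[i] = (uint8_t) rt;
    }
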
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 1 B(1) Q(1) 0 Vd(4) Rt(4) 1 0 1 1 D(1) 0 E(1) 1 0 0 0 0
+
+ @syntax {
+
+ @subid 1233
+
+ @assert {
+
+ Q == 1
+ B == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.8 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1234
+
+ @assert {
+
+ Q == 1
+ B == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.16 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1235
+
+ @assert {
+
+ Q == 1
+ B == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.32 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1236
+
+ @assert {
+
+ Q == 0
+ B == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.8 dwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1237
+
+ @assert {
+
+ Q == 0
+ B == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.16 dwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1238
+
+ @assert {
+
+ Q == 0
+ B == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.32 dwvec_D reg_T
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 1 B(1) Q(1) 0 Vd(4) Rt(4) 1 0 1 1 D(1) 0 E(1) 1 0 0 0 0
+
+ @syntax {
+
+ @subid 1239
+
+ @assert {
+
+ Q == 1
+ B == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.8 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1240
+
+ @assert {
+
+ Q == 1
+ B == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.16 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1241
+
+ @assert {
+
+ Q == 1
+ B == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.32 qwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1242
+
+ @assert {
+
+ Q == 0
+ B == 1
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.8 dwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1243
+
+ @assert {
+
+ Q == 0
+ B == 0
+ E == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.16 dwvec_D reg_T
+
+ }
+
+ @syntax {
+
+ @subid 1244
+
+ @assert {
+
+ Q == 0
+ B == 0
+ E == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ reg_T = Register(Rt)
+
+ }
+
+ @asm vdup.32 dwvec_D reg_T
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88315_veor.d b/plugins/arm/v7/opdefs/A88315_veor.d
new file mode 100644
index 0000000..fa3f718
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88315_veor.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VEOR
+
+@id 300
+
+@desc {
+
+ Vector Bitwise Exclusive OR performs a bitwise Exclusive OR operation between two registers, and places the result in the destination register. The operand and result registers can be quadword or doubleword. They must all be the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
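+
+/* A one-line C sketch of the operation described above (helper name made up for
+ * this note): the registers are XORed as whole bit patterns, so a single 64-bit
+ * lane is enough to show the doubleword form.
+ *
+ *     #include <stdint.h>
+ *     uint64_t veor_d(uint64_t dn, uint64_t dm) { return dn ^ dm; }
+ */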
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1245
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm veor ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1246
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm veor ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1247
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm veor ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1248
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm veor ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88317_vfm.d b/plugins/arm/v7/opdefs/A88317_vfm.d
new file mode 100644
index 0000000..63444d3
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88317_vfm.d
@@ -0,0 +1,433 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VFMA, VFMS
+
+@id 301
+
+@desc {
+
+ Vector Fused Multiply Accumulate multiplies corresponding elements of two vectors, and accumulates the results into the elements of the destination vector. The instruction does not round the result of the multiply before the accumulation. Vector Fused Multiply Subtract negates the elements of one vector and multiplies them with the corresponding elements of another vector, adds the products to the corresponding elements of the destination vector, and places the results in the destination vector. The instruction does not round the result of the multiply before the addition. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
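+
+/* The fused behaviour described above can be sketched per lane with the C99 fma()
+ * primitive, which likewise does not round the product before the addition
+ * (helper names made up for this note).
+ *
+ *     #include <math.h>
+ *     double vfma_f64(double dd, double dn, double dm) { return fma( dn, dm, dd); }
+ *     double vfms_f64(double dd, double dn, double dm) { return fma(-dn, dm, dd); }
+ */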
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1249
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1250
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1251
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1252
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1253
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1254
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1255
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfma.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1256
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfms.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1257
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1258
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1259
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1260
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1261
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfma.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1262
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfms.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1263
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfma.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1264
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfms.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88318_vfnm.d b/plugins/arm/v7/opdefs/A88318_vfnm.d
new file mode 100644
index 0000000..fcb10b7
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88318_vfnm.d
@@ -0,0 +1,229 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VFNMA, VFNMS
+
+@id 302
+
+@desc {
+
+ Vector Fused Negate Multiply Accumulate negates one floating-point register value and multiplies it by another floating-point register value, adds the negation of the floating-point value in the destination register to the product, and writes the result back to the destination register. The instruction does not round the result of the multiply before the addition. Vector Fused Negate Multiply Subtract multiplies together two floating-point register values, adds the negation of the floating-point value in the destination register to the product, and writes the result back to the destination register. The instruction does not round the result of the multiply before the addition. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 summarizes these controls.
+
+}
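+
+/* Per-lane C sketch of the negated fused forms described above, again using C99
+ * fma() so the product is not rounded before the addition (helper names made up
+ * for this note).
+ *
+ *     #include <math.h>
+ *     double vfnma_f64(double dd, double dn, double dm) { return fma(-dn, dm, -dd); }
+ *     double vfnms_f64(double dd, double dn, double dm) { return fma( dn, dm, -dd); }
+ */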
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 0 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1265
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfnma.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1266
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfnms.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1267
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfnma.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1268
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfnms.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 0 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1269
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfnma.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1270
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vfnms.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1271
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfnma.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1272
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vfnms.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88319_vh.d b/plugins/arm/v7/opdefs/A88319_vh.d
new file mode 100644
index 0000000..efb94bc
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88319_vh.d
@@ -0,0 +1,1245 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VHADD, VHSUB
+
+@id 303
+
+@desc {
+
+ Vector Halving Add adds corresponding elements in two vectors of integers, shifts each result right one bit, and places the final results in the destination vector. The results of the halving operations are truncated (for rounded results see VRHADD on page A8-1030). Vector Halving Subtract subtracts the elements of the second operand from the corresponding elements of the first operand, shifts each result right one bit, and places the final results in the destination vector. The results of the halving operations are truncated (there is no rounding version). The operand and result elements are all the same type, and can be any one of: • 8-bit, 16-bit, or 32-bit signed integers • 8-bit, 16-bit, or 32-bit unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
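+
+/* Per-lane C sketch of the halving behaviour described above for signed bytes
+ * (helper names made up for this note): widening before the operation keeps the
+ * carry, and the right shift drops the low bit, which is the truncation mentioned
+ * above, assuming the usual arithmetic shift on negative values.
+ *
+ *     #include <stdint.h>
+ *     int8_t vhadd_s8(int8_t a, int8_t b) { return (int8_t)(((int16_t)a + b) >> 1); }
+ *     int8_t vhsub_s8(int8_t a, int8_t b) { return (int8_t)(((int16_t)a - b) >> 1); }
+ */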
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 op(1) 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1273
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1274
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1275
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1276
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1277
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1278
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1279
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1280
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1281
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1282
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1283
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1284
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1285
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1286
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1287
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1288
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1289
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1290
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1291
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1292
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1293
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1294
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1295
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1296
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 op(1) 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1297
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1298
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1299
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1300
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1301
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1302
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1303
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1304
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1305
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1306
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1307
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1308
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1309
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1310
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1311
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1312
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1313
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1314
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhadd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1315
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1316
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1317
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1318
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1319
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1320
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vhsub.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88334_vmax.d b/plugins/arm/v7/opdefs/A88334_vmax.d
new file mode 100644
index 0000000..bc637aa
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88334_vmax.d
@@ -0,0 +1,1245 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMAX, VMIN (integer)
+
+@id 304
+
+@desc {
+
+ Vector Maximum compares corresponding elements in two vectors, and copies the larger of each pair into the corresponding element in the destination vector. Vector Minimum compares corresponding elements in two vectors, and copies the smaller of each pair into the corresponding element in the destination vector. The operand vector elements can be any one of: • 8-bit, 16-bit, or 32-bit signed integers • 8-bit, 16-bit, or 32-bit unsigned integers. The result vector elements are the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
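+
+/* Per-lane C sketch of the integer comparison described above for signed bytes
+ * (helper names made up for this note); the other element sizes and the unsigned
+ * forms only change the lane type.
+ *
+ *     #include <stdint.h>
+ *     int8_t vmax_s8(int8_t a, int8_t b) { return a > b ? a : b; }
+ *     int8_t vmin_s8(int8_t a, int8_t b) { return a < b ? a : b; }
+ */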
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) Q(1) M(1) op(1) Vm(4)
+
+ @syntax {
+
+ @subid 1321
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1322
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1323
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1324
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1325
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1326
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1327
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1328
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1329
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1330
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1331
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1332
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1333
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1334
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1335
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1336
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1337
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1338
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1339
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1340
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1341
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1342
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1343
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1344
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) Q(1) M(1) op(1) Vm(4)
+
+ @syntax {
+
+ @subid 1345
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1346
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1347
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1348
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1349
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1350
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1351
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1352
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1353
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1354
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1355
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1356
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1357
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1358
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1359
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1360
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1361
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1362
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1363
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1364
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1365
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1366
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1367
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1368
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88335_vmax.d b/plugins/arm/v7/opdefs/A88335_vmax.d
new file mode 100644
index 0000000..8bab225
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88335_vmax.d
@@ -0,0 +1,229 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMAX, VMIN (floating-point)
+
+@id 305
+
+@desc {
+
+ Vector Maximum compares corresponding elements in two vectors, and copies the larger of each pair into the corresponding element in the destination vector. Vector Minimum compares corresponding elements in two vectors, and copies the smaller of each pair into the corresponding element in the destination vector. The operand vector elements are 32-bit floating-point numbers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
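+
+/* Per-lane C sketch of the floating-point comparison described above (helper
+ * names made up for this note), ignoring the NaN and signed-zero special cases
+ * that the hardware handles explicitly.
+ *
+ *     float vmax_f32(float a, float b) { return a > b ? a : b; }
+ *     float vmin_f32(float a, float b) { return a < b ? a : b; }
+ */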
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1369
+
+ @assert {
+
+ Q == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1370
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1371
+
+ @assert {
+
+ Q == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1372
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1373
+
+ @assert {
+
+ Q == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmax.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1374
+
+ @assert {
+
+ Q == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmin.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1375
+
+ @assert {
+
+ Q == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmax.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1376
+
+ @assert {
+
+ Q == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmin.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88337_vmla.d b/plugins/arm/v7/opdefs/A88337_vmla.d
new file mode 100644
index 0000000..678aa4c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88337_vmla.d
@@ -0,0 +1,433 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMLA, VMLS (floating-point)
+
+@id 306
+
+@desc {
+
+    Vector Multiply Accumulate multiplies corresponding elements in two vectors, and accumulates the results into the elements of the destination vector. Vector Multiply Subtract multiplies corresponding elements in two vectors, subtracts the products from corresponding elements of the destination vector, and places the results in the destination vector. Note: ARM recommends that software does not use the VMLS instruction in the Round towards Plus Infinity and Round towards Minus Infinity rounding modes, because the rounding of the product and of the sum can change the result of the instruction in opposite directions, defeating the purpose of these rounding modes. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
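+
+/* Per-lane C sketch of the non-fused forms described above (helper names made up
+ * for this note): the product is rounded before the accumulation, so a plain
+ * multiply followed by a separate add models it, assuming the compiler does not
+ * contract the expression into a fused operation.
+ *
+ *     float vmla_f32(float dd, float dn, float dm) { return dd + dn * dm; }
+ *     float vmls_f32(float dd, float dn, float dm) { return dd - dn * dm; }
+ */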
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1377
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1378
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1379
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1380
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 0 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1381
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1382
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1383
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmla.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1384
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmls.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1385
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1386
+
+ @assert {
+
+ Q == 1
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f32 qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1387
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1388
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f32 dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 0 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1389
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmla.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1390
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmls.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1391
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmla.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1392
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmls.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88345_vmov.d b/plugins/arm/v7/opdefs/A88345_vmov.d
new file mode 100644
index 0000000..b2305e3
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88345_vmov.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMOV (between two ARM core registers and a doubleword extension register)
+
+@id 307
+
+@desc {
+
+ This instruction copies two words from two ARM core registers into a doubleword extension register, or from a doubleword extension register to two ARM core registers. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls.
+
+}
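+
+/* As an illustration of the transfer described above, a minimal C model
+ * (not part of the opcode definition itself; the helper names are arbitrary)
+ * could look like this:
+ *
+ *     #include <stdint.h>
+ *
+ *     // vmov dm, rt, rt2 : Rt goes to Dm[31:0], Rt2 to Dm[63:32]
+ *     static uint64_t vmov_pack(uint32_t rt, uint32_t rt2)
+ *     {
+ *         return ((uint64_t)rt2 << 32) | rt;
+ *     }
+ *
+ *     // vmov rt, rt2, dm : the reverse transfer
+ *     static void vmov_unpack(uint32_t *rt, uint32_t *rt2, uint64_t dm)
+ *     {
+ *         *rt  = (uint32_t)dm;
+ *         *rt2 = (uint32_t)(dm >> 32);
+ *     }
+ */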
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 0 0 0 1 0 op(1) Rt2(4) Rt(4) 1 0 1 1 0 0 M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1393
+
+ @assert {
+
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_M = DoubleWordVector(M:Vm)
+ reg_T = Register(Rt)
+ reg_T2 = Register(Rt2)
+
+ }
+
+ @asm vmov dwvec_M reg_T reg_T2
+
+ }
+
+ @syntax {
+
+ @subid 1394
+
+ @assert {
+
+ op == 1
+
+ }
+
+ @conv {
+
+ reg_T = Register(Rt)
+ reg_T2 = Register(Rt2)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmov reg_T reg_T2 dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 0 0 0 1 0 op(1) Rt2(4) Rt(4) 1 0 1 1 0 0 M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1395
+
+ @assert {
+
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_M = DoubleWordVector(M:Vm)
+ reg_T = Register(Rt)
+ reg_T2 = Register(Rt2)
+
+ }
+
+ @asm vmov dwvec_M reg_T reg_T2
+
+ }
+
+ @syntax {
+
+ @subid 1396
+
+ @assert {
+
+ op == 1
+
+ }
+
+ @conv {
+
+ reg_T = Register(Rt)
+ reg_T2 = Register(Rt2)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmov reg_T reg_T2 dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88346_vmovl.d b/plugins/arm/v7/opdefs/A88346_vmovl.d
new file mode 100644
index 0000000..b5a1800
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88346_vmovl.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMOVL
+
+@id 308
+
+@desc {
+
+    Vector Move Long takes each element in a doubleword vector, sign- or zero-extends it to twice its original length, and places the results in a quadword vector. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
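+
+/* To make the widening behaviour above concrete, a minimal C sketch of one
+ * variant (vmovl.s16, four 16-bit lanes widened to 32 bits) might be, with
+ * an arbitrary helper name:
+ *
+ *     #include <stdint.h>
+ *
+ *     // vmovl.s16 qd, dm : sign-extend each 16-bit lane to a 32-bit lane
+ *     static void vmovl_s16(int32_t qd[4], const int16_t dm[4])
+ *     {
+ *         for (int i = 0; i < 4; i++)
+ *             qd[i] = dm[i];        // the assignment sign-extends
+ *     }
+ */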
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) imm3(3) 0 0 0 Vd(4) 1 0 1 0 0 0 M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1397
+
+ @assert {
+
+ U == 0
+ imm3 == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s8 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1398
+
+ @assert {
+
+ U == 0
+ imm3 == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s16 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1399
+
+ @assert {
+
+ U == 0
+ imm3 == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s32 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1400
+
+ @assert {
+
+ U == 1
+ imm3 == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u8 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1401
+
+ @assert {
+
+ U == 1
+ imm3 == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u16 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1402
+
+ @assert {
+
+ U == 1
+ imm3 == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u32 qwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) imm3(3) 0 0 0 Vd(4) 1 0 1 0 0 0 M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1403
+
+ @assert {
+
+ U == 0
+ imm3 == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s8 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1404
+
+ @assert {
+
+ U == 0
+ imm3 == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s16 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1405
+
+ @assert {
+
+ U == 0
+ imm3 == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.s32 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1406
+
+ @assert {
+
+ U == 1
+ imm3 == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u8 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1407
+
+ @assert {
+
+ U == 1
+ imm3 == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u16 qwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1408
+
+ @assert {
+
+ U == 1
+ imm3 == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmovl.u32 qwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88347_vmovn.d b/plugins/arm/v7/opdefs/A88347_vmovn.d
new file mode 100644
index 0000000..7710b2d
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88347_vmovn.d
@@ -0,0 +1,171 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMOVN
+
+@id 309
+
+@desc {
+
+ Vector Move and Narrow copies the least significant half of each element of a quadword vector into the corresponding elements of a doubleword vector. The operand vector elements can be any one of 16-bit, 32-bit, or 64-bit integers. There is no distinction between signed and unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
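+
+/* The narrowing described above amounts to a plain truncation of every lane;
+ * a minimal C sketch of the .i32 variant (arbitrary helper name) could be:
+ *
+ *     #include <stdint.h>
+ *
+ *     // vmovn.i32 dd, qm : keep the low 16 bits of each 32-bit lane
+ *     static void vmovn_i32(uint16_t dd[4], const uint32_t qm[4])
+ *     {
+ *         for (int i = 0; i < 4; i++)
+ *             dd[i] = (uint16_t)qm[i];   // truncation, no saturation
+ *     }
+ */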
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 1 0 0 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1409
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i16 dwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1410
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i32 dwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1411
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i64 dwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 1 0 0 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1412
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i16 dwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1413
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i32 dwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1414
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmovn.i64 dwvec_D qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88351_vmul.d b/plugins/arm/v7/opdefs/A88351_vmul.d
new file mode 100644
index 0000000..35df978
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88351_vmul.d
@@ -0,0 +1,237 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMUL (floating-point)
+
+@id 310
+
+@desc {
+
+ Vector Multiply multiplies corresponding elements in two vectors, and places the results in the destination vector. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
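+
+/* The operation above is a lane-wise multiply; ignoring rounding-mode and
+ * flush-to-zero details, a minimal C sketch of the doubleword .f32 form
+ * (arbitrary helper name, two lanes per D register) could be:
+ *
+ *     // vmul.f32 dd, dn, dm
+ *     static void vmul_f32(float dd[2], const float dn[2], const float dm[2])
+ *     {
+ *         for (int i = 0; i < 2; i++)
+ *             dd[i] = dn[i] * dm[i];
+ *     }
+ */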
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1415
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1416
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1417
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1418
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmul.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1419
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1420
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1421
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vmul.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1422
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vmul.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88354_vmvn.d b/plugins/arm/v7/opdefs/A88354_vmvn.d
new file mode 100644
index 0000000..0062cbf
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88354_vmvn.d
@@ -0,0 +1,75 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VMVN (register)
+
+@id 311
+
+@desc {
+
+ Vector Bitwise NOT (register) takes a value from a register, inverts the value of each bit, and places the result in the destination register. The registers can be either doubleword or quadword. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
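+
+/* The operation above is a plain bitwise complement; modelling a quadword
+ * register as two 64-bit halves, a minimal C sketch (arbitrary name) is:
+ *
+ *     #include <stdint.h>
+ *
+ *     // vmvn qd, qm
+ *     static void vmvn_model(uint64_t qd[2], const uint64_t qm[2])
+ *     {
+ *         qd[0] = ~qm[0];
+ *         qd[1] = ~qm[1];
+ *     }
+ */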
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1423
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmvn qwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1424
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vmvn qwvec_D qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88355_vneg.d b/plugins/arm/v7/opdefs/A88355_vneg.d
new file mode 100644
index 0000000..77efcf0
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88355_vneg.d
@@ -0,0 +1,317 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VNEG
+
+@id 312
+
+@desc {
+
+ Vector Negate negates each element in a vector, and places the results in a second vector. The floating-point version only inverts the sign bit. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
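+
+/* Per lane, the integer form computes a two's-complement negation and the
+ * floating-point form only flips the sign bit; a single-lane C sketch
+ * (arbitrary helper names) could be:
+ *
+ *     #include <stdint.h>
+ *
+ *     static int32_t vneg_s32(int32_t x)
+ *     {
+ *         return (int32_t)(0u - (uint32_t)x);   // wraps for INT32_MIN, like the hardware
+ *     }
+ *
+ *     static uint32_t vneg_f32_bits(uint32_t x)  // operates on the raw float bits
+ *     {
+ *         return x ^ 0x80000000u;                // invert the sign bit only
+ *     }
+ */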
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1425
+
+ @assert {
+
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1426
+
+ @assert {
+
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1427
+
+ @assert {
+
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1428
+
+ @assert {
+
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.f32 qwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 1 Vd(4) 1 0 1 sz(1) 0 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1429
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vneg.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1430
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vneg.f64 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 1 Vd(4) 0 F(1) 1 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1431
+
+ @assert {
+
+ size == 0
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1432
+
+ @assert {
+
+ size == 1
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1433
+
+ @assert {
+
+ size == 10
+ F == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1434
+
+ @assert {
+
+ size == 10
+ F == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vneg.f32 qwvec_D qwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 1 Vd(4) 1 0 1 sz(1) 0 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1435
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vneg.f32 swvec_D swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1436
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vneg.f64 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88356_vnm.d b/plugins/arm/v7/opdefs/A88356_vnm.d
new file mode 100644
index 0000000..20f3313
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88356_vnm.d
@@ -0,0 +1,329 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VNMLA, VNMLS, VNMUL
+
+@id 313
+
+@desc {
+
+    VNMLA multiplies together two floating-point register values, adds the negation of the floating-point value in the destination register to the negation of the product, and writes the result back to the destination register. VNMLS multiplies together two floating-point register values, adds the negation of the floating-point value in the destination register to the product, and writes the result back to the destination register. VNMUL multiplies together two floating-point register values, and writes the negation of the result to the destination register. Note: ARM recommends that software does not use the VNMLA instruction in the Round towards Plus Infinity and Round towards Minus Infinity rounding modes, because the rounding of the product and of the sum can change the result of the instruction in opposite directions, defeating the purpose of these rounding modes. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 summarizes these controls.
+
+}
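+
+/* Scalar single-precision models of the three operations, ignoring exception
+ * and rounding-mode details (the product and the sum are rounded separately,
+ * these are not fused operations); helper names are arbitrary:
+ *
+ *     static float vnmla_f32(float sd, float sn, float sm) { return -sd - sn * sm; }
+ *     static float vnmls_f32(float sd, float sn, float sm) { return -sd + sn * sm; }
+ *     static float vnmul_f32(float sn, float sm)           { return -(sn * sm); }
+ */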
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 0 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1437
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmla.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1438
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmls.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1439
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmla.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1440
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmls.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1441
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmul.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1442
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmul.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 0 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) op(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1443
+
+ @assert {
+
+ sz == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmla.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1444
+
+ @assert {
+
+ sz == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmls.f64 dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1445
+
+ @assert {
+
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmla.f32 swvec_D swvec_N swvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1446
+
+ @assert {
+
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmls.f32 swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 0 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1447
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vnmul.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1448
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vnmul.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88358_vorn.d b/plugins/arm/v7/opdefs/A88358_vorn.d
new file mode 100644
index 0000000..595185f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88358_vorn.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VORN (register)
+
+@id 315
+
+@desc {
+
+ This instruction performs a bitwise OR NOT operation between two registers, and places the result in the destination register. The operand and result registers can be quadword or doubleword. They must all be the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
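+
+/* A minimal C sketch of the bitwise OR NOT above, with a quadword register
+ * modelled as two 64-bit halves (arbitrary helper name):
+ *
+ *     #include <stdint.h>
+ *
+ *     // vorn qd, qn, qm : OR the first operand with the complement of the second
+ *     static void vorn_model(uint64_t qd[2], const uint64_t qn[2], const uint64_t qm[2])
+ *     {
+ *         qd[0] = qn[0] | ~qm[0];
+ *         qd[1] = qn[1] | ~qm[1];
+ *     }
+ */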
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 1 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1449
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vorn ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1450
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vorn ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 1 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1451
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vorn ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1452
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vorn ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88359_vorr.d b/plugins/arm/v7/opdefs/A88359_vorr.d
new file mode 100644
index 0000000..eab567f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88359_vorr.d
@@ -0,0 +1,1277 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VORR (immediate)
+
+@id 316
+
+@desc {
+
+ This instruction takes the contents of the destination vector, performs a bitwise OR with an immediate constant, and returns the result into the destination vector. For the range of constants available, see One register and a modified immediate value on page A7-269. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
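+
+/* Once the modified immediate has been expanded and replicated to 64 bits
+ * (the AdvSIMDExpandImm step is not modelled here), the operation reduces to
+ * an OR into the destination; a minimal C sketch (arbitrary name) could be:
+ *
+ *     #include <stdint.h>
+ *
+ *     // vorr.i32 qd, #imm
+ *     static void vorr_imm_model(uint64_t qd[2], uint64_t imm64)
+ *     {
+ *         qd[0] |= imm64;
+ *         qd[1] |= imm64;
+ *     }
+ */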
+
+@encoding (T1) {
+
+ @word 1 1 1 i(1) 1 1 1 1 1 D(1) 0 0 0 imm3(3) Vd(4) cmode(4) 0 Q(1) 0 1 imm4(4)
+
+ @syntax {
+
+ @subid 1453
+
+ @assert {
+
+ Q == 1
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1454
+
+ @assert {
+
+ Q == 1
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1455
+
+ @assert {
+
+ Q == 1
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1456
+
+ @assert {
+
+ Q == 1
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1457
+
+ @assert {
+
+ Q == 1
+ cmode == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1458
+
+ @assert {
+
+ Q == 1
+ cmode == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1459
+
+ @assert {
+
+ Q == 1
+ cmode == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1460
+
+ @assert {
+
+ Q == 1
+ cmode == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1461
+
+ @assert {
+
+ Q == 1
+ cmode == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1462
+
+ @assert {
+
+ Q == 1
+ cmode == 101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1463
+
+ @assert {
+
+ Q == 1
+ cmode == 110
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1464
+
+ @assert {
+
+ Q == 1
+ cmode == 111
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1465
+
+ @assert {
+
+ Q == 1
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1466
+
+ @assert {
+
+ Q == 1
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1467
+
+ @assert {
+
+ Q == 0
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1468
+
+ @assert {
+
+ Q == 0
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1469
+
+ @assert {
+
+ Q == 0
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1470
+
+ @assert {
+
+ Q == 0
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1471
+
+ @assert {
+
+ Q == 0
+ cmode == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1472
+
+ @assert {
+
+ Q == 0
+ cmode == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1473
+
+ @assert {
+
+ Q == 0
+ cmode == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1474
+
+ @assert {
+
+ Q == 0
+ cmode == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1475
+
+ @assert {
+
+ Q == 0
+ cmode == 100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1476
+
+ @assert {
+
+ Q == 0
+ cmode == 101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1477
+
+ @assert {
+
+ Q == 0
+ cmode == 110
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1478
+
+ @assert {
+
+ Q == 0
+ cmode == 111
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1479
+
+ @assert {
+
+ Q == 0
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1480
+
+ @assert {
+
+ Q == 0
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 i(1) 1 1 1 1 1 D(1) 0 0 0 imm3(3) Vd(4) cmode(4) 0 Q(1) 0 1 imm4(4)
+
+ @syntax {
+
+ @subid 1481
+
+ @assert {
+
+ Q == 1
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1482
+
+ @assert {
+
+ Q == 1
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1483
+
+ @assert {
+
+ Q == 1
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1484
+
+ @assert {
+
+ Q == 1
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1485
+
+ @assert {
+
+ Q == 1
+ cmode == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1486
+
+ @assert {
+
+ Q == 1
+ cmode == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1487
+
+ @assert {
+
+ Q == 1
+ cmode == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1488
+
+ @assert {
+
+ Q == 1
+ cmode == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1489
+
+ @assert {
+
+ Q == 1
+ cmode == 100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1490
+
+ @assert {
+
+ Q == 1
+ cmode == 101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1491
+
+ @assert {
+
+ Q == 1
+ cmode == 110
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1492
+
+ @assert {
+
+ Q == 1
+ cmode == 111
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1493
+
+ @assert {
+
+ Q == 1
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1494
+
+ @assert {
+
+ Q == 1
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 qwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1495
+
+ @assert {
+
+ Q == 0
+ cmode == 1000
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1496
+
+ @assert {
+
+ Q == 0
+ cmode == 1001
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1497
+
+ @assert {
+
+ Q == 0
+ cmode == 1010
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1498
+
+ @assert {
+
+ Q == 0
+ cmode == 1011
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i16 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1499
+
+ @assert {
+
+ Q == 0
+ cmode == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1500
+
+ @assert {
+
+ Q == 0
+ cmode == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1501
+
+ @assert {
+
+ Q == 0
+ cmode == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1502
+
+ @assert {
+
+ Q == 0
+ cmode == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1503
+
+ @assert {
+
+ Q == 0
+ cmode == 100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1504
+
+ @assert {
+
+ Q == 0
+ cmode == 101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1505
+
+ @assert {
+
+ Q == 0
+ cmode == 110
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1506
+
+ @assert {
+
+ Q == 0
+ cmode == 111
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1507
+
+ @assert {
+
+ Q == 0
+ cmode == 1100
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+ @syntax {
+
+ @subid 1508
+
+ @assert {
+
+ Q == 0
+ cmode == 1101
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ imm64 = AdvSIMDExpandImm('0', cmode, i:imm3:imm4)
+
+ }
+
+ @asm vorr.i32 dwvec_D imm64
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88360_vorr.d b/plugins/arm/v7/opdefs/A88360_vorr.d
new file mode 100644
index 0000000..3ad1b76
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88360_vorr.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VORR (register)
+
+@id 317
+
+@desc {
+
+ This instruction performs a bitwise OR operation between two registers, and places the result in the destination register. The operand and result registers can be quadword or doubleword. They must all be the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
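+
+/* A minimal C sketch of the register form described above, with a quadword
+ * register modelled as two 64-bit halves (arbitrary helper name):
+ *
+ *     #include <stdint.h>
+ *
+ *     // vorr qd, qn, qm
+ *     static void vorr_model(uint64_t qd[2], const uint64_t qn[2], const uint64_t qm[2])
+ *     {
+ *         qd[0] = qn[0] | qm[0];
+ *         qd[1] = qn[1] | qm[1];
+ *     }
+ */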
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1509
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vorr ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1510
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vorr ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 0 Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1511
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vorr ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1512
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vorr ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88361_vpadal.d b/plugins/arm/v7/opdefs/A88361_vpadal.d
new file mode 100644
index 0000000..79b542c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88361_vpadal.d
@@ -0,0 +1,597 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPADAL
+
+@id 318
+
+@desc {
+
+    Vector Pairwise Add and Accumulate Long adds adjacent pairs of elements of a vector, and accumulates the results into the elements of the destination vector. The vectors can be doubleword or quadword. The operand elements can be 8-bit, 16-bit, or 32-bit integers. The result elements are twice the length of the operand elements. Figure A8-2 (VPADAL doubleword operation for data type S16) shows an example of the operation of VPADAL. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
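+
+/* A minimal C sketch of the doubleword .s16 variant described above: adjacent
+ * 16-bit pairs are added and the sums accumulated into the 32-bit lanes of the
+ * destination (arbitrary helper name):
+ *
+ *     #include <stdint.h>
+ *
+ *     // vpadal.s16 dd, dm
+ *     static void vpadal_s16(int32_t dd[2], const int16_t dm[4])
+ *     {
+ *         for (int i = 0; i < 2; i++)
+ *             dd[i] += (int32_t)dm[2 * i] + (int32_t)dm[2 * i + 1];
+ *     }
+ */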
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 0 op(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1513
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1514
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1515
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1516
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1517
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1518
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1519
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1520
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1521
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1522
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1523
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1524
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 0 op(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1525
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1526
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1527
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1528
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1529
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1530
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1531
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1532
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1533
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1534
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1535
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1536
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadal.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88362_vpadd.d b/plugins/arm/v7/opdefs/A88362_vpadd.d
new file mode 100644
index 0000000..17e298d
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88362_vpadd.d
@@ -0,0 +1,183 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPADD (integer)
+
+@id 319
+
+@desc {
+
+    Vector Pairwise Add (integer) adds adjacent pairs of elements of two vectors, and places the results in the destination vector. The operands and result are doubleword vectors. The operand and result elements must all be the same type, and can be 8-bit, 16-bit, or 32-bit integers. There is no distinction between signed and unsigned integers. Figure A8-3 (VPADD operation for data type I16) shows an example of the operation of VPADD. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
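+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * pairwise-add semantics quoted above, for the .i16 doubleword case. The helper
+ * name vpadd_i16 is hypothetical; lanes wrap modulo 2^16, as the narrowing cast
+ * behaves on common compilers.
+ *
+ *     #include <stdint.h>
+ *
+ *     static void vpadd_i16(int16_t dd[4], const int16_t dn[4], const int16_t dm[4])
+ *     {
+ *         int16_t dest[4];
+ *
+ *         for (int i = 0; i < 2; i++) {
+ *             dest[i]     = (int16_t) (dn[2 * i] + dn[2 * i + 1]);  // low half: pairs of Dn
+ *             dest[i + 2] = (int16_t) (dm[2 * i] + dm[2 * i + 1]);  // high half: pairs of Dm
+ *         }
+ *
+ *         for (int i = 0; i < 4; i++) dd[i] = dest[i];  // lets Dd alias Dn or Dm
+ *     }
+ */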
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1537
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1538
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1539
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1540
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1541
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1542
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.i32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88363_vpadd.d b/plugins/arm/v7/opdefs/A88363_vpadd.d
new file mode 100644
index 0000000..a68632b
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88363_vpadd.d
@@ -0,0 +1,91 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPADD (floating-point)
+
+@id 320
+
+@desc {
+
+ Vector Pairwise Add (floating-point) adds adjacent pairs of elements of two vectors, and places the results in the destination vector. The operands and result are doubleword vectors. The operand and result elements are 32-bit floating-point numbers. Figure A8-3 on page A8-980 shows an example of the operation of VPADD. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1543
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1544
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpadd.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88364_vpaddl.d b/plugins/arm/v7/opdefs/A88364_vpaddl.d
new file mode 100644
index 0000000..d508785
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88364_vpaddl.d
@@ -0,0 +1,597 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPADDL
+
+@id 321
+
+@desc {
+
+    Vector Pairwise Add Long adds adjacent pairs of elements of a vector, and places the results in the destination vector. The vectors can be doubleword or quadword. The operand elements can be 8-bit, 16-bit, or 32-bit integers. The result elements are twice the length of the operand elements. Figure A8-4 (VPADDL doubleword operation for data type S16) shows an example of the operation of VPADDL. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
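+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * pairwise add long semantics quoted above, for the .s16 doubleword case. The
+ * helper name vpaddl_s16 is hypothetical; result lanes are twice as wide, so the
+ * sums cannot overflow.
+ *
+ *     #include <stdint.h>
+ *
+ *     static void vpaddl_s16(int32_t dd[2], const int16_t dm[4])
+ *     {
+ *         for (int i = 0; i < 2; i++)
+ *             dd[i] = (int32_t) dm[2 * i] + (int32_t) dm[2 * i + 1];
+ *     }
+ */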
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 0 1 0 op(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1545
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1546
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1547
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1548
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1549
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1550
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1551
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1552
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1553
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1554
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1555
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1556
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 0 1 0 op(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1557
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1558
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1559
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1560
+
+ @assert {
+
+ Q == 1
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1561
+
+ @assert {
+
+ Q == 1
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1562
+
+ @assert {
+
+ Q == 1
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1563
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1564
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1565
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.s32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1566
+
+ @assert {
+
+ Q == 0
+ size == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1567
+
+ @assert {
+
+ Q == 0
+ size == 1
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1568
+
+ @assert {
+
+ Q == 0
+ size == 10
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpaddl.u32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88365_vpmax.d b/plugins/arm/v7/opdefs/A88365_vpmax.d
new file mode 100644
index 0000000..84cc47a
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88365_vpmax.d
@@ -0,0 +1,645 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPMAX, VPMIN (integer)
+
+@id 322
+
+@desc {
+
+    Vector Pairwise Maximum compares adjacent pairs of elements in two doubleword vectors, and copies the larger of each pair into the corresponding element in the destination doubleword vector. Vector Pairwise Minimum compares adjacent pairs of elements in two doubleword vectors, and copies the smaller of each pair into the corresponding element in the destination doubleword vector. Figure A8-5 (VPMAX operation for data type S16 or U16) shows an example of the operation of VPMAX. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
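+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * pairwise maximum semantics quoted above, for the .s16 case; VPMIN is the same
+ * with the comparison inverted. The helper name vpmax_s16 is hypothetical.
+ *
+ *     #include <stdint.h>
+ *
+ *     static void vpmax_s16(int16_t dd[4], const int16_t dn[4], const int16_t dm[4])
+ *     {
+ *         int16_t dest[4];
+ *
+ *         for (int i = 0; i < 2; i++) {
+ *             dest[i]     = dn[2 * i] > dn[2 * i + 1] ? dn[2 * i] : dn[2 * i + 1];  // pairs of Dn
+ *             dest[i + 2] = dm[2 * i] > dm[2 * i + 1] ? dm[2 * i] : dm[2 * i + 1];  // pairs of Dm
+ *         }
+ *
+ *         for (int i = 0; i < 4; i++) dd[i] = dest[i];  // lets Dd alias Dn or Dm
+ *     }
+ */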
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 1 0 N(1) Q(1) M(1) op(1) Vm(4)
+
+ @syntax {
+
+ @subid 1569
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1570
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1571
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1572
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1573
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1574
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1575
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1576
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1577
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1578
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1579
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1580
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 1 0 N(1) Q(1) M(1) op(1) Vm(4)
+
+ @syntax {
+
+ @subid 1581
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1582
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1583
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1584
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1585
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1586
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1587
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1588
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1589
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1590
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1591
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1592
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88366_vpmax.d b/plugins/arm/v7/opdefs/A88366_vpmax.d
new file mode 100644
index 0000000..7e617cb
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88366_vpmax.d
@@ -0,0 +1,141 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VPMAX, VPMIN (floating-point)
+
+@id 323
+
+@desc {
+
+ Vector Pairwise Maximum compares adjacent pairs of elements in two doubleword vectors, and copies the larger of each pair into the corresponding element in the destination doubleword vector. Vector Pairwise Minimum compares adjacent pairs of elements in two doubleword vectors, and copies the smaller of each pair into the corresponding element in the destination doubleword vector. Figure A8-5 on page A8-986 shows an example of the operation of VPMAX. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1593
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1594
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(1) sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1595
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmax.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1596
+
+ @assert {
+
+ Q == 0
+ sz == 0
+ op == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vpmin.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88369_vqabs.d b/plugins/arm/v7/opdefs/A88369_vqabs.d
new file mode 100644
index 0000000..f472f3f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88369_vqabs.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VQABS
+
+@id 324
+
+@desc {
+
+ Vector Saturating Absolute takes the absolute value of each element in a vector, and places the results in the destination vector. If any of the results overflow, they are saturated. The cumulative saturation bit, FPSCR.QC, is set if saturation occurs. For details see Pseudocode details of saturation on page A2-44. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
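+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * per-lane saturating absolute value quoted above, for the .s8 case. The helper
+ * name vqabs_s8 and the qc flag standing for FPSCR.QC are hypothetical.
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static int8_t vqabs_s8(int8_t x, bool *qc)
+ *     {
+ *         if (x == INT8_MIN) {    // |-128| does not fit in 8 bits
+ *             *qc = true;         // cumulative saturation flag
+ *             return INT8_MAX;
+ *         }
+ *
+ *         return x < 0 ? (int8_t) -x : x;
+ *     }
+ */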
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1597
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1598
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1599
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1600
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1601
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1602
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1603
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1604
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1605
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1606
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1607
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1608
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqabs.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88375_vqneg.d b/plugins/arm/v7/opdefs/A88375_vqneg.d
new file mode 100644
index 0000000..2fbaa1a
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88375_vqneg.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VQNEG
+
+@id 325
+
+@desc {
+
+ Vector Saturating Negate negates each element in a vector, and places the results in the destination vector. If any of the results overflow, they are saturated. The cumulative saturation bit, FPSCR.QC, is set if saturation occurs. For details see Pseudocode details of saturation on page A2-44. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
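+
+/* Illustration only (not part of the opdef grammar): the per-lane saturating
+ * negation quoted above, sketched in C for the .s8 case; only negating the
+ * minimum value saturates. The helper name vqneg_s8 and the qc flag standing
+ * for FPSCR.QC are hypothetical.
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static int8_t vqneg_s8(int8_t x, bool *qc)
+ *     {
+ *         if (x == INT8_MIN) { *qc = true; return INT8_MAX; }   // -(-128) saturates to 127
+ *
+ *         return (int8_t) -x;
+ *     }
+ */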
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1609
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1610
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1611
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1612
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1613
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1614
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 1 1 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1615
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1616
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1617
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1618
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1619
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1620
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqneg.s32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88377_vqrshl.d b/plugins/arm/v7/opdefs/A88377_vqrshl.d
new file mode 100644
index 0000000..3d3ccd2
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88377_vqrshl.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VQRSHL
+
+@id 326
+
+@desc {
+
+    Vector Saturating Rounding Shift Left takes each element in a vector, shifts them by a value from the least significant byte of the corresponding element of a second vector, and places the results in the destination vector. If the shift value is positive, the operation is a left shift. Otherwise, it is a right shift. For truncated results see VQSHL (register) on page A8-1014. The first operand and result elements are the same data type, and can be any one of: 8-bit, 16-bit, 32-bit, or 64-bit signed integers, or 8-bit, 16-bit, 32-bit, or 64-bit unsigned integers. The second operand is a signed integer of the same size. If any of the results overflow, they are saturated. The cumulative saturation bit, FPSCR.QC, is set if saturation occurs. For details see Pseudocode details of saturation on page A2-44. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
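+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * per-lane saturating, rounding shift quoted above, for the .s8 case. val comes
+ * from the Dm/Qm lane and shift from the low byte of the Dn/Qn lane; the helper
+ * name vqrshl_s8 and the qc flag standing for FPSCR.QC are hypothetical, and an
+ * arithmetic right shift of negative values is assumed (as on common compilers).
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static int8_t vqrshl_s8(int8_t val, int8_t shift, bool *qc)
+ *     {
+ *         int64_t res;
+ *
+ *         if (shift >= 0) {
+ *             res = (int64_t) val << (shift > 15 ? 15 : shift);        // left shift, checked below
+ *         } else {
+ *             int s = -shift > 15 ? 15 : -shift;
+ *             res = ((int64_t) val + ((int64_t) 1 << (s - 1))) >> s;   // right shift with rounding
+ *         }
+ *
+ *         if (res > INT8_MAX) { *qc = true; return INT8_MAX; }
+ *         if (res < INT8_MIN) { *qc = true; return INT8_MIN; }
+ *         return (int8_t) res;
+ *     }
+ */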
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1621
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1622
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1623
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1624
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1625
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1626
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1627
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1628
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1629
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1630
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1631
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1632
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1633
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1634
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1635
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1636
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1637
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1638
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1639
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1640
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1641
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1642
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1643
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1644
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1645
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1646
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1647
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1648
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1649
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1650
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1651
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1652
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqrshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88379_vqshl.d b/plugins/arm/v7/opdefs/A88379_vqshl.d
new file mode 100644
index 0000000..2cfbb49
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88379_vqshl.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VQSHL (register)
+
+@id 327
+
+@desc {
+
+    Vector Saturating Shift Left (register) takes each element in a vector, shifts them by a value from the least significant byte of the corresponding element of a second vector, and places the results in the destination vector. If the shift value is positive, the operation is a left shift. Otherwise, it is a right shift. The results are truncated. For rounded results, see VQRSHL on page A8-1010. The first operand and result elements are the same data type, and can be any one of: 8-bit, 16-bit, 32-bit, or 64-bit signed integers, or 8-bit, 16-bit, 32-bit, or 64-bit unsigned integers. The second operand is a signed integer of the same size. If any of the results overflow, they are saturated. The cumulative saturation bit, FPSCR.QC, is set if saturation occurs. For details see Pseudocode details of saturation on page A2-44. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
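+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * per-lane saturating shift quoted above, for the .s8 case. It differs from the
+ * VQRSHL sketch in A88377_vqrshl.d only in that negative shift counts truncate
+ * (no rounding constant is added), under the same arithmetic-right-shift
+ * assumption. The helper name vqshl_s8 and the qc flag are hypothetical.
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static int8_t vqshl_s8(int8_t val, int8_t shift, bool *qc)
+ *     {
+ *         int64_t res;
+ *
+ *         if (shift >= 0)
+ *             res = (int64_t) val << (shift > 15 ? 15 : shift);
+ *         else
+ *             res = (int64_t) val >> (-shift > 15 ? 15 : -shift);   // truncating right shift
+ *
+ *         if (res > INT8_MAX) { *qc = true; return INT8_MAX; }
+ *         if (res < INT8_MIN) { *qc = true; return INT8_MIN; }
+ *         return (int8_t) res;
+ *     }
+ */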
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1653
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1654
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1655
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1656
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1657
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1658
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1659
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1660
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1661
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1662
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1663
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1664
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1665
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1666
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1667
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1668
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1669
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1670
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1671
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1672
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1673
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1674
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1675
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1676
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1677
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1678
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1679
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1680
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1681
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1682
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1683
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1684
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vqshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88382_vqsub.d b/plugins/arm/v7/opdefs/A88382_vqsub.d
new file mode 100644
index 0000000..3441a1f
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88382_vqsub.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VQSUB
+
+@id 328
+
+@desc {
+
+    Vector Saturating Subtract subtracts the elements of the second operand vector from the corresponding elements of the first operand vector, and places the results in the destination vector. Signed and unsigned operations are distinct. The operand and result elements must all be the same type, and can be any one of: 8-bit, 16-bit, 32-bit, or 64-bit signed integers, or 8-bit, 16-bit, 32-bit, or 64-bit unsigned integers. If any of the results overflow, they are saturated. The cumulative saturation bit, FPSCR.QC, is set if saturation occurs. For details see Pseudocode details of saturation on page A2-44. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
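+
+/* Illustration only (not part of the opdef grammar): a minimal C sketch of the
+ * per-lane saturating subtraction quoted above, for the .s8 case. The helper
+ * name vqsub_s8 and the qc flag standing for FPSCR.QC are hypothetical.
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static int8_t vqsub_s8(int8_t a, int8_t b, bool *qc)
+ *     {
+ *         int res = (int) a - (int) b;    // wide enough for any 8-bit difference
+ *
+ *         if (res > INT8_MAX) { *qc = true; return INT8_MAX; }
+ *         if (res < INT8_MIN) { *qc = true; return INT8_MIN; }
+ *         return (int8_t) res;
+ *     }
+ */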
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1685
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1686
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1687
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1688
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1689
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1690
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1691
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1692
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1693
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1694
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1695
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1696
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1697
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1698
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1699
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1700
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 1 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1701
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1702
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1703
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1704
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1705
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1706
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1707
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1708
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1709
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1710
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1711
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1712
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.s64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1713
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1714
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1715
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1716
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vqsub.u64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88383_vraddhn.d b/plugins/arm/v7/opdefs/A88383_vraddhn.d
new file mode 100644
index 0000000..c6de30c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88383_vraddhn.d
@@ -0,0 +1,177 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRADDHN
+
+@id 329
+
+@desc {
+
+ Vector Rounding Add and Narrow, returning High Half adds corresponding elements in two quadword vectors, and places the most significant half of each result in a doubleword vector. The results are rounded. (For truncated results, see VADDHN on page A8-832.) The operand elements can be 16-bit, 32-bit, or 64-bit integers. There is no distinction between signed and unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
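+
+/* Illustrative sketch (not part of the opcode definition): one VRADDHN.I16 lane
+ * modelled in C with a hypothetical helper name. The operands are 16-bit and the
+ * result keeps the rounded most significant half of the sum.
+ *
+ *     #include <stdint.h>
+ *
+ *     static uint8_t vraddhn_i16_lane(uint16_t a, uint16_t b)
+ *     {
+ *         uint32_t sum = (uint32_t) a + (uint32_t) b;
+ *         sum += 1u << 7;                 // rounding constant: half of the discarded part
+ *         return (uint8_t) (sum >> 8);    // keep bits [15:8] of the sum
+ *     }
+ */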
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1717
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1718
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1719
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1720
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1721
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1722
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vraddhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88384_vrecpe.d b/plugins/arm/v7/opdefs/A88384_vrecpe.d
new file mode 100644
index 0000000..a89daeb
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88384_vrecpe.d
@@ -0,0 +1,229 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRECPE
+
+@id 330
+
+@desc {
+
+ Vector Reciprocal Estimate finds an approximate reciprocal of each element in the operand vector, and places the results in the destination vector. The operand and result elements are the same type, and can be 32-bit floating-point numbers, or 32-bit unsigned integers. For details of the operation performed by this instruction see Floating-point reciprocal estimate and step on page A2-85. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
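+
+/* Illustrative sketch (not part of the opcode definition): VRECPE is usually
+ * reached from C through the GCC/Clang NEON intrinsics, assumed to be available
+ * via <arm_neon.h>; the estimate alone only has limited precision.
+ *
+ *     #include <arm_neon.h>
+ *
+ *     static float32x4_t approx_recip(float32x4_t x)
+ *     {
+ *         return vrecpeq_f32(x);   // maps to the VRECPE.F32 quadword form
+ *     }
+ */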
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 0 F(1) 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1723
+
+ @assert {
+
+ Q == 1
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1724
+
+ @assert {
+
+ Q == 1
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1725
+
+ @assert {
+
+ Q == 0
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.u32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1726
+
+ @assert {
+
+ Q == 0
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.f32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 0 F(1) 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1727
+
+ @assert {
+
+ Q == 1
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1728
+
+ @assert {
+
+ Q == 1
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1729
+
+ @assert {
+
+ Q == 0
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.u32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1730
+
+ @assert {
+
+ Q == 0
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecpe.f32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88385_vrecps.d b/plugins/arm/v7/opdefs/A88385_vrecps.d
new file mode 100644
index 0000000..b916c05
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88385_vrecps.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRECPS
+
+@id 331
+
+@desc {
+
+ Vector Reciprocal Step multiplies the elements of one vector by the corresponding elements of another vector, subtracts each of the products from 2.0, and places the results into the elements of the destination vector. The operand and result elements are 32-bit floating-point numbers. For details of the operation performed by this instruction see Floating-point reciprocal estimate and step on page A2-85. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
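+
+/* Illustrative sketch (not part of the opcode definition): the usual pairing of
+ * VRECPE and VRECPS, one Newton-Raphson step refining the reciprocal estimate
+ * (NEON intrinsics from <arm_neon.h> assumed).
+ *
+ *     #include <arm_neon.h>
+ *
+ *     static float32x4_t refined_recip(float32x4_t x)
+ *     {
+ *         float32x4_t est = vrecpeq_f32(x);            // VRECPE.F32: rough 1/x
+ *         est = vmulq_f32(est, vrecpsq_f32(x, est));   // VRECPS.F32: est *= (2.0 - x * est)
+ *         return est;                                  // a second step would refine further
+ *     }
+ */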
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1731
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecps.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1732
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecps.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 0 sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1733
+
+ @assert {
+
+ Q == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrecps.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1734
+
+ @assert {
+
+ Q == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrecps.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88386_vrev.d b/plugins/arm/v7/opdefs/A88386_vrev.d
new file mode 100644
index 0000000..af8b569
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88386_vrev.d
@@ -0,0 +1,873 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VREV16, VREV32, VREV64
+
+@id 332
+
+@desc {
+
+    VREV16 (Vector Reverse in halfwords) reverses the order of 8-bit elements in each halfword of the vector, and places the result in the corresponding destination vector. VREV32 (Vector Reverse in words) reverses the order of 8-bit or 16-bit elements in each word of the vector, and places the result in the corresponding destination vector. VREV64 (Vector Reverse in doublewords) reverses the order of 8-bit, 16-bit, or 32-bit elements in each doubleword of the vector, and places the result in the corresponding destination vector. There is no distinction between data types, other than size. Figure A8-6 (VREV operation examples) shows two examples of the operation of VREV: VREV64.8 on a doubleword and VREV64.32 on a quadword. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
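+
+/* Illustrative sketch (not part of the opcode definition): a C model of VREV64.8
+ * applied to a single doubleword, with a hypothetical helper name; the
+ * instruction does this for every 64-bit element of the vector.
+ *
+ *     #include <stdint.h>
+ *
+ *     static uint64_t vrev64_8_dword(uint64_t d)
+ *     {
+ *         uint64_t r = 0;
+ *         for (int i = 0; i < 8; i++)      // move byte i to position 7 - i
+ *             r |= ((d >> (8 * i)) & 0xffu) << (8 * (7 - i));
+ *         return r;
+ *     }
+ */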
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 0 0 op(2) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1735
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1736
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1737
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1738
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1739
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1740
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1741
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1742
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1743
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1744
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1745
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1746
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1747
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1748
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1749
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1750
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1751
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1752
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 0 0 Vd(4) 0 0 0 op(2) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1753
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1754
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1755
+
+ @assert {
+
+ Q == 1
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1756
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1757
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1758
+
+ @assert {
+
+ Q == 1
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1759
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1760
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1761
+
+ @assert {
+
+ Q == 1
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1762
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1763
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1764
+
+ @assert {
+
+ Q == 0
+ op == 10
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev16.32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1765
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1766
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1767
+
+ @assert {
+
+ Q == 0
+ op == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev32.32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1768
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1769
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1770
+
+ @assert {
+
+ Q == 0
+ op == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrev64.32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88387_vrhadd.d b/plugins/arm/v7/opdefs/A88387_vrhadd.d
new file mode 100644
index 0000000..ced6922
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88387_vrhadd.d
@@ -0,0 +1,621 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRHADD
+
+@id 333
+
+@desc {
+
+ Vector Rounding Halving Add adds corresponding elements in two vectors of integers, shifts each result right one bit, and places the final results in the destination vector. The operand and result elements are all the same type, and can be any one of: • 8-bit, 16-bit, or 32-bit signed integers • 8-bit, 16-bit, or 32-bit unsigned integers. The results of the halving operations are rounded. For truncated results see VHADD, VHSUB on page A8-896. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
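+
+/* Illustrative sketch (not part of the opcode definition): one VRHADD.U8 lane in
+ * C, with a hypothetical helper name. "Rounding halving add" is (a + b + 1) >> 1
+ * computed without losing the carry.
+ *
+ *     #include <stdint.h>
+ *
+ *     static uint8_t vrhadd_u8_lane(uint8_t a, uint8_t b)
+ *     {
+ *         return (uint8_t) (((uint16_t) a + (uint16_t) b + 1u) >> 1);
+ *     }
+ */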
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1771
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1772
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1773
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1774
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1775
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1776
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1777
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1778
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1779
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1780
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1781
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1782
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1783
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1784
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1785
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1786
+
+ @assert {
+
+ Q == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1787
+
+ @assert {
+
+ Q == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1788
+
+ @assert {
+
+ Q == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1789
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1790
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1791
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.s32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1792
+
+ @assert {
+
+ Q == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1793
+
+ @assert {
+
+ Q == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1794
+
+ @assert {
+
+ Q == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrhadd.u32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88388_vrshl.d b/plugins/arm/v7/opdefs/A88388_vrshl.d
new file mode 100644
index 0000000..bed534d
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88388_vrshl.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRSHL
+
+@id 334
+
+@desc {
+
+    Vector Rounding Shift Left takes the elements of a vector, shifts each by a value from the least significant byte of the corresponding element of a second vector, and places the results in the destination vector. If the shift value is positive, the operation is a left shift. If the shift value is negative, it is a rounding right shift. (For a truncating shift, see VSHL (register) on page A8-1048). The first operand and result elements are the same data type, and can be any one of: • 8-bit, 16-bit, 32-bit, or 64-bit signed integers • 8-bit, 16-bit, 32-bit, or 64-bit unsigned integers. The second operand is always a signed integer of the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
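+
+/* Illustrative sketch (not part of the opcode definition): one VRSHL.S8 lane in
+ * C. The helper name is hypothetical; |shift| < 8 and an arithmetic right shift
+ * on signed values are assumed. A negative count becomes a rounding right shift.
+ *
+ *     #include <stdint.h>
+ *
+ *     static int8_t vrshl_s8_lane(int8_t a, int8_t shift)
+ *     {
+ *         if (shift >= 0)
+ *             return (int8_t) (a << shift);                       // left shift
+ *         int n = -shift;
+ *         return (int8_t) (((int16_t) a + (1 << (n - 1))) >> n);  // rounding right shift
+ *     }
+ */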
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1795
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1796
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1797
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1798
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1799
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1800
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1801
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1802
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1803
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1804
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1805
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1806
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1807
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1808
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1809
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1810
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1811
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1812
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1813
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1814
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1815
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1816
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1817
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1818
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1819
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1820
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1821
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1822
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1823
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1824
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1825
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1826
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vrshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88391_vrsqrte.d b/plugins/arm/v7/opdefs/A88391_vrsqrte.d
new file mode 100644
index 0000000..4f60170
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88391_vrsqrte.d
@@ -0,0 +1,229 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRSQRTE
+
+@id 335
+
+@desc {
+
+ Vector Reciprocal Square Root Estimate finds an approximate reciprocal square root of each element in a vector, and places the results in a second vector. The operand and result elements are the same type, and can be 32-bit floating-point numbers, or 32-bit unsigned integers. For details of the operation performed by this instruction see Floating-point reciprocal square root estimate and step on page A2-87. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
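+
+/* Illustrative sketch (not part of the opcode definition): comparing the VRSQRTE
+ * estimate with 1/sqrt(x) through the NEON intrinsics (assumed available via
+ * <arm_neon.h>); the estimate is only a starting point for refinement.
+ *
+ *     #include <arm_neon.h>
+ *     #include <math.h>
+ *
+ *     static float rsqrt_estimate_error(float x)
+ *     {
+ *         float est = vgetq_lane_f32(vrsqrteq_f32(vdupq_n_f32(x)), 0);  // VRSQRTE.F32
+ *         return fabsf(est - 1.0f / sqrtf(x));                          // distance to the exact value
+ *     }
+ */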
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 0 F(1) 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1827
+
+ @assert {
+
+ Q == 1
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1828
+
+ @assert {
+
+ Q == 1
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1829
+
+ @assert {
+
+ Q == 0
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.u32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1830
+
+ @assert {
+
+ Q == 0
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.f32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 1 Vd(4) 0 1 0 F(1) 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1831
+
+ @assert {
+
+ Q == 1
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.u32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1832
+
+ @assert {
+
+ Q == 1
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.f32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1833
+
+ @assert {
+
+ Q == 0
+ F == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.u32 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1834
+
+ @assert {
+
+ Q == 0
+ F == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrte.f32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88392_vrsqrts.d b/plugins/arm/v7/opdefs/A88392_vrsqrts.d
new file mode 100644
index 0000000..c35aa87
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88392_vrsqrts.d
@@ -0,0 +1,137 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRSQRTS
+
+@id 336
+
+@desc {
+
+ Vector Reciprocal Square Root Step multiplies the elements of one vector by the corresponding elements of another vector, subtracts each of the products from 3.0, divides these results by 2.0, and places the results into the elements of the destination vector. The operand and result elements are 32-bit floating-point numbers. For details of the operation performed by this instruction see Floating-point reciprocal square root estimate and step on page A2-87. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
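+
+/* Illustrative sketch (not part of the opcode definition): the usual pairing of
+ * VRSQRTE and VRSQRTS, one Newton-Raphson step refining the reciprocal square
+ * root estimate (NEON intrinsics from <arm_neon.h> assumed).
+ *
+ *     #include <arm_neon.h>
+ *
+ *     static float32x4_t refined_rsqrt(float32x4_t x)
+ *     {
+ *         float32x4_t est = vrsqrteq_f32(x);                           // VRSQRTE.F32: rough 1/sqrt(x)
+ *         est = vmulq_f32(est, vrsqrtsq_f32(vmulq_f32(x, est), est));  // est *= (3 - x*est*est) / 2
+ *         return est;
+ *     }
+ */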
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1835
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrts.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1836
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrts.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 1 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1837
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrts.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1838
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vrsqrts.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88394_vrsubhn.d b/plugins/arm/v7/opdefs/A88394_vrsubhn.d
new file mode 100644
index 0000000..a0fcf7c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88394_vrsubhn.d
@@ -0,0 +1,177 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VRSUBHN
+
+@id 337
+
+@desc {
+
+    Vector Rounding Subtract and Narrow, returning High Half subtracts the elements of one quadword vector from the corresponding elements of another quadword vector, takes the most significant half of each result, and places the final results in a doubleword vector. The results are rounded. (For truncated results, see VSUBHN on page A8-1088.) The operand elements can be 16-bit, 32-bit, or 64-bit integers. There is no distinction between signed and unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
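For readers mapping the description to lane arithmetic, here is a hedged C sketch of the .i32 variant (32-bit operand elements, 16-bit results); the rounding constant and shift follow from the text above, and the function name is made up for illustration:

    #include <stdint.h>

    /* Subtract 32-bit lanes, add the rounding constant, keep the high half. */
    static void vrsubhn_i32(uint16_t dst[4], const uint32_t a[4], const uint32_t b[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            dst[i] = (uint16_t)(((a[i] - b[i]) + 0x8000u) >> 16);  /* modulo arithmetic, so signedness is irrelevant */
    }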
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1839
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1840
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1841
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1842
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1843
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1844
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vrsubhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88396_vshl.d b/plugins/arm/v7/opdefs/A88396_vshl.d
new file mode 100644
index 0000000..4df1c92
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88396_vshl.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSHL (register)
+
+@id 338
+
+@desc {
+
+ Vector Shift Left (register) takes each element in a vector, shifts it by a value from the least significant byte of the corresponding element of a second vector, and places the results in the destination vector. If the shift value is positive, the operation is a left shift; if the shift value is negative, it is a truncating right shift. (For a rounding shift, see VRSHL on page A8-1032.) The first operand and result elements are the same data type, and can be 8-bit, 16-bit, 32-bit, or 64-bit signed or unsigned integers. The second operand is always a signed integer of the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
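To make the sign convention concrete, a hedged C sketch of the .s32 variant; shift amounts whose magnitude reaches the element width are assumed to collapse to an all-zero or all-sign result, as in the architectural pseudocode, and the helper name is illustrative:

    #include <stdint.h>

    /* Per lane: the signed low byte of the second operand gives the shift. */
    static void vshl_s32(int32_t dst[4], const int32_t value[4], const int32_t shift[4])
    {
        int i, amount;

        for (i = 0; i < 4; i++) {
            amount = shift[i] & 0xff;
            if (amount > 127) amount -= 256;            /* interpret the byte as signed */

            if (amount >= 0)
                dst[i] = (amount < 32) ? (int32_t)((uint32_t)value[i] << amount) : 0;
            else
                dst[i] = value[i] >> ((-amount < 32) ? -amount : 31);   /* truncating right shift */
        }
    }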
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1845
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1846
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1847
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1848
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1849
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1850
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1851
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1852
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1853
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1854
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1855
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1856
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1857
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1858
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1859
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1860
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1861
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1862
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1863
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1864
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1865
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1866
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1867
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1868
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1869
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1870
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1871
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1872
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1873
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1874
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1875
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1876
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88401_vsqrt.d b/plugins/arm/v7/opdefs/A88401_vsqrt.d
new file mode 100644
index 0000000..e0a6708
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88401_vsqrt.d
@@ -0,0 +1,129 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSQRT
+
+@id 339
+
+@desc {
+
+ This instruction calculates the square root of the value in a floating-point register and writes the result to another floating-point register. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 summarizes these controls.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 1 Vd(4) 1 0 1 sz(1) 1 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1877
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsqrt.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1878
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vsqrt.f32 swvec_D swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 0 1 D(1) 1 1 0 0 0 1 Vd(4) 1 0 1 sz(1) 1 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1879
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsqrt.f64 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1880
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vsqrt.f32 swvec_D swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88414_vsub.d b/plugins/arm/v7/opdefs/A88414_vsub.d
new file mode 100644
index 0000000..0d98caf
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88414_vsub.d
@@ -0,0 +1,221 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSUB (integer)
+
+@id 340
+
+@desc {
+
+ Vector Subtract subtracts the elements of one vector from the corresponding elements of another vector, and places the results in the destination vector. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1881
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1882
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1883
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1884
+
+ @assert {
+
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1885
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1886
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1887
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1888
+
+ @assert {
+
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.i64 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88415_vsub.d b/plugins/arm/v7/opdefs/A88415_vsub.d
new file mode 100644
index 0000000..e8db54c
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88415_vsub.d
@@ -0,0 +1,237 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSUB (floating-point)
+
+@id 341
+
+@desc {
+
+ Vector Subtract subtracts the elements of one vector from the corresponding elements of another vector, and places the results in the destination vector. Depending on settings in the CPACR, NSACR, HCPTR, and FPEXC registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of general controls of CP10 and CP11 functionality on page B1-1230 and Summary of access controls for Advanced SIMD functionality on page B1-1232 summarize these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1889
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1890
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (T2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1891
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1892
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vsub.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) 1 sz(1) Vn(4) Vd(4) 1 1 0 1 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1893
+
+ @assert {
+
+ Q == 1
+ sz == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1894
+
+ @assert {
+
+ Q == 0
+ sz == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A2) {
+
+ @word 1 1 1 0 1 1 1 0 0 D(1) 1 1 Vn(4) Vd(4) 1 0 1 sz(1) N(1) 1 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1895
+
+ @assert {
+
+ sz == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsub.f64 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1896
+
+ @assert {
+
+ sz == 0
+
+ }
+
+ @conv {
+
+ swvec_D = SingleWordVector(Vd:D)
+ swvec_N = SingleWordVector(Vn:N)
+ swvec_M = SingleWordVector(Vm:M)
+
+ }
+
+ @asm vsub.f32 ?swvec_D swvec_N swvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88416_vsubhn.d b/plugins/arm/v7/opdefs/A88416_vsubhn.d
new file mode 100644
index 0000000..eb3c53d
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88416_vsubhn.d
@@ -0,0 +1,177 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSUBHN
+
+@id 342
+
+@desc {
+
+ Vector Subtract and Narrow, returning High Half subtracts the elements of one quadword vector from the corresponding elements of another quadword vector, takes the most significant half of each result, and places the final results in a doubleword vector. The results are truncated. (For rounded results, see VRSUBHN on page A8-1044.) The operand elements can be 16-bit, 32-bit, or 64-bit integers. There is no distinction between signed and unsigned integers. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
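The only difference from VRSUBHN is the absence of the rounding constant; a minimal sketch of the .i32 variant, with an illustrative name:

    #include <stdint.h>

    /* Subtract 32-bit lanes and keep the (truncated) high half of each result. */
    static void vsubhn_i32(uint16_t dst[4], const uint32_t a[4], const uint32_t b[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            dst[i] = (uint16_t)((a[i] - b[i]) >> 16);
    }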
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1897
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1898
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1899
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 1 1 0 N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1900
+
+ @assert {
+
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i16 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1901
+
+ @assert {
+
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i32 dwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1902
+
+ @assert {
+
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vsubhn.i64 dwvec_D qwvec_N qwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88417_vsub.d b/plugins/arm/v7/opdefs/A88417_vsub.d
new file mode 100644
index 0000000..77a6863
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88417_vsub.d
@@ -0,0 +1,621 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSUBL, VSUBW
+
+@id 343
+
+@desc {
+
+ Vector Subtract Long subtracts the elements of one doubleword vector from the corresponding elements of another doubleword vector, and places the results in a quadword vector. Before subtracting, it sign-extends or zero-extends the elements of both operands. Vector Subtract Wide subtracts the elements of a doubleword vector from the corresponding elements of a quadword vector, and places the results in another quadword vector. Before subtracting, it sign-extends or zero-extends the elements of the doubleword operand. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
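To contrast the two forms described above, a hedged C sketch of the .s16 variants (helper names are illustrative): VSUBL widens both narrow operands before subtracting, while VSUBW widens only the second one.

    #include <stdint.h>

    /* vsubl.s16: sign-extend both doubleword operands, subtract into a quadword. */
    static void vsubl_s16(int32_t dst[4], const int16_t n[4], const int16_t m[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            dst[i] = (int32_t)n[i] - (int32_t)m[i];
    }

    /* vsubw.s16: the first operand is already wide; only the second is extended. */
    static void vsubw_s16(int32_t dst[4], const int32_t n[4], const int16_t m[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            dst[i] = n[i] - (int32_t)m[i];
    }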
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 0 1 op(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1903
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1904
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1905
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1906
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1907
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1908
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1909
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1910
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1911
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1912
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1913
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1914
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 1 D(1) size(2) Vn(4) Vd(4) 0 0 1 op(1) N(1) 0 M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1915
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1916
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1917
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.s32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1918
+
+ @assert {
+
+ op == 0
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u8 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1919
+
+ @assert {
+
+ op == 0
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u16 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1920
+
+ @assert {
+
+ op == 0
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubl.u32 qwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1921
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1922
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1923
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.s32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1924
+
+ @assert {
+
+ op == 1
+ size == 0
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u8 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1925
+
+ @assert {
+
+ op == 1
+ size == 1
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u16 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1926
+
+ @assert {
+
+ op == 1
+ size == 10
+ U == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vsubw.u32 ?qwvec_D qwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88418_vswp.d b/plugins/arm/v7/opdefs/A88418_vswp.d
new file mode 100644
index 0000000..2f11036
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88418_vswp.d
@@ -0,0 +1,133 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSWP
+
+@id 344
+
+@desc {
+
+ VSWP (Vector Swap) exchanges the contents of two vectors. The vectors can be either doubleword or quadword. There is no distinction between data types. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1927
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vswp qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1928
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vswp dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 0 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1929
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vswp qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1930
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vswp dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88420_vtrn.d b/plugins/arm/v7/opdefs/A88420_vtrn.d
new file mode 100644
index 0000000..a8481ca
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88420_vtrn.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VTRN
+
+@id 345
+
+@desc {
+
+ Vector Transpose treats the elements of its operand vectors as elements of 2 × 2 matrices, and transposes the matrices. The elements of the vectors can be 8-bit, 16-bit, or 32-bit. There is no distinction between data types. Figure A8-7 (VTRN doubleword operation) shows the operation of doubleword VTRN. Quadword VTRN performs the same operation as doubleword VTRN twice, once on the upper halves of the quadword vectors, and once on the lower halves. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
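A small C sketch of the doubleword VTRN.32 case may help: each input register supplies one row of a 2 × 2 matrix and the off-diagonal elements are exchanged. Names are illustrative; smaller element sizes apply the same swap to every adjacent pair of lanes, and the quadword form repeats it on both halves.

    #include <stdint.h>

    /* Transpose the 2x2 matrix whose rows are d[] and m[]. */
    static void vtrn_32_dword(uint32_t d[2], uint32_t m[2])
    {
        uint32_t tmp = d[1];

        d[1] = m[0];
        m[0] = tmp;
    }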
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1931
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1932
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1933
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1934
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1935
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1936
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 0 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1937
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1938
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1939
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1940
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1941
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1942
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtrn.32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88421_vtst.d b/plugins/arm/v7/opdefs/A88421_vtst.d
new file mode 100644
index 0000000..b4e6881
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88421_vtst.d
@@ -0,0 +1,321 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VTST
+
+@id 346
+
+@desc {
+
+ Vector Test Bits takes each element in a vector, and bitwise ANDs it with the corresponding element of a second vector. If the result is not zero, the corresponding element in the destination vector is set to all ones. Otherwise, it is set to all zeros. The operand vector elements can be 8-bit, 16-bit, or 32-bit fields. The result vector elements are fields the same size as the operand vector elements. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
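A hedged C sketch of the .32 variant, with an illustrative helper name:

    #include <stdint.h>

    /* AND corresponding lanes; any surviving bit selects an all-ones result. */
    static void vtst_32(uint32_t dst[4], const uint32_t n[4], const uint32_t m[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            dst[i] = (n[i] & m[i]) ? UINT32_MAX : 0;
    }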
+
+@encoding (T1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1943
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1944
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1945
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1946
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1947
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1948
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 0 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 1 0 0 0 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1949
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.8 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1950
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.16 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1951
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vtst.32 ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1952
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.8 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1953
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.16 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1954
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vtst.32 ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88422_vuzp.d b/plugins/arm/v7/opdefs/A88422_vuzp.d
new file mode 100644
index 0000000..d3d7e44
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88422_vuzp.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VUZP
+
+@id 347
+
+@desc {
+
+ Vector Unzip de-interleaves the elements of two vectors. See Table A8-13 and Table A8-14 for examples of the operation. The elements of the vectors can be 8-bit, 16-bit, or 32-bit. There is no distinction between data types. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
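Since the referenced tables are not reproduced here, a hedged C sketch of the quadword .32 case (illustrative name): the even-indexed elements of the concatenated pair end up in the first vector, the odd-indexed ones in the second.

    #include <stdint.h>
    #include <string.h>

    /* De-interleave d/m viewed as one 8-element sequence d0..d3,m0..m3. */
    static void vuzp_32_q(uint32_t d[4], uint32_t m[4])
    {
        uint32_t even[4] = { d[0], d[2], m[0], m[2] };
        uint32_t odd[4]  = { d[1], d[3], m[1], m[3] };

        memcpy(d, even, sizeof(even));
        memcpy(m, odd, sizeof(odd));
    }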
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1955
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1956
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1957
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1958
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1959
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1960
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 1 0 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1961
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1962
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1963
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1964
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1965
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1966
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vuzp.32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/A88423_vzip.d b/plugins/arm/v7/opdefs/A88423_vzip.d
new file mode 100644
index 0000000..b289dd8
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88423_vzip.d
@@ -0,0 +1,309 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VZIP
+
+@id 348
+
+@desc {
+
+ Vector Zip interleaves the elements of two vectors. See Table A8-15 and Table A8-16 for examples of the operation. The elements of the vectors can be 8-bit, 16-bit, or 32-bit. There is no distinction between data types. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
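As with VUZP, a hedged C sketch of the quadword .32 case (illustrative name): the two vectors are interleaved, the first vector filling the even slots, and the result is split back across both registers.

    #include <stdint.h>
    #include <string.h>

    /* Interleave d and m, keep the low half in d and the high half in m. */
    static void vzip_32_q(uint32_t d[4], uint32_t m[4])
    {
        uint32_t lo[4] = { d[0], m[0], d[1], m[1] };
        uint32_t hi[4] = { d[2], m[2], d[3], m[3] };

        memcpy(d, lo, sizeof(lo));
        memcpy(m, hi, sizeof(hi));
    }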
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1967
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1968
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1969
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1970
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1971
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1972
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.32 dwvec_D dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 1 1 1 1 1 1 D(1) 1 1 size(2) 1 0 Vd(4) 0 0 0 1 1 Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1973
+
+ @assert {
+
+ Q == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.8 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1974
+
+ @assert {
+
+ Q == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.16 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1975
+
+ @assert {
+
+ Q == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vzip.32 qwvec_D qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1976
+
+ @assert {
+
+ Q == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.8 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1977
+
+ @assert {
+
+ Q == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.16 dwvec_D dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1978
+
+ @assert {
+
+ Q == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vzip.32 dwvec_D dwvec_M
+
+ }
+
+}
+
diff --git a/plugins/arm/v7/opdefs/Makefile.am b/plugins/arm/v7/opdefs/Makefile.am
index 68e6c6f..3bae426 100644
--- a/plugins/arm/v7/opdefs/Makefile.am
+++ b/plugins/arm/v7/opdefs/Makefile.am
@@ -309,6 +309,82 @@ ARMV7_DEFS = \
A88274_uxtb.d \
A88275_uxtb16.d \
A88276_uxth.d \
+ A88277_vaba.d \
+ A88278_vabd.d \
+ A88279_vabd.d \
+ A88280_vabs.d \
+ A88281_vac.d \
+ A88282_vadd.d \
+ A88283_vadd.d \
+ A88284_vaddhn.d \
+ A88285_vadd.d \
+ A88287_vand.d \
+ A88288_vbic.d \
+ A88289_vbic.d \
+ A88290_vb.d \
+ A88291_vceq.d \
+ A88292_vceq.d \
+ A88293_vcge.d \
+ A88294_vcge.d \
+ A88295_vcgt.d \
+ A88296_vcgt.d \
+ A88298_vcle.d \
+ A88299_vcls.d \
+ A88301_vclt.d \
+ A88302_vclz.d \
+ A88303_vcmp.d \
+ A88304_vcnt.d \
+ A88305_vcvt.d \
+ A88312_vdiv.d \
+ A88314_vdup.d \
+ A88315_veor.d \
+ A88317_vfm.d \
+ A88318_vfnm.d \
+ A88319_vh.d \
+ A88334_vmax.d \
+ A88335_vmax.d \
+ A88337_vmla.d \
+ A88345_vmov.d \
+ A88346_vmovl.d \
+ A88347_vmovn.d \
+ A88351_vmul.d \
+ A88354_vmvn.d \
+ A88355_vneg.d \
+ A88356_vnm.d \
+ A88358_vorn.d \
+ A88359_vorr.d \
+ A88360_vorr.d \
+ A88361_vpadal.d \
+ A88362_vpadd.d \
+ A88363_vpadd.d \
+ A88364_vpaddl.d \
+ A88365_vpmax.d \
+ A88366_vpmax.d \
+ A88369_vqabs.d \
+ A88375_vqneg.d \
+ A88377_vqrshl.d \
+ A88379_vqshl.d \
+ A88382_vqsub.d \
+ A88383_vraddhn.d \
+ A88384_vrecpe.d \
+ A88385_vrecps.d \
+ A88386_vrev.d \
+ A88387_vrhadd.d \
+ A88388_vrshl.d \
+ A88391_vrsqrte.d \
+ A88392_vrsqrts.d \
+ A88394_vrsubhn.d \
+ A88396_vshl.d \
+ A88401_vsqrt.d \
+ A88414_vsub.d \
+ A88415_vsub.d \
+ A88416_vsubhn.d \
+ A88417_vsub.d \
+ A88418_vswp.d \
+ A88420_vtrn.d \
+ A88421_vtst.d \
+ A88422_vuzp.d \
+ A88423_vzip.d \
A88424_wfe.d \
A88425_wfi.d \
A88426_yield.d \
diff --git a/plugins/arm/v7/pseudo.c b/plugins/arm/v7/pseudo.c
index 4055040..e50da2d 100644
--- a/plugins/arm/v7/pseudo.c
+++ b/plugins/arm/v7/pseudo.c
@@ -24,6 +24,7 @@
#include "pseudo.h"
+#include <assert.h>
#include <stddef.h>
@@ -470,6 +471,126 @@ bool armv7_thumb_expand_imm(uint32_t imm12, uint32_t *value)
/******************************************************************************
* *
+* Parameters : op = operation encoding. *
+* cmode = details about the operation mode. *
+* imm8 = 8-bit value to expand. *
+* value = newly computed value. [OUT] *
+* *
+* Description : Translates the 'AdvSIMDExpandImm' function. *
+* *
+* Return : Status of the operation. *
+* *
+* Remarks : - *
+* *
+******************************************************************************/
+
+bool armv7_advanced_simd_expand_imm(bool op, uint8_t cmode, uint8_t imm8, uint64_t *value)
+{
+ bool result; /* Status to return */
+ uint64_t raw; /* 64-bit input value */
+ uint8_t cmode_31; /* Targeted part of the argument */
+ uint64_t imm8a; /* 8-bit value #1 */
+ uint64_t imm8b; /* 8-bit value #2 */
+ uint64_t imm8c; /* 8-bit value #3 */
+ uint64_t imm8d; /* 8-bit value #4 */
+ uint64_t imm8e; /* 8-bit value #5 */
+ uint64_t imm8f; /* 8-bit value #6 */
+ uint64_t imm8g; /* 8-bit value #7 */
+ uint64_t imm8h; /* 8-bit value #8 */
+ uint32_t imm32; /* 32-bit value */
+
+ result = true;
+
+ raw = imm8;
+
+ cmode_31 = (cmode >> 1) & 0x7;
+
+ switch (cmode_31)
+ {
+ case b000:
+ *value = armv7_replicate_64(raw, 2);
+ break;
+
+ case b001:
+ *value = armv7_replicate_64(raw << 8, 2);
+ break;
+
+ case b010:
+ *value = armv7_replicate_64(raw << 16, 2);
+ break;
+
+ case b011:
+ *value = armv7_replicate_64(raw << 24, 2);
+ break;
+
+ case b100:
+ *value = armv7_replicate_64(raw, 4);
+ break;
+
+ case b101:
+ *value = armv7_replicate_64(raw << 8, 4);
+ break;
+
+ case b110:
+
+ if ((cmode & 0x1) == 0)
+ *value = armv7_replicate_64(raw << 8 | 0xff, 2);
+ else
+ *value = armv7_replicate_64(raw << 16 | 0xffff, 2);
+ break;
+
+ case b111:
+
+ if ((cmode & 0x1) == 0)
+ {
+ if (!op)
+ *value = armv7_replicate_64(raw, 8);
+
+ else
+ {
+ imm8a = armv7_replicate_8((imm8 & 0x80) >> 7, 8);
+ imm8b = armv7_replicate_8((imm8 & 0x40) >> 6, 8);
+ imm8c = armv7_replicate_8((imm8 & 0x20) >> 5, 8);
+ imm8d = armv7_replicate_8((imm8 & 0x10) >> 4, 8);
+ imm8e = armv7_replicate_8((imm8 & 0x8) >> 3, 8);
+ imm8f = armv7_replicate_8((imm8 & 0x4) >> 2, 8);
+ imm8g = armv7_replicate_8((imm8 & 0x2) >> 1, 8);
+ imm8h = armv7_replicate_8((imm8 & 0x1) >> 0, 8);
+
+ *value = (imm8a << 56) | (imm8b << 48) | (imm8c << 40) | (imm8d << 32) \
+ | (imm8e << 24) | (imm8f << 16) | (imm8g << 8) | imm8h;
+
+ }
+
+ }
+ else
+ {
+ if (!op)
+ {
+ /* imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19) */
+ imm32 = ((uint32_t)(raw & 0x80) << 24) \
+ | ((uint32_t)(raw & 0x40 ? 0 : 1) << 30) \
+ | ((uint32_t)(raw & 0x40 ? 0x1f : 0x00) << 25) \
+ | ((uint32_t)(raw & 0x3f) << 19);
+
+ *value = armv7_replicate_64(imm32, 2);
+
+ }
+
+ else
+ result = false;
+
+ }
+ break;
+
+ }
+
+ return result;
+
+}
+
+
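To make the expansion above concrete, here is an illustrative check of a few expanded values, computed from the ARM pseudocode and assuming the corrected replicate helpers below; it is not part of the patch.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #include "pseudo.h"

    static void check_adv_simd_expand_imm(void)
    {
        uint64_t imm64;

        /* cmode = 0b0000: imm8 kept in the low byte of each 32-bit lane. */
        assert(armv7_advanced_simd_expand_imm(false, 0x0, 0xab, &imm64));
        assert(imm64 == 0x000000ab000000abULL);

        /* cmode = 0b1100: imm8 shifted left by 8 with ones filled in below. */
        assert(armv7_advanced_simd_expand_imm(false, 0xc, 0x12, &imm64));
        assert(imm64 == 0x000012ff000012ffULL);

        /* op set, cmode = 0b1110: each bit of imm8 widened to a full byte. */
        assert(armv7_advanced_simd_expand_imm(true, 0xe, 0xa5, &imm64));
        assert(imm64 == 0xff00ff0000ff00ffULL);
    }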
+/******************************************************************************
+* *
* Paramètres : type2 = type de décalage encodé sur 2 bits. *
* imm5 = valeur de décalage entière sur 5 bits. *
* type = type de décalage à constituer. [OUT] *
@@ -664,6 +785,72 @@ bool armv7_shift(uint32_t x, unsigned int n, SRType type, unsigned int amount, b
/******************************************************************************
* *
+* Parameters : x = 8-bit value to process. *
+* n = number of copies to produce. *
+* *
+* Description : Builds a value out of replications. *
+* *
+* Return : Newly computed value. *
+* *
+* Remarks : - *
+* *
+******************************************************************************/
+
+uint8_t armv7_replicate_8(uint8_t x, unsigned int n)
+{
+ uint8_t result; /* Value to return */
+ unsigned int step; /* Progression step */
+ unsigned int i; /* Loop index */
+
+ assert(8 % n == 0);
+
+ result = 0;
+
+ step = 8 / n;
+
+ for (i = 0; i < 8; i += step)
+ result |= (x << i);
+
+ return result;
+
+}
+
+
+/******************************************************************************
+* *
+* Parameters : x = 64-bit value to process. *
+* n = number of copies to produce. *
+* *
+* Description : Builds a value out of replications. *
+* *
+* Return : Newly computed value. *
+* *
+* Remarks : - *
+* *
+******************************************************************************/
+
+uint64_t armv7_replicate_64(uint64_t x, unsigned int n)
+{
+ uint64_t result; /* Value to return */
+ unsigned int step; /* Progression step */
+ unsigned int i; /* Loop index */
+
+ assert(64 % n == 0);
+
+ result = 0;
+
+ step = 64 / n;
+
+ for (i = 0; i < 64; i += step)
+ result |= (x << i);
+
+ return result;
+
+}
+
+
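A quick illustrative check of the two replicate helpers above, assuming the corrected shift; the expected values follow directly from placing each copy 'step' bits apart.

    #include <assert.h>
    #include <stdint.h>

    #include "pseudo.h"

    static void check_replicate(void)
    {
        /* Two copies of a 4-bit pattern, then eight copies of a single bit. */
        assert(armv7_replicate_8(0x5, 2) == 0x55);
        assert(armv7_replicate_8(0x1, 8) == 0xff);

        /* The same idea over 64 bits. */
        assert(armv7_replicate_64(0xab, 8) == 0xababababababababULL);
        assert(armv7_replicate_64(0x000012ff, 2) == 0x000012ff000012ffULL);
    }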
+/******************************************************************************
+* *
* Paramètres : x = valeur sur 32 bits maximum à traiter. *
* n = nombre de bits à prendre en compte. *
* i = taille finale à obtenir. *
diff --git a/plugins/arm/v7/pseudo.h b/plugins/arm/v7/pseudo.h
index 7a598f9..930ef3a 100644
--- a/plugins/arm/v7/pseudo.h
+++ b/plugins/arm/v7/pseudo.h
@@ -94,6 +94,16 @@ bool armv7_thumb_expand_imm(uint32_t, uint32_t *);
/**
+ * § A7.4.6 - One register and a modified immediate value
+ */
+
+
+/* Translates the 'AdvSIMDExpandImm' function. */
+bool armv7_advanced_simd_expand_imm(bool, uint8_t, uint8_t, uint64_t *);
+
+
+
+/**
* § A8.4.3 - Pseudocode details of instruction-specified shifts and rotates
*/
@@ -128,6 +138,12 @@ bool armv7_shift(uint32_t, unsigned int, SRType, unsigned int, bool, uint32_t *)
*/
+/* Builds a value out of replications. */
+uint8_t armv7_replicate_8(uint8_t, unsigned int);
+
+/* Builds a value out of replications. */
+uint64_t armv7_replicate_64(uint64_t, unsigned int);
+
/* Traduit la fonction 'ZeroExtend'. */
uint32_t armv7_zero_extend(uint32_t, unsigned int, unsigned int);