Diffstat (limited to 'plugins/arm/v7/opdefs/A88290_vb.d')
-rw-r--r--  plugins/arm/v7/opdefs/A88290_vb.d  |  321
1 file changed, 321 insertions(+), 0 deletions(-)
diff --git a/plugins/arm/v7/opdefs/A88290_vb.d b/plugins/arm/v7/opdefs/A88290_vb.d
new file mode 100644
index 0000000..c4673c0
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88290_vb.d
@@ -0,0 +1,321 @@
+
+/* Chrysalide - Binary file analysis tool
+ * ##FILE## - ARMv7 instruction translation
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VBIF, VBIT, VBSL
+
+@id 282
+
+@desc {
+
+    VBIF (Vector Bitwise Insert if False), VBIT (Vector Bitwise Insert if True), and VBSL (Vector Bitwise Select) perform bitwise selection under the control of a mask, and place the results in the destination register. The registers can be either quadword or doubleword, and must all be the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding; see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 1 1 1 1 1 0 D(1) op(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1005
+
+ @assert {
+
+ Q == 1
+ op == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1006
+
+ @assert {
+
+ Q == 1
+ op == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1007
+
+ @assert {
+
+ Q == 1
+    op == 01
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1008
+
+ @assert {
+
+ Q == 0
+ op == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1009
+
+ @assert {
+
+ Q == 0
+ op == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1010
+
+ @assert {
+
+ Q == 0
+    op == 01
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
+@encoding (A1) {
+
+    @word 1 1 1 1 0 0 1 1 0 D(1) op(2) Vn(4) Vd(4) 0 0 0 1 N(1) Q(1) M(1) 1 Vm(4)
+
+ @syntax {
+
+ @subid 1011
+
+ @assert {
+
+ Q == 1
+ op == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1012
+
+ @assert {
+
+ Q == 1
+ op == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1013
+
+ @assert {
+
+ Q == 1
+    op == 01
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_N = QuadWordVector(N:Vn)
+ qwvec_M = QuadWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?qwvec_D qwvec_N qwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1014
+
+ @assert {
+
+ Q == 0
+ op == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbif ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1015
+
+ @assert {
+
+ Q == 0
+ op == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbit ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+ @syntax {
+
+ @subid 1016
+
+ @assert {
+
+ Q == 0
+    op == 01
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_N = DoubleWordVector(N:Vn)
+ dwvec_M = DoubleWordVector(M:Vm)
+
+ }
+
+ @asm vbsl ?dwvec_D dwvec_N dwvec_M
+
+ }
+
+}
+
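
The @desc block names the selection each mnemonic performs but not the underlying bit operations. As a reference for readers of this opcode definition, here is a minimal C sketch of the per-bit behaviour the ARM ARM specifies for VBIF, VBIT and VBSL. It is purely illustrative, is not part of the patch, and the helper names are invented.

    #include <stdint.h>
    #include <stdio.h>

    /* One 64-bit lane; a quadword operation is simply two such lanes.
     * Only the role played by each operand differs between the forms. */

    /* VBSL: the old destination value is the mask; keep bits of n where
     * d is 1, take bits of m where d is 0. */
    static uint64_t vbsl64(uint64_t d, uint64_t n, uint64_t m)
    {
        return (n & d) | (m & ~d);
    }

    /* VBIT: insert bits of n into d where the mask m is 1. */
    static uint64_t vbit64(uint64_t d, uint64_t n, uint64_t m)
    {
        return (n & m) | (d & ~m);
    }

    /* VBIF: insert bits of n into d where the mask m is 0. */
    static uint64_t vbif64(uint64_t d, uint64_t n, uint64_t m)
    {
        return (d & m) | (n & ~m);
    }

    int main(void)
    {
        uint64_t d = 0x00000000ffffffffULL;   /* previous destination */
        uint64_t n = 0x1122334455667788ULL;   /* first source         */
        uint64_t m = 0xff00ff00ff00ff00ULL;   /* second source        */

        printf("vbsl: %016llx\n", (unsigned long long) vbsl64(d, n, m));
        printf("vbit: %016llx\n", (unsigned long long) vbit64(d, n, m));
        printf("vbif: %016llx\n", (unsigned long long) vbif64(d, n, m));
        return 0;
    }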
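
The @word line and the @conv blocks together describe how operands are recovered from the 32-bit encoding: the single D, N and M bits are concatenated on top of the 4-bit Vd, Vn and Vm fields, and Q selects quadword or doubleword registers. The following field-extraction sketch follows that layout; the sample word, the bits() helper and the register-number arithmetic are assumptions made here for illustration, not Chrysalide code (the file's QuadWordVector/DoubleWordVector helpers are what actually consume D:Vd, N:Vn and M:Vm).

    #include <stdint.h>
    #include <stdio.h>

    /* Bit layout declared by the @word directive (bit 31 first):
     * fixed prefix (31-23), D (22), op (21-20), Vn (19-16), Vd (15-12),
     * fixed 0001 (11-8), N (7), Q (6), M (5), fixed 1 (4), Vm (3-0). */
    static uint32_t bits(uint32_t word, unsigned hi, unsigned lo)
    {
        return (word >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main(void)
    {
        uint32_t raw = 0xf3120154;   /* sample A1 word: vbsl q0, q1, q2 */

        uint32_t D  = bits(raw, 22, 22);
        uint32_t op = bits(raw, 21, 20);
        uint32_t Vn = bits(raw, 19, 16);
        uint32_t Vd = bits(raw, 15, 12);
        uint32_t N  = bits(raw, 7, 7);
        uint32_t Q  = bits(raw, 6, 6);
        uint32_t M  = bits(raw, 5, 5);
        uint32_t Vm = bits(raw, 3, 0);

        /* The @conv lines concatenate the extra bit on top of the 4-bit
         * field, giving 5-bit register numbers d = D:Vd, n = N:Vn and
         * m = M:Vm.  With Q == 0 these name doubleword registers Dd
         * directly; with Q == 1 the low bit must be 0 and the assembler
         * quadword register is Q(d/2). */
        uint32_t d = (D << 4) | Vd;
        uint32_t n = (N << 4) | Vn;
        uint32_t m = (M << 4) | Vm;

        printf("op=%u Q=%u d=%u n=%u m=%u\n", op, Q, d, n, m);
        return 0;
    }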