summaryrefslogtreecommitdiff
path: root/plugins/arm/v7/opdefs/A88396_vshl.d
diff options
context:
space:
mode:
Diffstat (limited to 'plugins/arm/v7/opdefs/A88396_vshl.d')
-rw-r--r-- plugins/arm/v7/opdefs/A88396_vshl.d 813
1 file changed, 813 insertions, 0 deletions
diff --git a/plugins/arm/v7/opdefs/A88396_vshl.d b/plugins/arm/v7/opdefs/A88396_vshl.d
new file mode 100644
index 0000000..4df1c92
--- /dev/null
+++ b/plugins/arm/v7/opdefs/A88396_vshl.d
@@ -0,0 +1,813 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * ##FILE## - traduction d'instructions ARMv7
+ *
+ * Copyright (C) 2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+@title VSHL (register)
+
+@id 338
+
+@desc {
+
+    Vector Shift Left (register) takes each element in a vector, shifts them by a value from the least significant byte of the corresponding element of a second vector, and places the results in the destination vector. If the shift value is positive, the operation is a left shift. If the shift value is negative, it is a truncating right shift. Note: For a rounding shift, see VRSHL on page A8-1032. The first operand and result elements are the same data type, and can be any one of: • 8-bit, 16-bit, 32-bit, or 64-bit signed integers • 8-bit, 16-bit, 32-bit, or 64-bit unsigned integers. The second operand is always a signed integer of the same size. Depending on settings in the CPACR, NSACR, and HCPTR registers, and the security state and mode in which the instruction is executed, an attempt to execute the instruction might be UNDEFINED, or trapped to Hyp mode. Summary of access controls for Advanced SIMD functionality on page B1-1232 summarizes these controls. ARM deprecates the conditional execution of any Advanced SIMD instruction encoding that is not also available as a VFP instruction encoding, see Conditional execution on page A8-288.
+
+}
+
+@encoding (T1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1845
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1846
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1847
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1848
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1849
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1850
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1851
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1852
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1853
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1854
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1855
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1856
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1857
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1858
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1859
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1860
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+
+@encoding (A1) {
+
+ @word 1 1 1 U(1) 1 1 1 1 0 D(1) size(2) Vn(4) Vd(4) 0 1 0 0 N(1) Q(1) M(1) 0 Vm(4)
+
+ @syntax {
+
+ @subid 1861
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1862
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1863
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1864
+
+ @assert {
+
+ Q == 1
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1865
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1866
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1867
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1868
+
+ @assert {
+
+ Q == 1
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ qwvec_D = QuadWordVector(D:Vd)
+ qwvec_M = QuadWordVector(M:Vm)
+ qwvec_N = QuadWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?qwvec_D qwvec_M qwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1869
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1870
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1871
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1872
+
+ @assert {
+
+ Q == 0
+ U == 0
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.s64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1873
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 0
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u8 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1874
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 1
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u16 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1875
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 10
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u32 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+ @syntax {
+
+ @subid 1876
+
+ @assert {
+
+ Q == 0
+ U == 1
+ size == 11
+
+ }
+
+ @conv {
+
+ dwvec_D = DoubleWordVector(D:Vd)
+ dwvec_M = DoubleWordVector(M:Vm)
+ dwvec_N = DoubleWordVector(N:Vn)
+
+ }
+
+ @asm vshl.u64 ?dwvec_D dwvec_M dwvec_N
+
+ }
+
+}
+