summaryrefslogtreecommitdiff
path: root/plugins/arm/v7/helpers.h
diff options
context:
space:
mode:
Diffstat (limited to 'plugins/arm/v7/helpers.h')
-rw-r--r--plugins/arm/v7/helpers.h441
1 files changed, 441 insertions, 0 deletions
diff --git a/plugins/arm/v7/helpers.h b/plugins/arm/v7/helpers.h
new file mode 100644
index 0000000..94b85f9
--- /dev/null
+++ b/plugins/arm/v7/helpers.h
@@ -0,0 +1,441 @@
+
+/* Chrysalide - Outil d'analyse de fichiers binaires
+ * helpers.h - prototypes pour l'aide à la mise en place des opérandes ARMv7
+ *
+ * Copyright (C) 2014-2017 Cyrille Bagard
+ *
+ * This file is part of Chrysalide.
+ *
+ * Chrysalide is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Chrysalide is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Chrysalide. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#ifndef _PLUGINS_ARM_V7_HELPERS_H
+#define _PLUGINS_ARM_V7_HELPERS_H
+
+
+#include <arch/operand.h>
+
+
+#include "cregister.h"
+#include "pseudo.h"
+#include "operands/coproc.h"
+#include "operands/estate.h"
+#include "operands/limitation.h"
+#include "operands/maccess.h"
+#include "operands/offset.h"
+#include "operands/reglist.h"
+#include "operands/rotation.h"
+#include "operands/shift.h"
+#include "../register.h"
+
+
+
+
+
/* Builds a barrier-limitation operand from its raw option bits. */
/* Fix: macro argument parenthesized against operator-precedence surprises. */
#define BarrierLimitation(opt)                              \
    ({                                                      \
        GArchOperand *__result;                             \
        __result = g_armv7_limitation_operand_new((opt));   \
        __result;                                           \
    })
+
+
/* Builds a 32-bit immediate operand holding the field width msb - lsb + 1. */
/* 'msb' and 'lsb' must be GImmOperand instances.                           */
/* Fix: macro arguments parenthesized.                                      */
#define BitDiff(msb, lsb)                                                   \
    ({                                                                      \
        GArchOperand *__result;                                             \
        uint32_t __width;                                                   \
        __width = g_imm_operand_get_raw_value(G_IMM_OPERAND((msb)));        \
        __width -= g_imm_operand_get_raw_value(G_IMM_OPERAND((lsb)));       \
        __width += 1;                                                       \
        __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __width); \
        __result;                                                           \
    })
+
+
/* Builds an 8-bit unsigned immediate operand from 'val'.                     */
/* Fix: '(uint8_t)val' parenthesized — a cast binds tighter than binary       */
/* operators, so BuildImm8(a + b) used to cast only 'a'.                      */
#define BuildImm8(val)                                                        \
    ({                                                                        \
        GArchOperand *__result;                                               \
        __result = g_imm_operand_new_from_value(MDS_8_BITS_UNSIGNED, (uint8_t)(val)); \
        __result;                                                             \
    })
+
+
/* Builds a 16-bit unsigned immediate operand from 'val'.                     */
/* Fix: argument parenthesized (cast precedence).                             */
/* NOTE(review): duplicates Imm16() further down — consider keeping only one. */
#define BuildImm16(val)                                                       \
    ({                                                                        \
        GArchOperand *__result;                                               \
        __result = g_imm_operand_new_from_value(MDS_16_BITS_UNSIGNED, (uint16_t)(val)); \
        __result;                                                             \
    })
+
+
/* Builds a coprocessor designation operand from its raw index. */
/* Fix: macro argument parenthesized.                           */
#define CoProcessor(idx)                                \
    ({                                                  \
        GArchOperand *__result;                         \
        __result = g_armv7_coproc_operand_new((idx));   \
        __result;                                       \
    })
+
+
/* Builds a coprocessor-register operand; evaluates to NULL when the    */
/* index does not designate a valid register.                           */
/* Fix: macro argument parenthesized.                                   */
#define CRegister(idx)                                                  \
    ({                                                                  \
        GArchOperand *__result;                                         \
        GArmV7CRegister *__reg;                                         \
        __reg = g_armv7_cregister_new((idx));                           \
        if (__reg == NULL)                                              \
            __result = NULL;                                            \
        else                                                            \
            __result = g_register_operand_new(G_ARCH_REGISTER(__reg));  \
        __result;                                                       \
    })
+
+
/* Builds a 32-bit immediate operand holding widthm1 + 1 (raw fields      */
/* encode "width minus one").                                             */
/* Fix: argument parenthesized — IncWidth(a | b) used to expand to        */
/* 'a | b + 1', which groups as 'a | (b + 1)'.                            */
#define IncWidth(widthm1)                                                 \
    ({                                                                    \
        GArchOperand *__result;                                           \
        uint32_t __width;                                                 \
        __width = (widthm1) + 1;                                          \
        __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __width); \
        __result;                                                         \
    })
+
+
/* Decodes a (type, imm5) pair into a shift operand, per the ARMv7        */
/* DecodeImmShift() pseudo-code; evaluates to NULL on decoding failure.   */
/* Fixes: arguments parenthesized; the intermediate immediate is released */
/* if the shift operand cannot be built (mirrors Rotation() below).       */
#define DecodeImmShift(type, imm5)                                        \
    ({                                                                    \
        GArchOperand *__result;                                           \
        SRType __shift_t;                                                 \
        uint32_t __shift_n;                                               \
        GArchOperand *__op_n;                                             \
        if (!armv7_decode_imm_shift((type), (imm5), &__shift_t, &__shift_n)) \
            __result = NULL;                                              \
        else                                                              \
        {                                                                 \
            __op_n = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __shift_n); \
            __result = g_armv7_shift_operand_new(__shift_t, __op_n);      \
            if (__result == NULL)                                         \
                g_object_unref(G_OBJECT(__op_n));                         \
        }                                                                 \
        __result;                                                         \
    })
+
+
/* Builds an endianness-state operand ('big' selects big-endian). */
/* Fix: macro argument parenthesized.                             */
#define EndianState(big)                                \
    ({                                                  \
        GArchOperand *__result;                         \
        __result = g_armv7_endian_operand_new((big));   \
        __result;                                       \
    })
+
+
/* Builds a memory-access operand from a base register, an optional       */
/* offset (wrapped with its add/subtract direction when non-NULL), an     */
/* optional shift, plus indexing and write-back flags.                    */
/* Fix: 'off' is now evaluated exactly once (it used to be evaluated in   */
/* both the NULL test and the constructor call); all args parenthesized.  */
#define MakeMemoryAccess(base, off, shift, index, add, wback)             \
    ({                                                                    \
        GArchOperand *__result;                                           \
        GArchOperand *__off_arg;                                          \
        GArchOperand *__offset;                                           \
        __off_arg = (off);                                                \
        if (__off_arg != NULL)                                            \
            __offset = g_armv7_offset_operand_new((add), __off_arg);      \
        else                                                              \
            __offset = NULL;                                              \
        __result = g_armv7_maccess_operand_new((base), __offset, (shift), (index), (wback)); \
        __result;                                                         \
    })
+
+
/* Builds the register operand following 'prev' (a GRegisterOperand),   */
/* i.e. the register whose index is one higher.                         */
/* Fix: macro argument parenthesized.                                   */
#define NextRegister(prev)                                              \
    ({                                                                  \
        GRegisterOperand *__prev_op;                                    \
        GArchRegister *__reg;                                           \
        uint8_t __id;                                                   \
        __prev_op = G_REGISTER_OPERAND((prev));                         \
        __reg = g_register_operand_get_register(__prev_op);             \
        __id = g_arm_register_get_index(G_ARM_REGISTER(__reg));         \
        Register(__id + 1);                                             \
    })
+
+
/* Builds a 32-bit unsigned immediate operand holding 'val' as-is. */
/* Fix: argument parenthesized (cast precedence).                  */
#define RawValue(val)                                              \
    ({                                                             \
        GArchOperand *__result;                                    \
        __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, (uint32_t)(val)); \
        __result;                                                  \
    })
+
+
/* Builds an ARMv7 register operand; evaluates to NULL when the index   */
/* does not designate a valid register.                                 */
/* Fix: macro argument parenthesized.                                   */
#define Register(idx)                                                   \
    ({                                                                  \
        GArchOperand *__result;                                         \
        GArmV7Register *__reg;                                          \
        __reg = g_armv7_register_new((idx));                            \
        if (__reg == NULL)                                              \
            __result = NULL;                                            \
        else                                                            \
            __result = g_register_operand_new(G_ARCH_REGISTER(__reg));  \
        __result;                                                       \
    })
+
+
/* Builds a register-based shift operand: shift of kind 'shift_t' by    */
/* the amount held in register 'rs'. Evaluates to NULL when 'rs' is     */
/* not a valid register index.                                          */
/* Fix: macro arguments parenthesized.                                  */
#define RegisterShift(shift_t, rs)                                      \
    ({                                                                  \
        GArchOperand *__result;                                         \
        GArchOperand *__reg;                                            \
        __reg = Register((rs));                                         \
        if (__reg == NULL)                                              \
            __result = NULL;                                            \
        else                                                            \
            __result = g_armv7_shift_operand_new((shift_t), __reg);     \
        __result;                                                       \
    })
+
+
/* Builds a rotation operand from a raw 5-bit amount; the intermediate  */
/* immediate is released if the rotation operand cannot be built.       */
/* Fix: macro argument parenthesized.                                   */
#define Rotation(val5)                                                  \
    ({                                                                  \
        GArchOperand *__result;                                         \
        uint8_t __rot;                                                  \
        GArchOperand *__rot_op;                                         \
        __rot = (val5);                                                 \
        __rot_op = g_imm_operand_new_from_value(MDS_8_BITS_UNSIGNED, __rot); \
        __result = g_armv7_rotation_operand_new(__rot_op);              \
        if (__result == NULL)                                           \
            g_object_unref(G_OBJECT(__rot_op));                         \
        __result;                                                       \
    })
+
+
/* Builds an 8-bit unsigned immediate operand (ARM pseudo-code UInt()).   */
/* Fix: argument parenthesized (cast precedence).                         */
#define UInt(val)                                                         \
    ({                                                                    \
        GArchOperand *__result;                                           \
        __result = g_imm_operand_new_from_value(MDS_8_BITS_UNSIGNED, (uint8_t)(val)); \
        __result;                                                         \
    })
+
+
+
+
+//#define DecodeImmShift(raw_type, raw_imm5);
+//g_armv7_shift_operand_new(SRType type, GArchOperand *value)
+
+
+
+//#define MakeMemoryAccess(base, off, shift, index, add, wback) NULL
+
+//g_armv7_maccess_operand_new(GArchOperand *base, GArchOperand *offset, GArchOperand *shift, bool indexed, bool writeb)
+
+//g_armv7_offset_operand_new(add, off)
+
+
+
+
+////////////////////
+
/* Builds a 16-bit unsigned immediate operand from 'imm16'.               */
/* Fix: argument parenthesized (cast precedence).                         */
/* NOTE(review): duplicates BuildImm16() above — consider keeping one.    */
#define Imm16(imm16)                                                      \
    ({                                                                    \
        GArchOperand *__result;                                           \
        __result = g_imm_operand_new_from_value(MDS_16_BITS_UNSIGNED, (uint16_t)(imm16)); \
        __result;                                                         \
    })
+
/* Expands a 12-bit ARM modified immediate with initial carry 'c', per    */
/* the ARMExpandImm_C() pseudo-code; evaluates to NULL on failure.        */
/* The resulting carry (written into the anonymous bool[1]) is discarded. */
/* Fix: macro arguments parenthesized.                                    */
#define ARMExpandImm_C(imm12, c)                                          \
    ({                                                                    \
        GArchOperand *__result;                                           \
        uint32_t __val;                                                   \
        if (armv7_arm_expand_imm_c((imm12), (bool []) { (c) }, &__val))   \
            __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __val); \
        else                                                              \
            __result = NULL;                                              \
        __result;                                                         \
    })
+
/* Expands a 12-bit ARM modified immediate, per the ARMExpandImm()      */
/* pseudo-code; evaluates to NULL on failure.                           */
/* Fix: macro argument parenthesized.                                   */
#define ARMExpandImm(imm12)                                             \
    ({                                                                  \
        GArchOperand *__result;                                         \
        uint32_t __val;                                                 \
        if (armv7_arm_expand_imm((imm12), &__val))                      \
            __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __val); \
        else                                                            \
            __result = NULL;                                            \
        __result;                                                       \
    })
+
/* Expands a 12-bit Thumb modified immediate with initial carry 'c', per  */
/* the ThumbExpandImm_C() pseudo-code; evaluates to NULL on failure.      */
/* The resulting carry (written into the anonymous bool[1]) is discarded. */
/* Fix: macro arguments parenthesized.                                    */
#define ThumbExpandImm_C(imm12, c)                                        \
    ({                                                                    \
        GArchOperand *__result;                                           \
        uint32_t __val;                                                   \
        if (armv7_thumb_expand_imm_c((imm12), (bool []) { (c) }, &__val)) \
            __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __val); \
        else                                                              \
            __result = NULL;                                              \
        __result;                                                         \
    })
+
/* Expands a 12-bit Thumb modified immediate, per the ThumbExpandImm()  */
/* pseudo-code; evaluates to NULL on failure.                           */
/* Fix: macro argument parenthesized.                                   */
#define ThumbExpandImm(imm12)                                           \
    ({                                                                  \
        GArchOperand *__result;                                         \
        uint32_t __val;                                                 \
        if (armv7_thumb_expand_imm((imm12), &__val))                    \
            __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __val); \
        else                                                            \
            __result = NULL;                                            \
        __result;                                                       \
    })
+
+
+
+
+
+
+
/* Decodes only the shift *amount* from a raw imm5 field (shift type      */
/* forced to 0 / LSL, and the decoded type is discarded into an anonymous */
/* SRType[1]); evaluates to NULL on failure.                              */
/* Fix: macro argument parenthesized.                                     */
#define DecodeImmShiftValue(imm5)                                         \
    ({                                                                    \
        GArchOperand *__result;                                           \
        uint32_t __shift_n;                                               \
        if (!armv7_decode_imm_shift(0, (imm5), (SRType []) { 0 }, &__shift_n)) \
            __result = NULL;                                              \
        else                                                              \
            __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __shift_n); \
        __result;                                                         \
    })
+
+
+#if 0
+// DecodeRegShift()
+// ================
+SRType DecodeRegShift(bits(2) type)
+case type of
+when '00' shift_t = SRType_LSL;
+when '01' shift_t = SRType_LSR;
+when '10' shift_t = SRType_ASR;
+when '11' shift_t = SRType_ROR;
+return shift_t;
+#endif
+
+
+
/* Zero-extends the value 'x' into an i-bit unsigned immediate operand. */
/* 'i' must be a literal bit width (8/16/32/64): it is token-pasted     */
/* into both the uintN_t type and the MDS_N_BITS_UNSIGNED constant.     */
/* Fixes: stray '0/**' '/' comment cruft removed; 'x' parenthesized.    */
#define ZeroExtend(x, i)                                                \
    ({                                                                  \
        MemoryDataSize __mds;                                           \
        uint ## i ## _t __val;                                          \
        __mds = MDS_ ## i ## _BITS_UNSIGNED;                            \
        __val = armv7_zero_extend((x), 0, (i));                         \
        g_imm_operand_new_from_value(__mds, __val);                     \
    })
+
+
+
/* Builds an i-bit unsigned immediate operand holding zero.             */
/* 'i' must be a literal bit width (8/16/32/64): it is token-pasted     */
/* into both the uintN_t type and the MDS_N_BITS_UNSIGNED constant.     */
#define Zeros(i)                                                        \
    ({                                                                  \
        MemoryDataSize __mds;                                           \
        uint ## i ## _t __zero;                                         \
        __zero = 0;                                                     \
        __mds = MDS_ ## i ## _BITS_UNSIGNED;                            \
        g_imm_operand_new_from_value(__mds, __zero);                    \
    })
+
+
+
+
+
+
+
+
/**
 * Purely internal glue.
 */


/* Wraps an offset operand together with its add/subtract direction.    */
/* Fix: macro arguments parenthesized.                                  */
#define MakeAccessOffset(add, off) \
    g_armv7_offset_operand_new((add), (off))


/* Builds a memory access from a base, an optional offset and an        */
/* optional shift.                                                      */
/* NOTE(review): called with 4 arguments here while the commented       */
/* prototype at the top half of this file uses 5 — confirm against the  */
/* current g_armv7_maccess_operand_new() signature.                     */
/* Fix: macro arguments parenthesized.                                  */
#define MakeShiftedMemoryAccess(base, off, shift, wr) \
    g_armv7_maccess_operand_new((base), (off), (shift), (wr))


/* Memory access without any shift applied to the offset.               */
/* NOTE(review): identifiers starting with '_' + uppercase are reserved */
/* by the C standard; kept as-is for compatibility with callers.        */
#define _MakeMemoryAccess(base, off, wr) \
    MakeShiftedMemoryAccess(base, off, NULL, wr)

/*
#define MakeMemoryAccess(base, off, add, wr) \
    ({ \
        GArchOperand *__off; \
        __off = MakeAccessOffset(add, off); \
        _MakeMemoryAccess(base, __off, wr); \
    })
*/

/* Memory access with no offset at all (not indexed). */
#define MakeMemoryNotIndexed(base, wr) \
    _MakeMemoryAccess(base, NULL, wr)
+
+
+
+
+
+
+
+
/* Builds a 32-bit immediate operand holding a fixed shift amount.      */
/* FIXME (kept from original): 'type' is expected as raw bits ('10'),   */
/* not the value 2 — and is currently unused by the expansion.          */
/* Fix: macro argument parenthesized.                                   */
#define FixedShift(type, imm5)                                          \
    ({                                                                  \
        GArchOperand *__result;                                         \
        uint32_t __shift_n;                                             \
        __shift_n = (imm5);                                             \
        __result = g_imm_operand_new_from_value(MDS_32_BITS_UNSIGNED, __shift_n); \
        __result;                                                       \
    })
+
+
+
+
+
+/**
+ * Glue purement interne pour les listes de registres.
+ */
+
/* Builds a register-list operand from a raw register bitmask; the list */
/* is released and the expression evaluates to NULL when the mask       */
/* cannot be loaded.                                                    */
/* Fix: macro argument parenthesized.                                   */
#define RegistersList(mask)                                             \
    ({                                                                  \
        GArchOperand *__result;                                         \
        __result = g_armv7_reglist_operand_new();                       \
        if (!g_armv7_reglist_load_registers(G_ARMV7_REGLIST_OPERAND(__result), (mask))) \
        {                                                               \
            g_object_unref(G_OBJECT(__result));                         \
            __result = NULL;                                            \
        }                                                               \
        __result;                                                       \
    })
+
+
/* Builds a register-list operand containing the single register        */
/* 'regop' (a GArmV7Register-compatible object).                        */
/* Fix: macro argument parenthesized.                                   */
#define ListFromRegister(regop)                                         \
    ({                                                                  \
        GArchOperand *__result;                                         \
        GArmV7Register *__reg;                                          \
        __result = g_armv7_reglist_operand_new();                       \
        __reg = G_ARMV7_REGISTER((regop));                              \
        g_armv7_reglist_add_register(G_ARMV7_REGLIST_OPERAND(__result), __reg); \
        __result;                                                       \
    })
+
+
+
+
+/* Effectue une rotation vers la droite d'une valeur. */
+GArchOperand *ror_armv7_imm(uint32_t, unsigned int);
+
+/* Crée un opérande de valeur immédiate avec extension de signe. */
+GArchOperand *sign_extend_armv7_imm(uint32_t, bool, unsigned int);
+
+/* Etend une valeur immédiate en mode 'Thumb' ARMv7. */
+GArchOperand *thumb_expand_armv7_imm(uint32_t);
+
+/* Réalise un simple transtypage de valeur entière. */
+GArchOperand *zero_extend_armv7_imm(uint32_t, unsigned int);
+
+
+
+#endif /* _PLUGINS_ARM_V7_HELPERS_H */