added speculative execution to see if a virtual branch address is legit or not...
main
_xeroxz 3 years ago
parent 6755dd4c15
commit e086bf0377

deps/vmprofiler vendored

@@ -1 +1 @@
-Subproject commit 4e8c37dbe0fa60018e543cbc47da2ee22e159efd
+Subproject commit 322a8bcf97d47a6d792c97d803429303106958da

@@ -25,6 +25,11 @@ class emu_t {
   const vm::vmctx_t* m_vm;
   zydis_reg_t vip, vsp;
+
+  /// <summary>
+  /// used in branch_pred_spec_exec to count legit SREG virtual instructions...
+  /// </summary>
+  std::uint8_t m_sreg_cnt;
   /// <summary>
   /// current code trace...
   /// </summary>
@@ -44,7 +49,7 @@ class emu_t {
   /// <summary>
   /// unicorn engine hook
   /// </summary>
-  uc_hook code_exec_hook, invalid_mem_hook, int_hook;
+  uc_hook code_exec_hook, invalid_mem_hook, int_hook, branch_pred_hook;
   /// <summary>
   /// code execution callback for executable memory ranges of the vmprotect'ed
@@ -61,6 +66,21 @@ class emu_t {
                                  uint32_t size,
                                  emu_t* obj);
+
+  /// <summary>
+  /// branch prediction with speculative execution (emulation)... this callback
+  /// ensures there are at least 10 SREG's and that all of the imm values are
+  /// legit...
+  /// </summary>
+  /// <param name="uc"></param>
+  /// <param name="address"></param>
+  /// <param name="size"></param>
+  /// <param name="obj"></param>
+  /// <returns></returns>
+  static bool branch_pred_spec_exec(uc_engine* uc,
+                                    uint64_t address,
+                                    uint32_t size,
+                                    emu_t* obj);
   /// <summary>
   /// invalid memory access handler. no runtime values can possibly effect the
   /// decryption of virtual instructions. thus invalid memory accesses can be
@@ -89,21 +109,21 @@ class emu_t {
   static void int_callback(uc_engine* uc, std::uint32_t intno, emu_t* obj);
   /// <summary>
-  /// determines if its possible that the virtual instruction stream contains a
-  /// virtual JCC...
-  ///
-  /// this simply checks to see if there are at least 3 LCONST that load 64bit
-  /// constant values...
-  ///
-  /// it also checks if the last 2 LCONST's load image based addresses which
-  /// land inside of executable sections...
-  ///
-  /// this function cannot be used to determine if there is a virtual branch or
-  /// not... it is only a useful/preliminary function...
-  /// </summary>
-  /// <param name="vinstrs">vector of virtual instructions...</param>
-  /// <returns>returns true if there is at least 3 LCONST in the virtual
-  /// instruction stream that load 64bit values...</returns>
-  bool could_have_jcc(std::vector<vm::instrs::vinstr_t>& vinstrs);
+  /// determines if there *could* be a JCC in the virtual code block... it's
+  /// not 100%... speculative execution is required to ensure that both
+  /// branches discovered are legit...
+  /// </summary>
+  /// <param name="vinstrs"></param>
+  /// <returns></returns>
+  std::optional<std::pair<std::uintptr_t, std::uintptr_t>> could_have_jcc(
+      std::vector<vm::instrs::vinstr_t>& vinstrs);
+
+  /// <summary>
+  /// determines if a branch is legit or not...
+  /// </summary>
+  /// <param name="vblk"></param>
+  /// <param name="branch_addr"></param>
+  /// <returns></returns>
+  bool legit_branch(vm::instrs::vblk_t& vblk, std::uintptr_t branch_addr);
 };
 } // namespace vm

@@ -2,7 +2,10 @@
 namespace vm {
 emu_t::emu_t(vm::vmctx_t* vm_ctx)
-    : m_vm(vm_ctx), vip(vm_ctx->get_vip()), vsp(vm_ctx->get_vsp()) {}
+    : m_vm(vm_ctx),
+      vip(vm_ctx->get_vip()),
+      vsp(vm_ctx->get_vsp()),
+      m_sreg_cnt(0u) {}

 emu_t::~emu_t() {
   if (uc)
@@ -81,6 +84,7 @@ bool emu_t::emulate(std::uint32_t vmenter_rva, vm::instrs::vrtn_t& vrtn) {
   vm::instrs::vblk_t blk;
   blk.m_vip = {0ull, 0ull};
+  blk.m_vm = {vip, vsp};

   cc_blk = &blk;
   cc_vrtn = &vrtn;
@@ -91,9 +95,27 @@ bool emu_t::emulate(std::uint32_t vmenter_rva, vm::instrs::vrtn_t& vrtn) {
     return false;
   }

-  std::printf(
-      "> blk_%p, number of virtual instructions = %d, could have a jcc = %d\n",
-      blk.m_vip.img_base, blk.m_vinstrs.size(), could_have_jcc(blk.m_vinstrs));
+  auto res = could_have_jcc(blk.m_vinstrs);
+  std::printf("> blk_%p, number of virtual instructions = %d\n",
+              blk.m_vip.img_base, blk.m_vinstrs.size());
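+
+  // note: could_have_jcc only yields two candidate targets (image based
+  // addresses)... each one still has to be rebased onto the module and
+  // speculatively executed (legit_branch) before the block is treated as a
+  // virtual JCC...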
+  if (res.has_value()) {
+    const auto [b1, b2] = res.value();
+    auto b1_legit =
+        legit_branch(blk, (b1 - m_vm->m_image_base) + m_vm->m_module_base);
+    auto b2_legit =
+        legit_branch(blk, (b2 - m_vm->m_image_base) + m_vm->m_module_base);
+
+    if (b1_legit && b2_legit) {
+      blk.branches.push_back(b1);
+      blk.branches.push_back(b2);
+      blk.branch_type = vm::instrs::vbranch_type::jcc;
+      std::printf("> code block has jcc, b1 = %p, b2 = %p\n", b1, b2);
+    } else {
+      blk.branch_type = vm::instrs::vbranch_type::absolute;
+      std::printf("> code block has absolute jmp...\n");
+    }
+  }
   return true;
 }
@@ -128,6 +150,92 @@ void emu_t::int_callback(uc_engine* uc, std::uint32_t intno, emu_t* obj) {
   }
 }

+bool emu_t::branch_pred_spec_exec(uc_engine* uc,
+                                  uint64_t address,
+                                  uint32_t size,
+                                  emu_t* obj) {
+  uc_err err;
+  static thread_local zydis_decoded_instr_t instr;
+  if (!ZYAN_SUCCESS(ZydisDecoderDecodeBuffer(vm::utils::g_decoder.get(),
+                                             reinterpret_cast<void*>(address),
+                                             PAGE_4KB, &instr))) {
+    std::printf("> failed to decode instruction at = 0x%p\n", address);
+    if ((err = uc_emu_stop(uc))) {
+      std::printf("> failed to stop emulation, exiting... reason = %d\n", err);
+      exit(0);
+    }
+    return false;
+  }
+
+  if (instr.mnemonic == ZYDIS_MNEMONIC_INVALID)
+    return false;
+
+  uc_context* ctx;
+  uc_context_alloc(uc, &ctx);
+  uc_context_save(uc, ctx);
+
+  // if this is the first instruction of this handler then save the stack...
+  if (!obj->cc_trace.m_instrs.size()) {
+    obj->cc_trace.m_stack = reinterpret_cast<std::uint8_t*>(malloc(STACK_SIZE));
+    uc_mem_read(uc, STACK_BASE, obj->cc_trace.m_stack, STACK_SIZE);
+  }
+
+  obj->cc_trace.m_instrs.push_back({instr, ctx});
+
+  // RET or JMP REG means the end of a vm handler...
+  if (instr.mnemonic == ZYDIS_MNEMONIC_RET ||
+      (instr.mnemonic == ZYDIS_MNEMONIC_JMP &&
+       instr.operands[0].type == ZYDIS_OPERAND_TYPE_REGISTER)) {
+    // deobfuscate the instruction stream before profiling...
+    // makes it easier for profiles to be correct...
+    vm::instrs::deobfuscate(obj->cc_trace);
+
+    // find the last MOV REG, DWORD PTR [VIP] in the instruction stream, then
+    // remove any instructions from this instruction to the JMP/RET...
+    const auto rva_fetch = std::find_if(
+        obj->cc_trace.m_instrs.rbegin(), obj->cc_trace.m_instrs.rend(),
+        [&vip = obj->vip](const vm::instrs::emu_instr_t& instr) -> bool {
+          const auto& i = instr.m_instr;
+          return i.mnemonic == ZYDIS_MNEMONIC_MOV &&
+                 i.operands[0].type == ZYDIS_OPERAND_TYPE_REGISTER &&
+                 i.operands[1].type == ZYDIS_OPERAND_TYPE_MEMORY &&
+                 i.operands[1].mem.base == vip && i.operands[1].size == 32;
+        });
+
+    if (rva_fetch != obj->cc_trace.m_instrs.rend())
+      obj->cc_trace.m_instrs.erase((rva_fetch + 1).base(),
+                                   obj->cc_trace.m_instrs.end());
+
+    const auto vinstr =
+        vm::instrs::determine(obj->vip, obj->vsp, obj->cc_trace);
+
+    // -- free the trace since we will start a new one...
+    std::for_each(obj->cc_trace.m_instrs.begin(), obj->cc_trace.m_instrs.end(),
+                  [&](const vm::instrs::emu_instr_t& instr) {
+                    uc_context_free(instr.m_cpu);
+                  });
+
+    free(obj->cc_trace.m_stack);
+    obj->cc_trace.m_instrs.clear();
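+
+    // the vjmp handler itself is skipped below... after it, a legit branch
+    // target should decode as a run of SREG's whose imm is a small (8-bit)
+    // vm register index... anything else means the speculated address did not
+    // decrypt into real virtual instructions, so emulation is stopped...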
+    if (vinstr.mnemonic != vm::instrs::mnemonic_t::jmp) {
+      if (vinstr.mnemonic != vm::instrs::mnemonic_t::sreg)
+        uc_emu_stop(uc);
+
+      if (!vinstr.imm.has_imm)
+        uc_emu_stop(uc);
+
+      if (vinstr.imm.size != 8 ||
+          vinstr.imm.val > std::numeric_limits<std::uint8_t>::max())
+        uc_emu_stop(uc);
+
+      // -- stop after 10 legit SREG's...
+      if (++obj->m_sreg_cnt == 10)
+        uc_emu_stop(uc);
+    }
+  }
+
+  // keep emulating until one of the conditions above stops the run...
+  return true;
+}

 bool emu_t::code_exec_callback(uc_engine* uc,
                                uint64_t address,
                                uint32_t size,
@@ -154,7 +262,7 @@ bool emu_t::code_exec_callback(uc_engine* uc,
   // if this is the first instruction of this handler then save the stack...
   if (!obj->cc_trace.m_instrs.size()) {
-    obj->cc_trace.m_stack = reinterpret_cast<std::uint8_t*>(malloc(STACK_SIZE));
+    obj->cc_trace.m_stack = new std::uint8_t[STACK_SIZE];
     uc_mem_read(uc, STACK_BASE, obj->cc_trace.m_stack, STACK_SIZE);
   }
@@ -197,7 +305,7 @@ bool emu_t::code_exec_callback(uc_engine* uc,
       uc_context* backup;
       uc_context_alloc(uc, &backup);
       uc_context_save(uc, backup);
-      uc_context_restore(uc, (vip_write - 1)->m_cpu);
+      uc_context_restore(uc, vip_write->m_cpu);

       auto uc_reg =
           vm::instrs::reg_map[vip_write->m_instr.operands[0].reg.value];
@@ -257,7 +365,8 @@ bool emu_t::code_exec_callback(uc_engine* uc,
       // set current code block virtual jmp instruction information...
       obj->cc_blk->m_jmp.ctx = copy;
       obj->cc_blk->m_jmp.stack = new std::uint8_t[STACK_SIZE];
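+      // copy the stack snapshot taken at the start of this vjmp handler
+      // instead of re-reading guest memory... the handler has already executed
+      // by this point, so the live stack no longer holds the state that
+      // legit_branch needs to replay the jmp...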
-      uc_mem_read(uc, STACK_BASE, obj->cc_blk->m_jmp.stack, STACK_SIZE);
+      std::memcpy(obj->cc_blk->m_jmp.stack, obj->cc_trace.m_stack,
+                  STACK_SIZE);
     }

     if (vinstr.mnemonic == vm::instrs::mnemonic_t::jmp ||
@@ -316,7 +425,40 @@ void emu_t::invalid_mem(uc_engine* uc,
   }
 }

-bool emu_t::could_have_jcc(std::vector<vm::instrs::vinstr_t>& vinstrs) {
+bool emu_t::legit_branch(vm::instrs::vblk_t& vblk, std::uintptr_t branch_addr) {
+  // remove normal execution callback...
+  uc_hook_del(uc, code_exec_hook);
+
+  // add branch pred hook...
+  uc_hook_add(uc, &branch_pred_hook, UC_HOOK_CODE,
+              (void*)&vm::emu_t::branch_pred_spec_exec, this,
+              m_vm->m_module_base, m_vm->m_module_base + m_vm->m_image_size);
+
+  // restore cpu and stack...
+  uc_context_restore(uc, vblk.m_jmp.ctx);
+  uc_mem_write(uc, STACK_BASE, vblk.m_jmp.stack, STACK_SIZE);
+
+  std::uintptr_t vsp_ptr = 0ull, rip = 0ull;
+  uc_reg_read(uc, UC_X86_REG_RIP, &rip);
+  uc_reg_read(uc, vm::instrs::reg_map[vblk.m_vm.vsp], &vsp_ptr);
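+
+  // write the speculated branch target to the top of the virtual stack so
+  // the vjmp handler pops it as the new vip when emulation resumes below...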
+  uc_mem_write(uc, vsp_ptr, &branch_addr, sizeof branch_addr);
+
+  m_sreg_cnt = 0u;
+  uc_emu_start(uc, rip, 0ull, 0ull, 0ull);
+
+  // add normal execution callback back...
+  uc_hook_add(uc, &code_exec_hook, UC_HOOK_CODE,
+              (void*)&vm::emu_t::code_exec_callback, this, m_vm->m_module_base,
+              m_vm->m_module_base + m_vm->m_image_size);
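+
+  // the branch is only considered legit if the speculative run decoded a full
+  // ten valid SREG's at the target...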
+  return m_sreg_cnt == 10;
+}
+
+std::optional<std::pair<std::uintptr_t, std::uintptr_t>> emu_t::could_have_jcc(
+    std::vector<vm::instrs::vinstr_t>& vinstrs) {
+  if (vinstrs.back().mnemonic == vm::instrs::mnemonic_t::vmexit)
+    return {};
+
   // check to see if there is at least 3 LCONST %i64's
   if (std::accumulate(
           vinstrs.begin(), vinstrs.end(), 0u,
@@ -326,7 +468,7 @@ bool emu_t::could_have_jcc(std::vector<vm::instrs::vinstr_t>& vinstrs) {
                   ? ++val
                   : val;
             }) < 3)
-    return false;
+    return {};

   // extract the lconst64's out of the virtual instruction stream...
   static const auto lconst64_chk = [&](vm::instrs::vinstr_t& v) -> bool {
@@ -337,19 +479,19 @@ bool emu_t::could_have_jcc(std::vector<vm::instrs::vinstr_t>& vinstrs) {
       std::find_if(vinstrs.rbegin(), vinstrs.rend(), lconst64_chk);

   if (lconst1 == vinstrs.rend())
-    return false;
+    return {};

   const auto lconst2 = std::find_if(lconst1 + 1, vinstrs.rend(), lconst64_chk);

   if (lconst2 == vinstrs.rend())
-    return false;
+    return {};

   // check to see if the imm val is inside of the image...
   if (lconst1->imm.val > m_vm->m_image_base + m_vm->m_image_size ||
       lconst1->imm.val < m_vm->m_image_base ||
       lconst2->imm.val > m_vm->m_image_base + m_vm->m_image_size ||
       lconst2->imm.val < m_vm->m_image_base)
-    return false;
+    return {};

   // check to see if the imm's points to something inside of an executable
   // section...
@@ -359,8 +501,8 @@ bool emu_t::could_have_jcc(std::vector<vm::instrs::vinstr_t>& vinstrs) {
       !vm::utils::scn::executable(
           m_vm->m_module_base,
           (lconst2->imm.val - m_vm->m_image_base) + m_vm->m_module_base))
-    return false;
+    return {};

-  return true;
+  return {{lconst1->imm.val, lconst2->imm.val}};
 }
 } // namespace vm
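
For readers who want the shape of the heuristic without the emulator plumbing, the following is a minimal, self-contained sketch of the could_have_jcc idea: count the 64-bit LCONST's and, if the last two load image based addresses, report them as the two candidate branch targets. The lconst_t struct, the image bounds, and main() below are simplified stand-ins for illustration only, not the vmemu/vmprofiler types... the real routine additionally requires both candidates to land in an executable section (vm::utils::scn::executable) and then verifies each one with legit_branch before trusting the result.

// sketch only: the could_have_jcc heuristic over a simplified instruction
// model... lconst_t, the image bounds and main() are illustrative stand-ins,
// not the vmemu/vmprofiler API...
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <utility>
#include <vector>

struct lconst_t {
  bool is_lconst;     // does this virtual instruction load a constant?
  std::uint8_t size;  // size of the constant in bytes...
  std::uint64_t imm;  // the constant itself...
};

// returns the last two 64-bit constants that land inside of the image...
std::optional<std::pair<std::uint64_t, std::uint64_t>> could_have_jcc(
    const std::vector<lconst_t>& vinstrs,
    std::uint64_t image_base,
    std::uint64_t image_size) {
  // a virtual jcc needs at least 3 LCONST %i64's...
  std::size_t lconst64_cnt = 0u;
  for (const auto& v : vinstrs)
    if (v.is_lconst && v.size == 8)
      ++lconst64_cnt;

  if (lconst64_cnt < 3)
    return {};

  // walk backwards and grab the last two 64-bit constants...
  std::optional<std::uint64_t> b1, b2;
  for (auto it = vinstrs.rbegin(); it != vinstrs.rend() && !b2; ++it) {
    if (!(it->is_lconst && it->size == 8))
      continue;
    if (!b1)
      b1 = it->imm;
    else
      b2 = it->imm;
  }

  if (!b1 || !b2)
    return {};

  // both candidates must be image based addresses... the real routine also
  // checks that they land in an executable section and then speculatively
  // executes each one (legit_branch) before trusting them...
  const auto inside_image = [&](std::uint64_t va) {
    return va >= image_base && va < image_base + image_size;
  };

  if (!inside_image(*b1) || !inside_image(*b2))
    return {};

  return std::make_pair(*b1, *b2);
}

int main() {
  // toy stream: the last two 64-bit constants look like jcc targets...
  std::vector<lconst_t> vinstrs = {{true, 8, 0x140001000ull},
                                   {true, 4, 0x10ull},
                                   {true, 8, 0x140002000ull},
                                   {true, 8, 0x140003000ull}};

  if (auto res = could_have_jcc(vinstrs, 0x140000000ull, 0x10000ull))
    std::printf("> candidate branches: b1 = 0x%llx, b2 = 0x%llx\n",
                static_cast<unsigned long long>(res->first),
                static_cast<unsigned long long>(res->second));
}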