merged with Colton's changes... also treating argument 1 as the stack...

merge-requests/1/head
_xeroxz 3 years ago
parent 3755f866ba
commit a84ffc3e67

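In concrete terms, "treating argument 1 as the stack" means the lifted routine no longer allocas its own virtual stack: it receives an i64* as its first parameter and every push/pop goes through that pointer. Below is a minimal, self-contained sketch of that shape — the names are illustrative, not the project's exact helpers; the real push/pop live in vmp_rtn_t further down this diff.

#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/Module.h>

// sketch: build "void rtn_example( i64* stack )" and emit one 8-byte push
// through the stack argument instead of through a local alloca'd array...
llvm::Function *make_stack_arg_rtn( llvm::Module &m )
{
    auto &ctx = m.getContext();
    llvm::IRBuilder<> b( ctx );
    auto fty = llvm::FunctionType::get( b.getVoidTy(), { llvm::Type::getInt64PtrTy( ctx ) }, false );
    auto fn = llvm::Function::Create( fty, llvm::GlobalValue::ExternalLinkage, "rtn_example", &m );
    b.SetInsertPoint( llvm::BasicBlock::Create( ctx, "entry", fn ) );
    auto rsp = fn->getArg( 0 );                                         // argument 1 == the stack
    auto rsp_i64 = b.CreatePtrToInt( rsp, b.getInt64Ty() );
    auto new_rsp = b.CreateSub( rsp_i64, b.getInt64( 8 ) );             // sub rsp, 8
    auto slot = b.CreateIntToPtr( new_rsp, llvm::Type::getInt64PtrTy( ctx ) );
    b.CreateStore( b.getInt64( 0x1337 ), slot );                        // mov [rsp], val
    b.CreateRetVoid();
    return fn;
}
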
@@ -65,11 +65,11 @@ namespace vm
{ vm::handler::LFLAGSQ, &lflagsq },
{ vm::handler::JMP, &jmp } };
static vm::llvm_value_t *and_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *result );
static vm::llvm_value_t *add_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *lhs,
vm::llvm_value_t *rhs );
static vm::llvm_value_t *and_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *lhs,
vm::llvm_value_t *rhs );
static vm::llvm_value_t *shr_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *lhs,
vm::llvm_value_t *rhs, vm::llvm_value_t *result );
public:
static lifters_t *get_instance( void )

@@ -43,20 +43,19 @@ namespace vm
llvm_context_t *llvm_ctx;
llvm_module_t *llvm_module;
llvm_function_t *llvm_fptr;
llvm_alloca_inst_t *virtual_stack, *stack_ptr, *flags;
llvm_alloca_inst_t *flags;
vm::ctx_t *vm_ctx;
std::uintptr_t rtn_begin;
std::shared_ptr< llvm_irbuilder_t > ir_builder;
std::map< zydis_register_t, llvm_global_value_t * > native_registers;
std::vector< llvm_alloca_inst_t * > virtual_registers;
std::vector< std::pair< std::uintptr_t, llvm_basic_block_t * > > llvm_code_blocks;
std::vector< vm::instrs::code_block_t > vmp2_code_blocks;
void push( std::uint8_t byte_size, llvm::Value *input_val );
void push( std::uint8_t byte_size, llvm_value_t *input_val );
llvm::Value *pop( std::uint8_t byte_size );
llvm::Value *peek( std::uint8_t byte_size, std::uint8_t byte_offset = 0u );
llvm::Value *load_value( std::uint8_t byte_size, llvm_global_value_t *global );
llvm::Value *load_value( std::uint8_t byte_size, llvm_alloca_inst_t *var );
@@ -65,10 +64,8 @@ namespace vm
llvm_value_t *compute_pf( std::uint8_t byte_size, llvm_value_t *val );
llvm_value_t *combine_flags( llvm_value_t *cf, llvm_value_t *pf, llvm_value_t *af, llvm_value_t *zf,
llvm_value_t *sf, llvm_value_t *of );
void create_native_registers( void );
void create_virtual_registers( void );
void create_routine( void );
void create_virtual_stack( void );
void lift_vm_entry( void );
};
} // namespace vm

@@ -27,7 +27,8 @@ namespace vm
auto sf = rtn->compute_sf( byte_size, u_sum );
auto zf = rtn->compute_zf( byte_size, u_sum );
auto pf = rtn->compute_pf( byte_size, u_sum );
auto pf = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ),
0 ); // TODO make clean PF bit computation...
auto flags_calc = rtn->combine_flags(
cf, pf, llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 ), zf, sf, of );

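On the PF TODO above: x86's parity flag is set when the low 8 bits of the result contain an even number of 1 bits, so a clean computation only needs the low byte and a few xor-folds. Here is a sketch of one way to do it, assuming an IRBuilder `b` and an i64 `result`; whether the bit then needs shifting into position 2 of RFLAGS depends on what combine_flags expects.

#include <llvm/IR/IRBuilder.h>

// sketch: PF = 1 iff the low byte of `result` has even parity...
llvm::Value *compute_pf64( llvm::IRBuilder<> &b, llvm::Value *result )
{
    auto lo = b.CreateAnd( result, b.getInt64( 0xff ) );       // only the low 8 bits matter
    auto p = b.CreateXor( lo, b.CreateLShr( lo, 4 ) );         // xor-fold the byte down to bit 0
    p = b.CreateXor( p, b.CreateLShr( p, 2 ) );
    p = b.CreateXor( p, b.CreateLShr( p, 1 ) );
    auto odd = b.CreateAnd( p, b.getInt64( 1 ) );              // 1 when an odd number of bits are set
    return b.CreateXor( odd, b.getInt64( 1 ) );                // PF: even parity -> 1
}
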
@@ -2,16 +2,14 @@
namespace vm
{
vm::llvm_value_t *lifters_t::and_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *lhs,
vm::llvm_value_t *rhs )
vm::llvm_value_t *lifters_t::and_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *result )
{
auto t0 = rtn->ir_builder->CreateAnd( lhs, rhs );
auto cf = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, byte_size * 8 ), 0 );
auto of = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, byte_size * 8 ), 0 );
auto cf = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 );
auto of = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 );
auto sf = rtn->compute_sf( byte_size, t0 );
auto zf = rtn->compute_zf( byte_size, t0 );
auto pf = rtn->compute_pf( byte_size, t0 );
auto sf = rtn->compute_sf( byte_size, result );
auto zf = rtn->compute_zf( byte_size, result );
auto pf = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 );
return rtn->combine_flags( cf, pf, llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 ),
zf, sf, of );
@@ -29,7 +27,7 @@ namespace vm
auto t3 = ir_builder->CreateAnd( { t1_not, t2_not } );
rtn->push( 8, t3 );
auto flags = and_flags( rtn, 8, t1, t2 );
auto flags = and_flags( rtn, 8, t3 );
ir_builder->CreateStore( flags, rtn->flags );
rtn->push( 8, rtn->load_value( 8, rtn->flags ) );
};
@@ -46,11 +44,9 @@ namespace vm
auto t3 = ir_builder->CreateAnd( { t1_not, t2_not } );
rtn->push( 4, t3 );
// TODO: fix code for and_flags... setting byte value to anything
// other than 8 causes asserts to fire due to different types...
//
// auto flags = and_flags( rtn, 4, t1, t2 );
// ir_builder->CreateStore( flags, rtn->flags );
auto flags = and_flags( rtn, 4, t3 );
ir_builder->CreateStore( flags, rtn->flags );
rtn->push( 8, rtn->load_value( 8, rtn->flags ) );
};
} // namespace vm

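The TODO removed above ("setting byte value to anything other than 8 causes asserts to fire due to different types") most likely came from mixing integer widths: cf/of were built byte_size * 8 bits wide while the other flag values were i64, and LLVM asserts when an instruction's operands disagree on type. One way to keep combine_flags happy for any byte_size is to normalize every flag to i64 before combining — a sketch under that assumption (the helper name is mine, not the project's):

#include <llvm/IR/IRBuilder.h>

// sketch: widen a flag value to i64 so every operand handed to combine_flags
// has the same type, regardless of the operation's byte_size...
llvm::Value *to_flag64( llvm::IRBuilder<> &b, llvm::Value *flag )
{
    return flag->getType()->isIntegerTy( 64 ) ? flag : b.CreateZExt( flag, b.getInt64Ty() );
}

Usage would simply be cf = to_flag64( b, cf ); and so on for each flag right before the combine_flags call.
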
@@ -5,15 +5,6 @@ namespace vm
lifters_t::lifter_callback_t lifters_t::pushvsp =
[ & ]( vm::vmp_rtn_t *rtn, const vm::instrs::code_block_t &vm_code_block,
const vm::instrs::virt_instr_t &vinstr, llvm::IRBuilder<> *ir_builder ) {
auto current_spi =
ir_builder->CreateLoad( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), rtn->stack_ptr, false );
auto *i64_zero = llvm::ConstantInt::get( *rtn->llvm_ctx, llvm::APInt( 64, 0 ) );
llvm_value_t *indices[ 2 ] = { i64_zero, current_spi };
auto stack_ptr =
ir_builder->CreateInBoundsGEP( rtn->virtual_stack, llvm::ArrayRef< llvm::Value * >( indices, 2 ) );
auto stack_ptr_val = ir_builder->CreatePtrToInt( stack_ptr, ir_builder->getInt64Ty() );
rtn->push( 8, stack_ptr_val );
};
}

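The new pushvsp body is not visible in this hunk; under the stack-as-argument model one plausible shape — purely a sketch built from names that appear elsewhere in this diff, not necessarily what the commit actually emits — is to push the current value of the stack-pointer argument itself:

// hypothetical pushvsp body under the new model (names taken from this diff)...
auto rsp     = rtn->llvm_fptr->getArg( 0 );
auto rsp_val = ir_builder->CreatePtrToInt( rsp, ir_builder->getInt64Ty() );
rtn->push( 8, rsp_val );
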
@@ -2,6 +2,24 @@
namespace vm
{
// our undefined behavior is that we don't model cases where the shift count is zero...
vm::llvm_value_t *lifters_t::shr_flags( vm::vmp_rtn_t *rtn, std::uint8_t byte_size, vm::llvm_value_t *lhs,
vm::llvm_value_t *rhs, vm::llvm_value_t *result )
{
auto op_size = llvm::IntegerType::get( *rtn->llvm_ctx, byte_size * 8 );
auto msb = rtn->ir_builder->CreateLShr( lhs, ( byte_size * 8 ) - 1 );
auto cf = rtn->ir_builder->CreateZExt( msb, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ) );
auto of =
rtn->compute_sf( byte_size, lhs ); // we reuse the compute_sf helper since the flag expression is the same
auto sf = rtn->compute_sf( byte_size, result );
auto zf = rtn->compute_zf( byte_size, result );
auto pf = llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 );
return rtn->combine_flags( cf, pf, llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0 ),
zf, sf, of );
}
lifters_t::lifter_callback_t lifters_t::shrq =
[ & ]( vm::vmp_rtn_t *rtn, const vm::instrs::code_block_t &vm_code_block,
const vm::instrs::virt_instr_t &vinstr, llvm::IRBuilder<> *ir_builder ) {
@@ -10,11 +28,9 @@ namespace vm
auto t3 = ir_builder->CreateIntCast( t2, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), false );
auto t4 = ir_builder->CreateLShr( t1, t3 );
// TODO: Compute flags
//
//
auto flags = lifters_t::shr_flags( rtn, 8, t1, t3, t4 );
ir_builder->CreateStore( flags, rtn->flags );
rtn->push( 8, t4 );
rtn->push( 8, rtn->load_value( 8, rtn->flags ) );
};
}
} // namespace vm

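For reference on the flag model above: architecturally, SHR's CF is the last bit shifted out, i.e. bit (count - 1) of the original operand, and flags are left untouched when the count is zero — exactly the case the comment says is not modelled. OF is only defined for 1-bit shifts, where it equals the MSB of the original operand, which is why reusing compute_sf is reasonable. The hunk derives CF from the MSB instead; a sketch of the architectural CF, assuming an IRBuilder `b` and i64 `lhs`/`count`:

#include <llvm/IR/IRBuilder.h>

// sketch: CF for SHR = the last bit shifted out = bit (count - 1) of lhs...
llvm::Value *shr_carry( llvm::IRBuilder<> &b, llvm::Value *lhs, llvm::Value *count )
{
    auto shift = b.CreateSub( count, b.getInt64( 1 ) );   // count - 1 (count == 0 not modelled)
    auto moved = b.CreateLShr( lhs, shift );              // move that bit down to position 0
    return b.CreateAnd( moved, b.getInt64( 1 ) );         // isolate it
}
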
@@ -7,7 +7,7 @@ namespace vm
const vm::instrs::virt_instr_t &vinstr, llvm::IRBuilder<> *ir_builder ) {
auto t1 = rtn->pop( 8 );
auto vreg = rtn->virtual_registers[ vinstr.operand.imm.u ? vinstr.operand.imm.u / 8 : 0 ];
ir_builder->CreateStore( t1, vreg )->setAlignment( llvm::Align( 8 ) );
ir_builder->CreateStore( t1, vreg );
};
lifters_t::lifter_callback_t lifters_t::sregdw =

@@ -12,98 +12,8 @@ namespace vm
create_routine();
ir_builder = std::make_shared< llvm_irbuilder_t >( *llvm_ctx );
ir_builder->SetInsertPoint( llvm_code_blocks[ 0 ].second );
// do not change the ordering of these function calls...
create_native_registers();
create_virtual_stack();
lift_vm_entry();
create_virtual_registers();
}
void vmp_rtn_t::lift_vm_entry( void )
{
for ( const auto &instr : vm_ctx->vm_entry )
{
if ( instr.instr.mnemonic == ZYDIS_MNEMONIC_PUSH )
{
// push [xxxxx] we know this is zero and the next push is the image base...
if ( instr.instr.operands[ 0 ].type == ZYDIS_OPERAND_TYPE_MEMORY )
{
push( 8, llvm::ConstantInt::get( ir_builder->getInt64Ty(), 0ull ) );
push( 8, llvm::ConstantInt::get( ir_builder->getInt64Ty(), vm_ctx->image_base ) );
break; // dont make these if statements a switch case because we need to use this break...
}
else if ( instr.instr.operands[ 0 ].type == ZYDIS_OPERAND_TYPE_IMMEDIATE )
{
push( 8, llvm::ConstantInt::get( ir_builder->getInt64Ty(), instr.instr.operands[ 0 ].imm.value.u,
false ) );
}
else if ( instr.instr.operands[ 0 ].type == ZYDIS_OPERAND_TYPE_REGISTER )
{
push( 8, load_value( 8, native_registers[ instr.instr.operands[ 0 ].reg.value ] ) );
}
}
else if ( instr.instr.mnemonic == ZYDIS_MNEMONIC_PUSHFQ )
{
// just push 0 as the value itself wont matter...
push( 8, load_value( 8, native_registers[ ZYDIS_REGISTER_RFLAGS ] ) );
}
}
}
void vmp_rtn_t::create_native_registers( void )
{
llvm_module->getOrInsertGlobal( "rax", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RAX ] = llvm_module->getGlobalVariable( "rax" );
llvm_module->getOrInsertGlobal( "rbx", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RBX ] = llvm_module->getGlobalVariable( "rbx" );
llvm_module->getOrInsertGlobal( "rcx", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RCX ] = llvm_module->getGlobalVariable( "rcx" );
llvm_module->getOrInsertGlobal( "rdx", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RDX ] = llvm_module->getGlobalVariable( "rdx" );
llvm_module->getOrInsertGlobal( "rsi", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RSI ] = llvm_module->getGlobalVariable( "rsi" );
llvm_module->getOrInsertGlobal( "rdi", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RDI ] = llvm_module->getGlobalVariable( "rdi" );
llvm_module->getOrInsertGlobal( "rbp", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RBP ] = llvm_module->getGlobalVariable( "rbp" );
llvm_module->getOrInsertGlobal( "rsp", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RSP ] = llvm_module->getGlobalVariable( "rsp" );
llvm_module->getOrInsertGlobal( "r8", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R8 ] = llvm_module->getGlobalVariable( "r8" );
llvm_module->getOrInsertGlobal( "r9", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R9 ] = llvm_module->getGlobalVariable( "r9" );
llvm_module->getOrInsertGlobal( "r10", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R10 ] = llvm_module->getGlobalVariable( "r10" );
llvm_module->getOrInsertGlobal( "r11", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R11 ] = llvm_module->getGlobalVariable( "r11" );
llvm_module->getOrInsertGlobal( "r12", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R12 ] = llvm_module->getGlobalVariable( "r12" );
llvm_module->getOrInsertGlobal( "r13", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R13 ] = llvm_module->getGlobalVariable( "r13" );
llvm_module->getOrInsertGlobal( "r14", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R14 ] = llvm_module->getGlobalVariable( "r14" );
llvm_module->getOrInsertGlobal( "r15", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_R15 ] = llvm_module->getGlobalVariable( "r15" );
llvm_module->getOrInsertGlobal( "rflags", ir_builder->getInt64Ty() );
native_registers[ ZYDIS_REGISTER_RFLAGS ] = llvm_module->getGlobalVariable( "rflags" );
flags = ir_builder->CreateAlloca( ir_builder->getInt64Ty(), nullptr, "flags" );
create_virtual_registers();
}
void vmp_rtn_t::create_virtual_registers( void )
@@ -119,7 +29,8 @@
{
// function has no arguments and returns void... maybe change this in the future as i learn
// more and more LLVM...
auto func_ty = llvm::FunctionType::get( llvm::Type::getVoidTy( *llvm_ctx ), false );
auto func_ty = llvm::FunctionType::get( llvm::Type::getVoidTy( *llvm_ctx ),
{ llvm::PointerType::getInt64PtrTy( *llvm_ctx ) }, false );
// convert the rtn_begin address to a hex string and prepend "rtn_" to it...
std::stringstream rtn_name;
@@ -137,78 +48,33 @@ namespace vm
}
}
void vmp_rtn_t::push( std::uint8_t byte_size, llvm_value_t *val )
{
auto spi = ir_builder->CreateLoad( ir_builder->getInt64Ty(), stack_ptr, false );
auto new_spi = ir_builder->CreateSub( spi, llvm::ConstantInt::get( ir_builder->getInt64Ty(), byte_size ) );
ir_builder->CreateStore( new_spi, stack_ptr );
auto *i64_zero = llvm::ConstantInt::get( *llvm_ctx, llvm::APInt( 64, 0 ) );
llvm_value_t *indices[ 2 ] = { i64_zero, new_spi };
auto stack_ptr = ir_builder->CreateInBoundsGEP( virtual_stack, llvm::ArrayRef< llvm_value_t * >( indices, 2 ) );
if ( byte_size > 1 )
{
auto casted_ptr = ir_builder->CreatePointerCast(
stack_ptr, llvm::PointerType::get( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), 0 ) );
ir_builder->CreateStore( val, casted_ptr );
}
else
ir_builder->CreateStore( val, stack_ptr );
}
llvm_value_t *vmp_rtn_t::pop( std::uint8_t byte_size )
void vmp_rtn_t::push( std::uint8_t num_bytes, llvm_value_t *val )
{
llvm_value_t *output_value = nullptr;
auto current_spi = ir_builder->CreateLoad( ir_builder->getInt64Ty(), stack_ptr, false );
llvm_value_t *i64_zero = llvm::ConstantInt::get( *llvm_ctx, llvm::APInt( 64, 0 ) );
llvm_value_t *indices[ 2 ] = { i64_zero, current_spi };
auto top_stack = ir_builder->CreateInBoundsGEP( virtual_stack, llvm::ArrayRef< llvm::Value * >( indices, 2 ) );
if ( byte_size > 1 )
{
auto casted_ptr = ir_builder->CreatePointerCast(
top_stack, llvm::PointerType::get( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), 0 ) );
// sub rsp, num_bytes
auto rsp = llvm_fptr->getArg( 0 );
auto rsp_i64 = ir_builder->CreatePtrToInt( rsp, ir_builder->getInt64Ty() );
auto sub_rsp_val = ir_builder->CreateSub( rsp_i64, ir_builder->getInt64( num_bytes ) );
ir_builder->CreateStore( sub_rsp_val, rsp );
output_value = ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), casted_ptr );
}
else
output_value = ir_builder->CreateLoad( ir_builder->getInt8Ty(), top_stack );
auto new_spi = ir_builder->CreateAdd(
current_spi, llvm::ConstantInt::get( llvm::IntegerType::get( *llvm_ctx, 64 ), byte_size ) );
ir_builder->CreateStore( new_spi, stack_ptr );
return output_value;
// mov [rsp], val
auto rsp_cast_ptr = ir_builder->CreatePointerCast(
rsp, llvm::PointerType::get( llvm::IntegerType::get( *llvm_ctx, num_bytes * 8 ), false ) );
ir_builder->CreateStore( val, rsp_cast_ptr );
}
llvm_value_t *vmp_rtn_t::peek( std::uint8_t byte_size, std::uint8_t byte_offset )
llvm_value_t *vmp_rtn_t::pop( std::uint8_t num_bytes )
{
auto current_spi = ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, 64 ), stack_ptr, false );
if ( byte_offset )
{
auto t1 = ir_builder->CreateAdd(
current_spi, llvm::ConstantInt::get( llvm::IntegerType::get( *llvm_ctx, 64 ), byte_offset ) );
current_spi = ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, 64 ), t1, false );
}
auto i64_zero = llvm::ConstantInt::get( *llvm_ctx, llvm::APInt( 64, 0 ) );
llvm_value_t *indices[ 2 ] = { i64_zero, current_spi };
auto top_stack = ir_builder->CreateInBoundsGEP( virtual_stack, llvm::ArrayRef< llvm::Value * >( indices, 2 ) );
// mov rax, [rsp]
auto rsp = llvm_fptr->getArg( 0 );
auto rsp_cast_ptr = ir_builder->CreatePointerCast(
rsp, llvm::PointerType::get( llvm::IntegerType::get( *llvm_ctx, num_bytes * 8 ), false ) );
auto pop_val = ir_builder->CreateLoad( rsp_cast_ptr );
if ( byte_size > 1 )
{
auto casted_ptr = ir_builder->CreatePointerCast(
top_stack, llvm::PointerType::get( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), 0 ) );
return ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), casted_ptr );
}
else
return ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), top_stack );
// add rsp, num_bytes
auto rsp_i64 = ir_builder->CreatePtrToInt( rsp, ir_builder->getInt64Ty() );
auto sub_rsp_val = ir_builder->CreateAdd( rsp_i64, ir_builder->getInt64( num_bytes ) );
ir_builder->CreateStore( sub_rsp_val, rsp );
return pop_val;
}
llvm_value_t *vmp_rtn_t::load_value( std::uint8_t byte_size, llvm_global_value_t *var )
@@ -235,18 +101,6 @@ namespace vm
return ir_builder->CreateLoad( llvm::IntegerType::get( *llvm_ctx, byte_size * 8 ), var );
}
void vmp_rtn_t::create_virtual_stack( void )
{
// allocate stack space...
virtual_stack = ir_builder->CreateAlloca( llvm::ArrayType::get( llvm::IntegerType::get( *llvm_ctx, 8 ), 1024 ),
nullptr, "stack" );
// allocate stack pointer...
stack_ptr = ir_builder->CreateAlloca( llvm::IntegerType::get( *llvm_ctx, 64 ), nullptr, "sp" );
ir_builder->CreateStore( llvm::ConstantInt::get( llvm::IntegerType::getInt64Ty( *llvm_ctx ), 1024 ),
stack_ptr );
}
llvm_function_t *vmp_rtn_t::lift( void )
{
auto &code_blocks = llvm_fptr->getBasicBlockList();
@@ -255,7 +109,6 @@ namespace vm
for ( auto idx = 0u; idx < code_blocks.size(); ++idx )
{
ir_builder->SetInsertPoint( llvm_code_blocks[ idx ].second );
for ( auto &vinstr : vmp2_code_blocks[ idx ].vinstrs )
{
if ( !lifters->lift( this, vmp2_code_blocks[ idx ], vinstr, ir_builder.get() ) )

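Taken together, the routine produced by create_routine now has the shape void rtn_<addr>( i64* ), and push/pop mirror "sub rsp, N" / "mov [rsp], val" against that argument. A self-contained sketch of how a hypothetical native caller could drive such a routine with scratch stack space — the name rtn_example and its stand-in body are illustrative only:

#include <cstdint>
#include <cstdio>

// stand-in for a lifted routine: argument 1 is the top of the caller's stack,
// and the body pushes one value the way the push() comments describe
// (sub rsp, 8 then mov [rsp], val)...
void rtn_example( std::uint64_t *stack_top )
{
    stack_top -= 1;
    *stack_top = 0x1337;
}

int main()
{
    std::uint64_t scratch[ 1024 ] = {};   // scratch buffer standing in for the old alloca'd virtual stack
    rtn_example( scratch + 1024 );        // the stack grows down from the end of the buffer
    std::printf( "pushed: 0x%llx\n", ( unsigned long long )scratch[ 1023 ] );
}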