Implement flag calculations

Colton1skees 3 years ago
parent 86a21dca30
commit a1b8070cc1

@@ -27,13 +27,14 @@ namespace vmp2::lifters
extern lifter_callback_t pushvsp;
extern lifter_callback_t addq;
extern lifter_callback_t addw;
extern lifter_callback_t sregq;
extern lifter_callback_t lregq;
inline std::map< vm::handler::mnemonic_t, lifter_callback_t * > lifters = {
{ vm::handler::LCONSTQ, &lconstq }, { vm::handler::LCONSTDWSXQ, &lconstdwsxq },
{ vm::handler::ADDQ, &addq }, { vm::handler::ADDW, &addw },
{ vm::handler::PUSHVSP, &pushvsp },
{ vm::handler::SREGQ, &sregq }, { vm::handler::LREGQ, &lregq } };
inline bool lift( vm::vmp_rtn_t *rtn, const vm::instrs::code_block_t &vm_code_block,
@@ -45,4 +46,4 @@ namespace vmp2::lifters
( *( lifters[ vinstr.mnemonic_t ] ) )( rtn, vm_code_block, vinstr, ir_builder );
return true;
}
} // namespace vmp2::lifters
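For reference, a minimal standalone sketch of the dispatch pattern this map uses, with stand-in types; lifter_callback_t and the mnemonic enum are defined elsewhere in the tree, so everything named here is illustrative only:

#include <cstdio>
#include <functional>
#include <map>

enum mnemonic_t { ADDQ, ADDW };
using lifter_callback_t = std::function< void( int /*byte size*/ ) >;

// one callback can serve several mnemonics, as ADDW/ADDQ could above
static lifter_callback_t addq = []( int byte_size ) { std::printf( "lift ADD, %d bytes\n", byte_size ); };

static std::map< mnemonic_t, lifter_callback_t * > lifters = { { ADDQ, &addq }, { ADDW, &addq } };

int main()
{
// same call shape as lift() above: look up the handler, then invoke through the pointer
( *lifters[ ADDW ] )( 2 );
}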

@@ -2,16 +2,117 @@
namespace vmp2::lifters
{
llvm::Value* add_flags( vm::vmp_rtn_t *rtn, uint8_t byte_size, llvm::Value *lhs, llvm::Value *rhs )
{
// Get intrinsics; the *add.with.overflow intrinsics are overloaded on a single integer type,
// so getDeclaration takes exactly one type to mangle into the intrinsic name
auto operandSize = llvm::IntegerType::get( *rtn->llvm_ctx, byte_size * 8 );
auto sadd_with_overflow = llvm::Intrinsic::getDeclaration(
rtn->llvm_module, llvm::Intrinsic::sadd_with_overflow, { operandSize } );
auto uadd_with_overflow = llvm::Intrinsic::getDeclaration(
rtn->llvm_module, llvm::Intrinsic::uadd_with_overflow, { operandSize } );
auto ctpop = llvm::Intrinsic::getDeclaration( rtn->llvm_module, llvm::Intrinsic::ctpop,
{ llvm::IntegerType::get( *rtn->llvm_ctx, 64 ) } );
// Execute unsigned add intrinsic
auto u_add = rtn->ir_builder->CreateCall( uadd_with_overflow, { lhs, rhs }, "u_add" );
// Retrieve unsigned sum
auto u_sum = rtn->ir_builder->CreateExtractValue( u_add, { 0 }, "u_sum" );
// Retrieve overflow bit of the unsigned add operation
auto u_of_bit = rtn->ir_builder->CreateExtractValue( u_add, { 1 }, "u_of_bit" );
// Compute CF from the unsigned OF bit
auto cf =
rtn->ir_builder->CreateZExt( u_of_bit, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), "cf" );
// Execute signed add intrinsic
auto s_add = rtn->ir_builder->CreateCall( sadd_with_overflow, { lhs, rhs }, "s_add" );
// Retrieve signed sum
auto s_sum = rtn->ir_builder->CreateExtractValue( s_add, { 0 }, "s_sum" );
// Retrieve overflow bit of the signed add operation
auto s_of_bit = rtn->ir_builder->CreateExtractValue( s_add, { 1 }, "s_of_bit" );
// Compute OF from the signed OF bit
auto of =
rtn->ir_builder->CreateZExt( s_of_bit, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), "of_extended" );
// Shift the sum to grab the sign bit
auto shifted_u_add = rtn->ir_builder->CreateLShr( u_sum, ( byte_size * 8 ) - 1, "shifted_sign" );
// Grab the sign bit
auto sf = rtn->ir_builder->CreateZExt( shifted_u_add, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), "sf" );
// Compare the unsigned sum to zero
auto t_zf = rtn->ir_builder->CreateICmpEQ( u_sum, llvm::ConstantInt::get( operandSize, 0 ), "t_zf" );
// Extend the zf to 64bit
auto zf = rtn->ir_builder->CreateZExt( t_zf, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), "zf" );
// Truncate to the lower 8 bits ( PF only considers the low byte of the result )
auto lower_bits =
rtn->ir_builder->CreateIntCast( u_sum, llvm::IntegerType::get( *rtn->llvm_ctx, 8 ), false, "lower_sum" );
// Extend the lower bits to i64
auto extended_bits =
rtn->ir_builder->CreateZExt( lower_bits, llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), "extended_bits" );
// Compute parity: PF is set when the low byte has an even number of set bits,
// so mask the popcount down to its low bit and invert it before shifting into place
auto popcnt = rtn->ir_builder->CreateCall( ctpop, { extended_bits }, "popcnt" );
auto pf_odd = rtn->ir_builder->CreateAnd( popcnt, 1, "pf_odd" );
auto pf = rtn->ir_builder->CreateXor( pf_odd, 1, "pf" );
auto shifted_pf = rtn->ir_builder->CreateShl( pf, 2, "shifted_pf", true, true );
auto shifted_af = rtn->ir_builder->CreateShl(
llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0), 4, "shifted_af", true, true ); // treat af as zero
auto shifted_zf = rtn->ir_builder->CreateShl( zf, 6, "shifted_zf", true, true );
auto shifted_sf = rtn->ir_builder->CreateShl( sf, 7, "shifted_sf", true, true );
auto shifted_of = rtn->ir_builder->CreateShl( of, 11, "shifted_of", true, true );
auto or1 = rtn->ir_builder->CreateOr( cf, shifted_of );
auto or2 = rtn->ir_builder->CreateOr( or1, shifted_zf );
auto or3 = rtn->ir_builder->CreateOr( or2, shifted_sf );
auto or4 = rtn->ir_builder->CreateOr( or3, shifted_af );
auto or5 = rtn->ir_builder->CreateOr( or4, shifted_pf );
auto or6 = rtn->ir_builder->CreateXor(
or5, llvm::ConstantInt::get( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), 0x202 ) ); // 0x202 = always-set reserved bit 1 | IF ( bit 9 ); TODO: confirm why this is xor'd in
return or6;
}
lifter_callback_t addq = [ & ]( vm::vmp_rtn_t *rtn, const vm::instrs::code_block_t &vm_code_block,
const vm::instrs::virt_instr_t &vinstr, llvm::IRBuilder<> *ir_builder ) {
auto t1 = rtn->pop( 8 );
auto t2 = rtn->pop( 8 );
auto flags = add_flags( rtn, 8, t1, t2 );
auto t3 = ir_builder->CreateAdd( t1, t2 );
rtn->push( 8, t3 );
// Store the computed flags into RFLAGS; CreateStore takes the value first, then the pointer
ir_builder->CreateStore( flags, rtn->native_registers[ ZYDIS_REGISTER_RFLAGS ] );
rtn->push( 8, rtn->load_value( 8, rtn->native_registers[ ZYDIS_REGISTER_RFLAGS ] ) );
};
lifter_callback_t addw = [ & ]( vm::vmp_rtn_t *rtn, const vm::instrs::code_block_t &vm_code_block,
const vm::instrs::virt_instr_t &vinstr, llvm::IRBuilder<> *ir_builder ) {
auto t1 = rtn->pop( 2 );
auto t2 = rtn->pop( 2 );
auto flags = add_flags( rtn, 2, t1, t2 );
auto t3 = ir_builder->CreateAdd( t1, t2 );
rtn->push( 2, t3 );
// Store the computed flags into RFLAGS; CreateStore takes the value first, then the pointer
ir_builder->CreateStore( flags, rtn->native_registers[ ZYDIS_REGISTER_RFLAGS ] );
rtn->push( 8, rtn->load_value( 8, rtn->native_registers[ ZYDIS_REGISTER_RFLAGS ] ) );
};
}
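As a sanity check on the bit positions targeted by the shifts in add_flags, here is a hedged standalone reference in plain C++ with no LLVM: CF at bit 0, PF at bit 2, AF at bit 4 (left zero, as the lifter does), ZF at bit 6, SF at bit 7, OF at bit 11. __builtin_popcountll assumes GCC/Clang; the 0x202 xor is left aside since the commit itself flags it as unresolved:

#include <cstdint>
#include <cstdio>

static uint64_t add_flags_reference( uint64_t lhs, uint64_t rhs )
{
uint64_t sum = lhs + rhs;
uint64_t cf = sum < lhs; // carry out == unsigned overflow
uint64_t of = ( ~( lhs ^ rhs ) & ( lhs ^ sum ) ) >> 63; // signed overflow
uint64_t sf = sum >> 63; // sign bit of the result
uint64_t zf = sum == 0; // zero flag
uint64_t pf = 1 ^ ( __builtin_popcountll( sum & 0xff ) & 1 ); // even parity of the low byte
return cf | ( pf << 2 ) | ( zf << 6 ) | ( sf << 7 ) | ( of << 11 );
}

int main()
{
// ~0 + 1 wraps to zero: expect CF | PF | ZF == 0x45
std::printf( "0x%llx\n", ( unsigned long long )add_flags_reference( ~0ull, 1 ) );
}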

@@ -8,21 +8,13 @@ namespace vmp2::lifters
auto current_stack_index =
ir_builder->CreateLoad( llvm::IntegerType::get( *rtn->llvm_ctx, 64 ), rtn->stack_ptr, false );
// Push the current stack index
rtn->push( 8, current_stack_index );
};
}
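Since PUSHVSP now pushes the stack index itself rather than a host pointer, here is a hypothetical sketch of the stack model this assumes; toy_vstack_t and its members are illustrative names, not from the tree:

#include <cstdint>
#include <cstring>

struct toy_vstack_t
{
uint8_t bytes[ 0x1000 ];
uint64_t index = sizeof( bytes ); // grows downward, like a native stack

void push8( uint64_t value )
{
index -= 8;
std::memcpy( bytes + index, &value, sizeof( value ) );
}
};

int main()
{
toy_vstack_t stack;
// PUSHVSP reduces to pushing the current index onto the stack itself
stack.push8( stack.index );
}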

@@ -15,20 +15,17 @@ namespace vm
ir_builder = std::make_shared< llvm::IRBuilder<> >( *llvm_ctx );
ir_builder->SetInsertPoint( llvm_code_blocks[ 0 ] );
// create native registers...
create_native_registers();
// create stack and stack pointer...
create_virtual_stack();
// lift vm enter pushes to llvm ir...
lift_vm_entry();
// create virtual registers...
create_virtual_registers();
}
void vmp_rtn_t::lift_vm_entry( void )
@@ -270,6 +267,15 @@ namespace vm
llvm::Function *vmp_rtn_t::lift( void )
{
auto &code_blocks = llvm_fptr->getBasicBlockList();
// retrieve the ADD insn for testing
auto targetInsn = vmp2_code_blocks[ 0 ].vinstrs.at( 0x2d );
vmp2::lifters::lift( this, vmp2_code_blocks[ 0 ], targetInsn, ir_builder.get() );
std::printf( "lifted add insn\n" );
llvm_module->print( llvm::outs(), nullptr );
std::printf( "logged add insn function\n" );
// return early while testing the ADD lifter; the block loop below is temporarily skipped
return llvm_fptr;
for ( auto idx = 0u; idx < code_blocks.size(); ++idx )
{
for ( auto &vinstr : vmp2_code_blocks[ idx ].vinstrs )
@@ -285,4 +291,4 @@
}
return llvm_fptr;
}
} // namespace vm
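One possible hardening of the early-return debug path in lift(), using only public LLVM API: run the verifier on the lifted function before dumping the module. verifyFunction returns true when the IR is broken. The helper name is hypothetical, not part of the commit:

#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: verify the lifted routine, then print the whole module.
static void dump_lifted_fn( llvm::Function *fn, llvm::Module *mod )
{
if ( llvm::verifyFunction( *fn, &llvm::errs() ) )
llvm::errs() << "lifted function failed verification\n";

mod->print( llvm::outs(), nullptr );
}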
