added asmjit and mutation example

2.0
_xeroxz 4 years ago
parent 20bce8a1d4
commit fdc6da3a19

@@ -96,7 +96,6 @@
</ItemGroup>
<ItemGroup>
<ClCompile Include="DriverEntry.cpp" />
<ClCompile Include="ObfuscateDemo.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="Theodosius.h" />

@@ -13,9 +13,6 @@
<ClCompile Include="DriverEntry.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="ObfuscateDemo.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Theodosius.h">

@@ -1,7 +1,6 @@
#include "Theodosius.h"
extern "C" int __cdecl drv_entry()
ObfuscateRoutine extern "C" void drv_entry()
{
DbgPrint("> hello world! this is a demo!\n");
DbgPrint("> current pml4 = 0x%p\n", cppdemo::get_dirbase());
DbgPrint("> Hello World!\n");
}

@@ -1,18 +0,0 @@
#include "Theodosius.h"
namespace cppdemo
{
ObfuscateRoutine
unsigned long long get_dirbase()
{
cr3 result;
result.flags =
*(unsigned long long*)(IoGetCurrentProcess() + 0x28);
if (!result.address_of_page_directory)
return -1;
return result.address_of_page_directory << 12;
}
}
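For context on the demo being removed here: the shift by 12 follows from the bit layout of the cr3 union declared in Theodosius.h (shown in the next hunk), where bits 12-47 hold the page-frame number of the PML4. A minimal illustrative check, with a made-up value:
cr3 example;
example.flags = 0x1AD000ull;   // hypothetical value, as if read from KPROCESS + 0x28
// reserved1(3) + pwt(1) + pcd(1) + reserved2(7) occupy bits 0-11, so
// example.address_of_page_directory == 0x1AD and
// (example.address_of_page_directory << 12) == 0x1AD000, the physical base of the PML4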

@@ -1,27 +1,7 @@
#pragma once
#include <intrin.h>
#define ObfuscateRoutine __declspec(code_seg(".theo"), noinline)
#define MutatedRoutine __declspec(code_seg(".theo1"), noinline)
#define EncryptedRoutine __declspec(code_seg(".theo2"), noinline)
#define MutateRoutine __declspec(code_seg(".theo1"), noinline)
extern "C" unsigned long DbgPrint(const char* format, ...);
extern "C" unsigned long long IoGetCurrentProcess();
namespace cppdemo
{
unsigned long long get_dirbase();
}
typedef union
{
struct
{
unsigned long long reserved1 : 3;
unsigned long long page_level_write_through : 1;
unsigned long long page_level_cache_disable : 1;
unsigned long long reserved2 : 7;
unsigned long long address_of_page_directory : 36;
unsigned long long reserved3 : 16;
};
unsigned long long flags;
} cr3;
extern "C" unsigned long long IoGetCurrentProcess();

@@ -85,6 +85,54 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="asmjit\core\archtraits.cpp" />
<ClCompile Include="asmjit\core\assembler.cpp" />
<ClCompile Include="asmjit\core\builder.cpp" />
<ClCompile Include="asmjit\core\codeholder.cpp" />
<ClCompile Include="asmjit\core\codewriter.cpp" />
<ClCompile Include="asmjit\core\compiler.cpp" />
<ClCompile Include="asmjit\core\constpool.cpp" />
<ClCompile Include="asmjit\core\cpuinfo.cpp" />
<ClCompile Include="asmjit\core\emithelper.cpp" />
<ClCompile Include="asmjit\core\emitter.cpp" />
<ClCompile Include="asmjit\core\emitterutils.cpp" />
<ClCompile Include="asmjit\core\environment.cpp" />
<ClCompile Include="asmjit\core\errorhandler.cpp" />
<ClCompile Include="asmjit\core\formatter.cpp" />
<ClCompile Include="asmjit\core\func.cpp" />
<ClCompile Include="asmjit\core\funcargscontext.cpp" />
<ClCompile Include="asmjit\core\globals.cpp" />
<ClCompile Include="asmjit\core\inst.cpp" />
<ClCompile Include="asmjit\core\jitallocator.cpp" />
<ClCompile Include="asmjit\core\jitruntime.cpp" />
<ClCompile Include="asmjit\core\logger.cpp" />
<ClCompile Include="asmjit\core\operand.cpp" />
<ClCompile Include="asmjit\core\osutils.cpp" />
<ClCompile Include="asmjit\core\ralocal.cpp" />
<ClCompile Include="asmjit\core\rapass.cpp" />
<ClCompile Include="asmjit\core\rastack.cpp" />
<ClCompile Include="asmjit\core\string.cpp" />
<ClCompile Include="asmjit\core\support.cpp" />
<ClCompile Include="asmjit\core\target.cpp" />
<ClCompile Include="asmjit\core\type.cpp" />
<ClCompile Include="asmjit\core\virtmem.cpp" />
<ClCompile Include="asmjit\core\zone.cpp" />
<ClCompile Include="asmjit\core\zonehash.cpp" />
<ClCompile Include="asmjit\core\zonelist.cpp" />
<ClCompile Include="asmjit\core\zonestack.cpp" />
<ClCompile Include="asmjit\core\zonetree.cpp" />
<ClCompile Include="asmjit\core\zonevector.cpp" />
<ClCompile Include="asmjit\x86\x86assembler.cpp" />
<ClCompile Include="asmjit\x86\x86builder.cpp" />
<ClCompile Include="asmjit\x86\x86compiler.cpp" />
<ClCompile Include="asmjit\x86\x86emithelper.cpp" />
<ClCompile Include="asmjit\x86\x86features.cpp" />
<ClCompile Include="asmjit\x86\x86formatter.cpp" />
<ClCompile Include="asmjit\x86\x86func.cpp" />
<ClCompile Include="asmjit\x86\x86instapi.cpp" />
<ClCompile Include="asmjit\x86\x86instdb.cpp" />
<ClCompile Include="asmjit\x86\x86operand.cpp" />
<ClCompile Include="asmjit\x86\x86rapass.cpp" />
<ClCompile Include="hmdm_ctx.cpp" />
<ClCompile Include="linker\linker.cpp" />
<ClCompile Include="main.cpp" />
@@ -92,6 +140,77 @@
<ClCompile Include="obfuscation\obfuscation.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="asmjit\asmjit-scope-begin.h" />
<ClInclude Include="asmjit\asmjit-scope-end.h" />
<ClInclude Include="asmjit\asmjit.h" />
<ClInclude Include="asmjit\core.h" />
<ClInclude Include="asmjit\core\api-build_p.h" />
<ClInclude Include="asmjit\core\api-config.h" />
<ClInclude Include="asmjit\core\archcommons.h" />
<ClInclude Include="asmjit\core\archtraits.h" />
<ClInclude Include="asmjit\core\assembler.h" />
<ClInclude Include="asmjit\core\builder.h" />
<ClInclude Include="asmjit\core\codebuffer.h" />
<ClInclude Include="asmjit\core\codeholder.h" />
<ClInclude Include="asmjit\core\codewriter_p.h" />
<ClInclude Include="asmjit\core\compiler.h" />
<ClInclude Include="asmjit\core\compilerdefs.h" />
<ClInclude Include="asmjit\core\constpool.h" />
<ClInclude Include="asmjit\core\cpuinfo.h" />
<ClInclude Include="asmjit\core\datatypes.h" />
<ClInclude Include="asmjit\core\emithelper_p.h" />
<ClInclude Include="asmjit\core\emitter.h" />
<ClInclude Include="asmjit\core\emitterutils_p.h" />
<ClInclude Include="asmjit\core\environment.h" />
<ClInclude Include="asmjit\core\errorhandler.h" />
<ClInclude Include="asmjit\core\features.h" />
<ClInclude Include="asmjit\core\formatter.h" />
<ClInclude Include="asmjit\core\func.h" />
<ClInclude Include="asmjit\core\funcargscontext_p.h" />
<ClInclude Include="asmjit\core\globals.h" />
<ClInclude Include="asmjit\core\inst.h" />
<ClInclude Include="asmjit\core\jitallocator.h" />
<ClInclude Include="asmjit\core\jitruntime.h" />
<ClInclude Include="asmjit\core\logger.h" />
<ClInclude Include="asmjit\core\misc_p.h" />
<ClInclude Include="asmjit\core\operand.h" />
<ClInclude Include="asmjit\core\osutils.h" />
<ClInclude Include="asmjit\core\osutils_p.h" />
<ClInclude Include="asmjit\core\raassignment_p.h" />
<ClInclude Include="asmjit\core\rabuilders_p.h" />
<ClInclude Include="asmjit\core\radefs_p.h" />
<ClInclude Include="asmjit\core\ralocal_p.h" />
<ClInclude Include="asmjit\core\rapass_p.h" />
<ClInclude Include="asmjit\core\rastack_p.h" />
<ClInclude Include="asmjit\core\string.h" />
<ClInclude Include="asmjit\core\support.h" />
<ClInclude Include="asmjit\core\target.h" />
<ClInclude Include="asmjit\core\type.h" />
<ClInclude Include="asmjit\core\virtmem.h" />
<ClInclude Include="asmjit\core\zone.h" />
<ClInclude Include="asmjit\core\zonehash.h" />
<ClInclude Include="asmjit\core\zonelist.h" />
<ClInclude Include="asmjit\core\zonestack.h" />
<ClInclude Include="asmjit\core\zonestring.h" />
<ClInclude Include="asmjit\core\zonetree.h" />
<ClInclude Include="asmjit\core\zonevector.h" />
<ClInclude Include="asmjit\x86.h" />
<ClInclude Include="asmjit\x86\x86archtraits_p.h" />
<ClInclude Include="asmjit\x86\x86assembler.h" />
<ClInclude Include="asmjit\x86\x86builder.h" />
<ClInclude Include="asmjit\x86\x86compiler.h" />
<ClInclude Include="asmjit\x86\x86emithelper_p.h" />
<ClInclude Include="asmjit\x86\x86emitter.h" />
<ClInclude Include="asmjit\x86\x86features.h" />
<ClInclude Include="asmjit\x86\x86formatter_p.h" />
<ClInclude Include="asmjit\x86\x86func_p.h" />
<ClInclude Include="asmjit\x86\x86globals.h" />
<ClInclude Include="asmjit\x86\x86instapi_p.h" />
<ClInclude Include="asmjit\x86\x86instdb.h" />
<ClInclude Include="asmjit\x86\x86instdb_p.h" />
<ClInclude Include="asmjit\x86\x86opcode_p.h" />
<ClInclude Include="asmjit\x86\x86operand.h" />
<ClInclude Include="asmjit\x86\x86rapass_p.h" />
<ClInclude Include="hmdm_ctx.h" />
<ClInclude Include="ia32.hpp" />
<ClInclude Include="linker\linker.hpp" />

@@ -24,6 +24,24 @@
<Filter Include="Header Files\Zycore\API">
<UniqueIdentifier>{c51e3b93-1496-49d7-838f-825d75b29ee6}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\asmjit">
<UniqueIdentifier>{d28d9202-4139-42a0-9f49-71beb5e01670}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\asmjit\core">
<UniqueIdentifier>{a847dc8c-08a3-4ea7-a20d-157963dd41a8}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\asmjit">
<UniqueIdentifier>{706001e9-56f5-41d2-b209-9f5543d0bd11}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\asmjit\core">
<UniqueIdentifier>{a8e52093-e1b2-4ef3-b427-ebea8772bbbf}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\asmjit\x86">
<UniqueIdentifier>{da6ded33-7d62-4f83-b8e7-4d343fe49cd7}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\asmjit\x86">
<UniqueIdentifier>{244a52bf-80cb-43ac-ac0d-a6aad89b9eb0}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="main.cpp">
@@ -41,6 +59,150 @@
<ClCompile Include="obfuscation\obfuscation.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\archtraits.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\assembler.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\builder.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\codeholder.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\codewriter.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\compiler.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\constpool.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\cpuinfo.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\emithelper.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\emitter.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\emitterutils.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\environment.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\errorhandler.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\formatter.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\func.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\funcargscontext.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\globals.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\inst.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\jitallocator.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\jitruntime.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\logger.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\operand.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\osutils.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\ralocal.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\rapass.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\rastack.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\string.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\support.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\target.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\type.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\virtmem.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zone.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zonehash.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zonelist.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zonestack.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zonetree.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\core\zonevector.cpp">
<Filter>Source Files\asmjit\core</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86assembler.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86builder.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86compiler.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86emithelper.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86features.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86formatter.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86func.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86instapi.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86instdb.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86operand.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
<ClCompile Include="asmjit\x86\x86rapass.cpp">
<Filter>Source Files\asmjit\x86</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Zydis\Generated\EnumInstructionCategory.h">
@@ -205,6 +367,219 @@
<ClInclude Include="obfuscation\obfuscation.hpp">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="asmjit\asmjit.h">
<Filter>Header Files\asmjit</Filter>
</ClInclude>
<ClInclude Include="asmjit\asmjit-scope-begin.h">
<Filter>Header Files\asmjit</Filter>
</ClInclude>
<ClInclude Include="asmjit\asmjit-scope-end.h">
<Filter>Header Files\asmjit</Filter>
</ClInclude>
<ClInclude Include="asmjit\core.h">
<Filter>Header Files\asmjit</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86.h">
<Filter>Header Files\asmjit</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\api-build_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\api-config.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\archcommons.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\archtraits.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\assembler.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\builder.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\codebuffer.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\codeholder.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\codewriter_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\compiler.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\compilerdefs.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\constpool.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\cpuinfo.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\datatypes.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\emithelper_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\emitter.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\emitterutils_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\environment.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\errorhandler.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\features.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\formatter.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\func.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\funcargscontext_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\globals.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\inst.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\jitallocator.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\jitruntime.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\logger.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\misc_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\operand.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\osutils.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\osutils_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\raassignment_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\rabuilders_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\radefs_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\ralocal_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\rapass_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\rastack_p.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\string.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\support.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\target.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\type.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\virtmem.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zone.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonehash.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonelist.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonestack.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonestring.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonetree.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\core\zonevector.h">
<Filter>Header Files\asmjit\core</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86archtraits_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86assembler.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86builder.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86compiler.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86emithelper_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86emitter.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86features.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86formatter_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86func_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86globals.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86instapi_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86instdb.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86instdb_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86opcode_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86operand.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
<ClInclude Include="asmjit\x86\x86rapass_p.h">
<Filter>Header Files\asmjit\x86</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<MASM Include="syscall_handler.asm">

@@ -0,0 +1,201 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- asmjit visualizer for Visual Studio (natvis) -->
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<Type Name="asmjit::String">
<Intrinsic Name="isSmall" Expression="(_type &lt; 0x1F)"/>
<DisplayString Condition="isSmall()">{_small.data, s8}</DisplayString>
<DisplayString Condition="!isSmall()">{_large.data, s8}</DisplayString>
<Expand HideRawView="true">
<Synthetic Name="_type">
<DisplayString Condition="(_type &lt; 0x1F)">Small</DisplayString>
<DisplayString Condition="(_type == 0x1F)">Large</DisplayString>
<DisplayString Condition="(_type &gt; 0x1F)">External</DisplayString>
</Synthetic>
<Item Name="_size" Condition="isSmall()" ExcludeView="simple">(int)_small.type, d</Item>
<Item Name="_size" Condition="!isSmall()" ExcludeView="simple">_large.size, d</Item>
<Item Name="_capacity" Condition="isSmall()" ExcludeView="simple">asmjit::String::kSSOCapacity, d</Item>
<Item Name="_capacity" Condition="!isSmall()" ExcludeView="simple">_large.capacity, d</Item>
<Item Name="_data" Condition="isSmall()" ExcludeView="simple">_small.data, s8</Item>
<Item Name="_data" Condition="!isSmall()" ExcludeView="simple">_large.data, s8</Item>
</Expand>
</Type>
<Type Name="asmjit::ZoneVector&lt;*&gt;">
<DisplayString>{{ [size={_size, d} capacity={_capacity, d}] }}</DisplayString>
<Expand>
<Item Name="_size" ExcludeView="simple">_size, d</Item>
<Item Name="_capacity" ExcludeView="simple">_capacity, d</Item>
<ArrayItems>
<Size>_size</Size>
<ValuePointer>(($T1*)_data)</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="asmjit::Operand_">
<Intrinsic Name="opType" Expression="(unsigned int)(_signature &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_signature &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="regGroup" Expression="(_signature &gt;&gt; 8) &amp; 0xF" />
<Intrinsic Name="memBaseType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="memIndexType" Expression="(_signature &gt;&gt; 8) &amp; 0x1F" />
<Intrinsic Name="memAddrType" Expression="(_signature &gt;&gt; 13) &amp; 0x3" />
<Intrinsic Name="memRegHome" Expression="(_signature &gt;&gt; 15) &amp; 0x1" />
<Intrinsic Name="memBaseId" Expression="_baseId" />
<Intrinsic Name="memIndexId" Expression="_data[0]" />
<Intrinsic Name="memOffset32b" Expression="(__int64)int(_data[1])" />
<Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_baseId &lt;&lt; 32) | ((unsigned __int64)_data[1])" />
<Intrinsic Name="memOffset" Expression="memBaseType() != 0 ? memOffset32b() : memOffset64b()" />
<Intrinsic Name="immValue" Expression="((__int64)_data[1] &lt;&lt; 32) | (__int64)_data[0]" />
<DisplayString Condition="opType() == 0">[None]</DisplayString>
<DisplayString Condition="opType() == 1">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == 2">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="opType() == 3">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="opType() == 4">[Label] {{ id={_baseId} }}</DisplayString>
<DisplayString Condition="opType() &gt; 4">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="_signature">_signature, X</Item>
<Item Name="_signature.any.type">(asmjit::Operand_::OpType)opType()</Item>
<Item Name="_signature.any.size">opSize(), d</Item>
<Item Name="_signature.reg.type" Condition="opType() == 1">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="_signature.reg.group" Condition="opType() == 1">(asmjit::BaseReg::RegGroup)regGroup()</Item>
<Item Name="_signature.mem.baseType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memBaseType()</Item>
<Item Name="_signature.mem.indexType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memIndexType()</Item>
<Item Name="_signature.mem.addrType" Condition="opType() == 2">(asmjit::BaseMem::AddrType)memAddrType()</Item>
<Item Name="_signature.mem.regHome" Condition="opType() == 2">(bool)memRegHome()</Item>
<Item Name="_baseId">_baseId</Item>
<Item Name="_data[0]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[0]</Item>
<Item Name="_data[1]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[1]</Item>
<Item Name="_data[IndexId]" Condition="opType() == 2">_data[0]</Item>
<Item Name="_data[OffsetLo]" Condition="opType() == 2">_data[1]</Item>
<Item Name="_data[ImmHi]" Condition="opType() == 3">_data[0]</Item>
<Item Name="_data[ImmLo]" Condition="opType() == 3">_data[1]</Item>
</Expand>
</Type>
<Type Name="asmjit::FuncValue">
<Intrinsic Name="isReg" Expression="(_data &amp; asmjit::FuncValue::kFlagIsReg) != 0" />
<Intrinsic Name="isStack" Expression="(_data &amp; asmjit::FuncValue::kFlagIsStack) != 0" />
<Intrinsic Name="isIndirect" Expression="(_data &amp; asmjit::FuncValue::kFlagIsIndirect) != 0" />
<Intrinsic Name="isDone" Expression="(_data &amp; asmjit::FuncValue::kFlagIsDone) != 0" />
<Intrinsic Name="typeId" Expression="((_data &amp; asmjit::FuncValue::kTypeIdMask) &gt;&gt; asmjit::FuncValue::kTypeIdShift)" />
<Intrinsic Name="regId" Expression="((_data &amp; asmjit::FuncValue::kRegIdMask) &gt;&gt; asmjit::FuncValue::kRegIdShift)" />
<Intrinsic Name="regType" Expression="((_data &amp; asmjit::FuncValue::kRegTypeMask) &gt;&gt; asmjit::FuncValue::kRegTypeShift)" />
<Intrinsic Name="stackOffset" Expression="((_data &amp; asmjit::FuncValue::kStackOffsetMask) &gt;&gt; asmjit::FuncValue::kStackOffsetShift)" />
<DisplayString Condition="isReg()">[RegValue {{ regType={regType()} indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="isStack()">[StackValue {{ indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="!isReg() &amp;&amp; !isStack()">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="data">_data</Item>
<Item Name="typeId">(asmjit::Type::Id)(typeId())</Item>
<Item Name="regType" Condition="isReg()">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="regId" Condition="isReg()">regId()</Item>
<Item Name="stackOffset" Condition="isStack()">stackOffset()</Item>
</Expand>
</Type>
<Type Name="asmjit::BaseNode">
<Intrinsic Name="nodeType" Expression="_any._nodeType" />
<Intrinsic Name="isInst" Expression="nodeType() == asmjit::BaseNode::kNodeInst"></Intrinsic>
<Intrinsic Name="isSection" Expression="nodeType() == asmjit::BaseNode::kNodeSection"></Intrinsic>
<Intrinsic Name="isLabel" Expression="nodeType() == asmjit::BaseNode::kNodeLabel"></Intrinsic>
<Intrinsic Name="isAlign" Expression="nodeType() == asmjit::BaseNode::kNodeAlign"></Intrinsic>
<Intrinsic Name="isEmbedData" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedData"></Intrinsic>
<Intrinsic Name="isEmbedLabel" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabel"></Intrinsic>
<Intrinsic Name="isEmbedLabelDelta" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="isConstPool" Expression="nodeType() == asmjit::BaseNode::kNodeConstPool"></Intrinsic>
<Intrinsic Name="isComment" Expression="nodeType() == asmjit::BaseNode::kNodeComment"></Intrinsic>
<Intrinsic Name="isSentinel" Expression="nodeType() == asmjit::BaseNode::kNodeSentinel"></Intrinsic>
<Intrinsic Name="isJump" Expression="nodeType() == asmjit::BaseNode::kNodeJump"></Intrinsic>
<Intrinsic Name="isFunc" Expression="nodeType() == asmjit::BaseNode::kNodeFunc"></Intrinsic>
<Intrinsic Name="isFuncRet" Expression="nodeType() == asmjit::BaseNode::kNodeFuncRet"></Intrinsic>
<Intrinsic Name="isInvoke" Expression="nodeType() == asmjit::BaseNode::kNodeInvoke"></Intrinsic>
<Intrinsic Name="actsAsInst" Expression="isInst() || isJump() || isFunc() || isFuncRet() || isInvoke()" />
<Intrinsic Name="actsAsLabel" Expression="isLabel() || isFunc()" />
<DisplayString Condition="isInst()">[InstNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SectionNode]</DisplayString>
<DisplayString Condition="isLabel()">[LabelNode]</DisplayString>
<DisplayString Condition="isAlign()">[AlignNode]</DisplayString>
<DisplayString Condition="isEmbedData()">[EmbedDataNode]</DisplayString>
<DisplayString Condition="isEmbedLabel()">[EmbedLabelNode]</DisplayString>
<DisplayString Condition="isEmbedLabelDelta()">[EmbedLabelDeltaNode]</DisplayString>
<DisplayString Condition="isConstPool()">[ConstPoolNode]</DisplayString>
<DisplayString Condition="isComment()">[CommentNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SentinelNode]</DisplayString>
<DisplayString Condition="isJump()">[JumpNode]</DisplayString>
<DisplayString Condition="isFunc()">[FuncNode]</DisplayString>
<DisplayString Condition="isFuncRet()">[FuncRetNode]</DisplayString>
<DisplayString Condition="isInvoke()">[InvokeNode]</DisplayString>
<DisplayString Condition="nodeType() == 0 || nodeType() &gt; 18">[UnknownNode {nodeType(), d}]</DisplayString>
<Expand HideRawView="true">
<Item Name="prev">_prev</Item>
<Item Name="next">_next</Item>
<Item Name="nodeType">(asmjit::BaseNode::NodeType)_any._nodeType</Item>
<Item Name="nodeFlags">(asmjit::BaseNode::Flags)_any._nodeFlags</Item>
<Item Name="position">_position</Item>
<Item Name="userData.u64">_userDataU64</Item>
<Item Name="userData.ptr">_userDataPtr</Item>
<Item Name="passData">_passData</Item>
<Item Name="inlineComment">_inlineComment, s8</Item>
<Item Name="baseInst" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_baseInst</Item>
<Item Name="opCount" Condition="actsAsInst()">_inst._opCount</Item>
<Item Name="opCapacity" Condition="actsAsInst()">_inst._opCapacity</Item>
<Item Name="opArray" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_opArray, [_inst._opCount]</Item>
<Item Name="sectionId" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_id</Item>
<Item Name="nextSection" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_nextSection</Item>
<Item Name="labelId" Condition="isLabel()">((asmjit::LabelNode*)this)-&gt;_id</Item>
<Item Name="alignMode" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignMode</Item>
<Item Name="alignment" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignment</Item>
<Item Name="typeId" Condition="isEmbedData()">_embed._typeId, d</Item>
<Item Name="typeSize" Condition="isEmbedData()">_embed._typeSize, d</Item>
<Item Name="itemCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_itemCount</Item>
<Item Name="repeatCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_repeatCount</Item>
<Item Name="inlineData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_inlineData</Item>
<Item Name="externalData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_externalData</Item>
<Item Name="labelId" Condition="isEmbedLabel()">((asmjit::EmbedLabelNode*)this)-&gt;_id</Item>
<Item Name="labelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_id</Item>
<Item Name="baseId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_baseId</Item>
<Item Name="dataSize" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_dataSize</Item>
<Item Name="constPool" Condition="isConstPool()">((asmjit::ConstPoolNode*)this)-&gt;_constPool</Item>
<Item Name="sentinel.sentinelType" Condition="isSentinel()">(asmjit::SentinelNode::SentinelType)_sentinel._sentinelType</Item>
<Item Name="annotation" Condition="isJump()">((asmjit::JumpNode*)this)-&gt;_annotation</Item>
<Item Name="funcDetail" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_funcDetail</Item>
<Item Name="frame" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_frame</Item>
<Item Name="exitNode" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_exitNode</Item>
<Item Name="end" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_end</Item>
<Item Name="args" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_args, [((asmjit::FuncNode*)this)-&gt;_funcDetail._argCount]</Item>
<Item Name="funcDetail" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_funcDetail</Item>
<Item Name="rets" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_rets, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._retCount]</Item>
<Item Name="args" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_args, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._argCount]</Item>
</Expand>
</Type>
</AutoVisualizer>
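The Operand_ visualizer above is mostly bit-field extraction on the packed _signature word. The same decoding, written out as a standalone C++ sketch that mirrors the natvis shift/mask expressions (not part of asmjit itself):
#include <cstdint>

// field positions taken from the natvis intrinsics above
constexpr uint32_t op_type(uint32_t sig)   { return sig & 0x7u; }          // bits 0-2
constexpr uint32_t reg_type(uint32_t sig)  { return (sig >> 3) & 0x1Fu; }  // bits 3-7
constexpr uint32_t reg_group(uint32_t sig) { return (sig >> 8) & 0xFu; }   // bits 8-11
constexpr uint32_t op_size(uint32_t sig)   { return (sig >> 24) & 0xFFu; } // bits 24-31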

@@ -0,0 +1,35 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifdef _WIN32
#pragma push_macro("min")
#pragma push_macro("max")
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif

@@ -0,0 +1,27 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifdef _WIN32
#pragma pop_macro("min")
#pragma pop_macro("max")
#endif
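asmjit-scope-begin.h and asmjit-scope-end.h form a push/undef ... pop pair: on _WIN32 they save and remove any min/max macros (typically dragged in by windows.h without NOMINMAX) and later restore them, so declarations placed between the two are not mangled. An illustrative wrap, header name hypothetical:
// some_header.h (hypothetical consumer)
#include "asmjit/asmjit-scope-begin.h"   // pushes and undefines min/max on _WIN32
template <typename T>
T smaller(T a, T b) { return a < b ? a : b; }  // fine even if min() was a macro
#include "asmjit/asmjit-scope-end.h"     // restores whatever the includer had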

@@ -0,0 +1,37 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED
#include "./core.h"
#ifdef ASMJIT_BUILD_X86
#include "./x86.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "./arm.h"
#endif
#endif // ASMJIT_ASMJIT_H_INCLUDED

File diff suppressed because it is too large

@@ -0,0 +1,77 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_EXPORTS
// Only turn-off these warnings when building asmjit itself.
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#endif
// Dependencies only required for asmjit build, but never exposed through public headers.
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#endif
// ============================================================================
// [asmjit::Build - Globals - Build-Only]
// ============================================================================
#include "./api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
#define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
#define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
#define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
#define ASMJIT_FAVOR_SPEED
#else
#define ASMJIT_FAVOR_SIZE
#define ASMJIT_FAVOR_SPEED
#endif
// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
#define ASMJIT_TEST
#endif
// Include a unit testing package if this is a `asmjit_test_unit` build.
#if defined(ASMJIT_TEST)
#include "../../../test/broken.h"
#endif
#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED
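ASMJIT_FAVOR_SIZE and ASMJIT_FAVOR_SPEED expand to per-function optimization attributes only on compilers that support them, and to nothing elsewhere. An illustrative application, function names hypothetical:
ASMJIT_FAVOR_SIZE  void format_error_message_slow_path();   // cold path, keep it small
ASMJIT_FAVOR_SPEED void encode_instruction_hot_loop();      // hot path, optimize hard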

@@ -0,0 +1,552 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
// ============================================================================
// [asmjit::Version]
// ============================================================================
//! \addtogroup asmjit_core
//! \{
//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
#define ASMJIT_LIBRARY_VERSION 0x010400 /* 1.4.0 */
//! \}
// ============================================================================
// [asmjit::Build - Documentation]
// ============================================================================
// NOTE: Doxygen cannot document macros that are not defined, that's why we have
// to define them and then undefine them, so it won't use the macros with its
// own preprocessor.
#ifdef _DOXYGEN
namespace asmjit {
//! \addtogroup asmjit_build
//! \{
//! Asmjit is embedded, implies \ref ASMJIT_STATIC.
#define ASMJIT_EMBED
//! Enables static-library build.
#define ASMJIT_STATIC
//! Defined when AsmJit's build configuration is 'Debug'.
//!
//! \note Can be defined explicitly to bypass autodetection.
#define ASMJIT_BUILD_DEBUG
//! Defined when AsmJit's build configuration is 'Release'.
//!
//! \note Can be defined explicitly to bypass autodetection.
#define ASMJIT_BUILD_RELEASE
//! Defined to build X86/X64 backend.
#define ASMJIT_BUILD_X86
//! Defined to build host backend autodetected at compile-time.
#define ASMJIT_BUILD_HOST
//! Disables deprecated API at compile time.
#define ASMJIT_NO_DEPRECATED
//! Disable non-host architectures entirely.
#define ASMJIT_NO_FOREIGN
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
//! Disables JIT memory management and \ref JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref Logger and \ref Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
// Avoid doxygen preprocessor using feature-selection definitions.
#undef ASMJIT_NO_BUILDER
#undef ASMJIT_NO_COMPILER
#undef ASMJIT_NO_JIT
#undef ASMJIT_NO_LOGGING
#undef ASMJIT_NO_TEXT
#undef ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_INTROSPECTION
//! \}
} // {asmjit}
#endif // _DOXYGEN
// Enable all features at IDE level, so it's properly highlighted and indexed.
#ifdef __INTELLISENSE__
#ifndef ASMJIT_BUILD_X86
#define ASMJIT_BUILD_X86
#endif
#endif
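// Illustrative only (not part of this header): a consumer selects features by
// defining the documented macros above before including the library, either
// on the compiler command line or in a prefix header, e.g.
//
//   #define ASMJIT_STATIC        // link asmjit as a static library
//   #define ASMJIT_NO_COMPILER   // drop the Compiler / register-allocation layer
//   #include <asmjit/asmjit.h>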
// ============================================================================
// [asmjit::Dependencies]
// ============================================================================
// We really want std-types as globals.
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// ============================================================================
// [asmjit::Options]
// ============================================================================
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
#endif
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
#pragma "ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined."
#undef ASMJIT_NO_TEXT
#endif
#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
#pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
#undef ASMJIT_NO_INTROSPECTION
#endif
// ============================================================================
// [asmjit::Build - Globals - Deprecated]
// ============================================================================
#ifndef ASMJIT_NO_DEPRECATED
#if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
#if defined(ASMJIT_BUILD_EMBED)
#pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if defined(ASMJIT_BUILD_STATIC)
#pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if !defined(ASMJIT_STATIC)
#define ASMJIT_STATIC
#endif
#endif
#endif // !ASMJIT_NO_DEPRECATED
// ============================================================================
// [asmjit::Build - Globals - Build Mode]
// ============================================================================
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
#if !defined(NDEBUG)
#define ASMJIT_BUILD_DEBUG
#else
#define ASMJIT_BUILD_RELEASE
#endif
#endif
// ============================================================================
// [asmjit::Build - Globals - Target Architecture Information]
// ============================================================================
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
#define ASMJIT_ARCH_X86 32
#else
#define ASMJIT_ARCH_X86 0
#endif
#if defined(__arm64__) || defined(__aarch64__)
#define ASMJIT_ARCH_ARM 64
#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
#define ASMJIT_ARCH_ARM 32
#else
#define ASMJIT_ARCH_ARM 0
#endif
#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
#define ASMJIT_ARCH_MIPS 64
#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
#define ASMJIT_ARCH_MIPS 32
#else
#define ASMJIT_ARCH_MIPS 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
#if ASMJIT_ARCH_BITS == 0
#undef ASMJIT_ARCH_BITS
#if defined (__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
#else
#define ASMJIT_ARCH_BITS 32
#endif
#endif
#if (defined(__ARMEB__)) || \
(defined(__MIPSEB__)) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define ASMJIT_ARCH_LE 0
#define ASMJIT_ARCH_BE 1
#else
#define ASMJIT_ARCH_LE 1
#define ASMJIT_ARCH_BE 0
#endif
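// Illustrative only (not part of this header): an MSVC or GCC build targeting
// x86-64 defines _M_X64 / __x86_64__, so the detection above yields
// ASMJIT_ARCH_X86 == 64, ASMJIT_ARCH_ARM == 0, ASMJIT_ARCH_MIPS == 0,
// ASMJIT_ARCH_BITS == 64 and ASMJIT_ARCH_LE == 1, which a build could verify with
//
//   static_assert(ASMJIT_ARCH_BITS == 64 && ASMJIT_ARCH_LE == 1, "expected an x64 little-endian build");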
// ============================================================================
// [asmjit::Build - Globals - Build Architectures Definitions]
// ============================================================================
#if !defined(ASMJIT_NO_FOREIGN)
// If 'ASMJIT_NO_FOREIGN' is not defined then all architectures will be built.
#if !defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_X86
#endif
#else
// Detect architectures to build if building only for the host architecture.
#if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_X86
#endif
#endif
// Define 'ASMJIT_BUILD_HOST' if we know that host architecture will be built.
#if !defined(ASMJIT_BUILD_HOST) && ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_X86)
#define ASMJIT_BUILD_HOST
#endif
// ============================================================================
// [asmjit::Build - Globals - C++ Compiler and Features Detection]
// ============================================================================
#define ASMJIT_CXX_GNU 0
#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
#if defined(__INTEL_COMPILER)
// MSC Compiler:
// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
//
// Version List:
// - 16.00.0 == VS2010
// - 17.00.0 == VS2012
// - 18.00.0 == VS2013
// - 19.00.0 == VS2015
// - 19.10.0 == VS2017
#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
// Clang Compiler [Pretends to be GNU, so it must be checked before]:
// - https://clang.llvm.org/cxx_status.html
#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
// GNU Compiler:
// - https://gcc.gnu.org/projects/cxx-status.html
#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#undef ASMJIT_CXX_GNU
#define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__)
#endif
// Compiler features detection macros.
#if defined(__clang__) && defined(__has_attribute)
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
#else
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
#endif
// ============================================================================
// [asmjit::Build - Globals - API Decorators & Language Extensions]
// ============================================================================
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
#if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __declspec(dllexport)
#else
#define ASMJIT_API __declspec(dllimport)
#endif
#elif defined(_WIN32) && defined(__GNUC__)
#ifdef ASMJIT_EXPORTS
#define ASMJIT_API __attribute__((__dllexport__))
#else
#define ASMJIT_API __attribute__((__dllimport__))
#endif
#elif defined(__GNUC__)
#define ASMJIT_API __attribute__((__visibility__("default")))
#endif
#endif
#if !defined(ASMJIT_API)
#define ASMJIT_API
#endif
#if !defined(ASMJIT_VARAPI)
#define ASMJIT_VARAPI extern ASMJIT_API
#endif
// This is basically a workaround. When using MSVC and marking class as DLL
// export everything gets exported, which is unwanted in most projects. MSVC
// automatically exports typeinfo and vtable if at least one symbol of the
// class is exported. However, GCC has some strange behavior that even if
// one or more symbol is exported it doesn't export typeinfo unless the
// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
#if !defined(_WIN32) && defined(__GNUC__)
#define ASMJIT_VIRTAPI ASMJIT_API
#else
#define ASMJIT_VIRTAPI
#endif
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_INLINE __forceinline
#else
#define ASMJIT_INLINE inline
#endif
#if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__))
#define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline)
#define ASMJIT_NORETURN __declspec(noreturn)
#else
#define ASMJIT_NOINLINE
#define ASMJIT_NORETURN
#endif
// Calling conventions.
#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
#define ASMJIT_CDECL __attribute__((__cdecl__))
#define ASMJIT_STDCALL __attribute__((__stdcall__))
#define ASMJIT_FASTCALL __attribute__((__fastcall__))
#define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
#define ASMJIT_CDECL __cdecl
#define ASMJIT_STDCALL __stdcall
#define ASMJIT_FASTCALL __fastcall
#define ASMJIT_REGPARM(N)
#else
#define ASMJIT_CDECL
#define ASMJIT_STDCALL
#define ASMJIT_FASTCALL
#define ASMJIT_REGPARM(N)
#endif
#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
#define ASMJIT_VECTORCALL __vectorcall
#elif ASMJIT_ARCH_X86 && defined(_WIN32)
#define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
#else
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++11 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
#elif defined(_MSC_VER)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
#else
#define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
#endif
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#if defined(__GNUC__)
#define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
#else
#define ASMJIT_MAY_ALIAS
#endif
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#if defined(__GNUC__)
#define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
#define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
#else
#define ASMJIT_LIKELY(...) (__VA_ARGS__)
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif
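// Illustrative only (not part of this header): the hints are meant to wrap
// rarely-taken checks so the compiler lays out the happy path first, e.g.
//
//   if (ASMJIT_UNLIKELY(size == 0))
//     return kErrorInvalidArgument;   // cold error path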
//! \def ASMJIT_FALLTHROUGH
//!
//! Portable [[fallthrough]] attribute.
#if defined(__clang__) && __cplusplus >= 201103L
#define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
#endif
//! \def ASMJIT_DEPRECATED
//!
//! Marks function, class, struct, enum, or anything else as deprecated.
#if defined(__GNUC__)
#define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#if defined(__clang__)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#else
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#endif
#elif defined(_MSC_VER)
#define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
#else
#define ASMJIT_DEPRECATED(MESSAGE)
#define ASMJIT_DEPRECATED_STRUCT(MESSAGE)
#endif
// Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
// ============================================================================
// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
// ============================================================================
#if defined(__clang__)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
_Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("clang diagnostic pop") \
}
#elif defined(__GNUC__) && __GNUC__ == 4
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
#elif defined(__GNUC__) && __GNUC__ >= 8
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_NAMESPACE \
__pragma(warning(pop)) \
}
#endif
#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
#define ASMJIT_END_NAMESPACE }
#endif
#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
ASMJIT_BEGIN_NAMESPACE \
namespace NAMESPACE {
#define ASMJIT_END_SUB_NAMESPACE \
} \
ASMJIT_END_NAMESPACE
// ============================================================================
// [asmjit::Build - Globals - Utilities]
// ============================================================================
#define ASMJIT_NONCOPYABLE(...) \
private: \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
#define ASMJIT_NONCONSTRUCTIBLE(...) \
private: \
__VA_ARGS__() = delete; \
__VA_ARGS__(const __VA_ARGS__& other) = delete; \
__VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
public:
// ============================================================================
// [asmjit::Build - Globals - Cleanup]
// ============================================================================
// Cleanup definitions that are only used within this header file.
#undef ASMJIT_CXX_GNU
#undef ASMJIT_CXX_MAKE_VER
#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED
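
A short usage sketch (not part of this commit) of the build macros defined above. ASMJIT_NONCOPYABLE and ASMJIT_UNLIKELY are primarily meant for AsmJit's own sources, but they live in the public api-config header, so the following only illustrates what they expand to; the ScratchBuffer type and the <asmjit/core.h> include path are assumptions for the example.

#include <asmjit/core.h>
#include <cstddef>

// Hypothetical type, used only to show the macros in context.
class ScratchBuffer {
public:
  // Deletes the copy constructor and copy assignment, then re-opens `public:`.
  ASMJIT_NONCOPYABLE(ScratchBuffer)
  ScratchBuffer() noexcept = default;

  bool reserve(size_t n) noexcept {
    // The error branch is hinted as the cold/unlikely path.
    if (ASMJIT_UNLIKELY(n > kCapacity))
      return false;
    _size = n;
    return true;
  }

private:
  static constexpr size_t kCapacity = 4096;
  size_t _size = 0;
};

int main() {
  ScratchBuffer buf;
  return buf.reserve(128) ? 0 : 1;
}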

@ -0,0 +1,164 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
// This file provides architecture-specific classes that are required in the
// core library. For example, the Imm operand can be created from arm::Shift
// in a constexpr way, so arm::Shift must be provided here. This header thus
// provides everything architecture-specific that is used by the Core API.
#include "../core/globals.h"
// ============================================================================
// [asmjit::arm]
// ============================================================================
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Represents ARM immediate shift operation type and value.
class Shift {
public:
//! Operation predicate (ARM) describes either SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real
//! constants on ARM32 and AArch64 hardware, however, the additional constants
//! that describe extend modes are specific to AsmJit and would be translated
//! to the AArch64 specific constants by the assembler.
enum Op : uint32_t {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kOpLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kOpLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kOpASR = 0x02u,
//! Rotate right operation.
//!
//! \note Not available in AArch64 mode.
kOpROR = 0x03u,
//! Rotate right with carry operation (encoded as `kOpROR` with a zero value).
//!
//! \note Not available in AArch64 mode.
kOpRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kOpMSL = 0x05u,
//! UXTB extend register operation (AArch64 only).
kOpUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kOpUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kOpUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kOpUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kOpSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kOpSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kOpSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kOpSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Shift operation.
uint32_t _op;
//! Shift Value.
uint32_t _value;
//! Default constructed Shift is not initialized.
inline Shift() noexcept = default;
//! Copy constructor (default)
constexpr Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
constexpr Shift(uint32_t op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
constexpr uint32_t op() const noexcept { return _op; }
//! Returns the shift amount.
constexpr uint32_t value() const noexcept { return _value; }
//! Sets shift operation to `op`.
inline void setOp(uint32_t op) noexcept { _op = op; }
//! Sets shift amount to `value`.
inline void setValue(uint32_t value) noexcept { _value = value; }
};
//! Constructs a `LSL #value` shift (logical shift left).
static constexpr Shift lsl(uint32_t value) noexcept { return Shift(Shift::kOpLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right).
static constexpr Shift lsr(uint32_t value) noexcept { return Shift(Shift::kOpLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right).
static constexpr Shift asr(uint32_t value) noexcept { return Shift(Shift::kOpASR, value); }
//! Constructs a `ROR #value` shift (rotate right).
static constexpr Shift ror(uint32_t value) noexcept { return Shift(Shift::kOpROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1).
static constexpr Shift rrx() noexcept { return Shift(Shift::kOpRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones).
static constexpr Shift msl(uint32_t value) noexcept { return Shift(Shift::kOpMSL, value); }
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend).
static constexpr Shift uxtb(uint32_t value) noexcept { return Shift(Shift::kOpUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend).
static constexpr Shift uxth(uint32_t value) noexcept { return Shift(Shift::kOpUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend).
static constexpr Shift uxtw(uint32_t value) noexcept { return Shift(Shift::kOpUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend).
static constexpr Shift uxtx(uint32_t value) noexcept { return Shift(Shift::kOpUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend).
static constexpr Shift sxtb(uint32_t value) noexcept { return Shift(Shift::kOpSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend).
static constexpr Shift sxth(uint32_t value) noexcept { return Shift(Shift::kOpSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend).
static constexpr Shift sxtw(uint32_t value) noexcept { return Shift(Shift::kOpSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend).
static constexpr Shift sxtx(uint32_t value) noexcept { return Shift(Shift::kOpSXTX, value); }
//! \}
ASMJIT_END_SUB_NAMESPACE
#endif // ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
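
A minimal sketch (not part of this commit) showing how the shift/extend helpers above are meant to be used; it assumes archcommons.h is reachable through <asmjit/core.h>, which is how AsmJit normally exposes it.

#include <asmjit/core.h>

int main() {
  using namespace asmjit;

  // Describe an `LSL #12` shift and a `UXTW #2` extend.
  arm::Shift s1 = arm::lsl(12);
  arm::Shift s2 = arm::uxtw(2);

  bool ok = s1.op() == arm::Shift::kOpLSL && s1.value() == 12 &&
            s2.op() == arm::Shift::kOpUXTW && s2.value() == 2;
  return ok ? 0 : 1;
}

These descriptors only carry the operation and the amount; they are consumed later by the ARM-specific operand and assembler code.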

@ -0,0 +1,155 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/misc_p.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86archtraits_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armarchtraits_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
static const constexpr ArchTraits noArchTraits = {
0xFF, // SP.
0xFF, // FP.
0xFF, // LR.
0xFF, // PC.
{ 0, 0, 0 }, // Reserved.
0, // HW stack alignment.
0, // Min stack offset.
0, // Max stack offset.
{ 0, 0, 0, 0}, // ISA features [Gp, Vec, Other0, Other1].
{ { 0 } }, // RegTypeToSignature.
{ 0 }, // RegTypeToTypeId.
{ 0 } // TypeIdToRegType.
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount] = {
// No architecture.
noArchTraits,
// X86/X86 architectures.
#ifdef ASMJIT_BUILD_X86
x86::x86ArchTraits,
x86::x64ArchTraits,
#else
noArchTraits,
noArchTraits,
#endif
// RISCV32/RISCV64 architectures.
noArchTraits,
noArchTraits,
// ARM architecture
noArchTraits,
// AArch64 architecture.
#ifdef ASMJIT_BUILD_ARM
arm::a64ArchTraits,
#else
noArchTraits,
#endif
// ARM/Thumb architecture.
noArchTraits,
// Reserved.
noArchTraits,
// MIPS32/MIPS64
noArchTraits,
noArchTraits
};
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfoOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Passed RegType instead of TypeId?
if (typeId <= BaseReg::kTypeMax)
typeId = archTraits.regTypeToTypeId(typeId);
if (ASMJIT_UNLIKELY(!Type::isValid(typeId)))
return DebugUtils::errored(kErrorInvalidTypeId);
// First normalize architecture dependent types.
if (Type::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == Type::kIdIntPtr)
typeId = is32Bit ? Type::kIdI32 : Type::kIdI64;
else
typeId = is32Bit ? Type::kIdU32 : Type::kIdU64;
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = Type::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(typeId == Type::kIdF80))
return DebugUtils::errored(kErrorInvalidUseOfF80);
uint32_t regType = 0;
if (typeId >= Type::_kIdBaseStart && typeId < Type::_kIdVec32Start) {
regType = archTraits._typeIdToRegType[typeId - Type::_kIdBaseStart];
if (!regType) {
if (typeId == Type::kIdI64 || typeId == Type::kIdU64)
return DebugUtils::errored(kErrorInvalidUseOfGpq);
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
else {
if (size <= 8 && archTraits._regInfo[BaseReg::kTypeVec64].isValid())
regType = BaseReg::kTypeVec64;
else if (size <= 16 && archTraits._regInfo[BaseReg::kTypeVec128].isValid())
regType = BaseReg::kTypeVec128;
else if (size == 32 && archTraits._regInfo[BaseReg::kTypeVec256].isValid())
regType = BaseReg::kTypeVec256;
else if (archTraits._regInfo[BaseReg::kTypeVec512].isValid())
regType = BaseReg::kTypeVec512;
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
*typeIdOut = typeId;
regInfoOut->reset(archTraits.regTypeToSignature(regType));
return kErrorOk;
}
ASMJIT_END_NAMESPACE
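
A hedged sketch (not part of this commit) of the ArchUtils::typeIdToRegInfo() helper defined above; it assumes a default build with ASMJIT_BUILD_X86 so the X64 traits are populated.

#include <asmjit/core.h>

int main() {
  using namespace asmjit;

  uint32_t typeIdOut = 0;
  RegInfo regInfo;

  // Ask the X64 traits which register type/signature backs a 32-bit signed int.
  Error err = ArchUtils::typeIdToRegInfo(Environment::kArchX64, Type::kIdI32,
                                         &typeIdOut, &regInfo);
  if (err != kErrorOk)
    return 1;

  // On X64 this should resolve to a valid 32-bit GP register signature.
  return regInfo.isValid() ? 0 : 1;
}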

@ -0,0 +1,174 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#include "../core/environment.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
//! Architecture traits used by Function API and Compiler's register allocator.
struct ArchTraits {
//! ISA features for each register group.
enum IsaFeatures : uint32_t {
//! The ISA can swap two registers with a single instruction.
kIsaFeatureSwap = 0x01u,
//! The ISA provides push/pop-like instructions for this register group.
kIsaFeaturePushPop = 0x02u,
};
//! Stack pointer register id.
uint8_t _spRegId;
//! Frame pointer register id.
uint8_t _fpRegId;
//! Link register id.
uint8_t _linkRegId;
//! Instruction pointer (or program counter) register id, if accessible.
uint8_t _ipRegId;
// Reserved.
uint8_t _reserved[3];
//! Hardware stack alignment requirement.
uint8_t _hwStackAlignment;
//! Minimum addressable offset on stack guaranteed for all instructions.
uint32_t _minStackOffset;
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
//! Flags for each virtual register group (always covers GP and Vec groups).
uint8_t _isaFlags[BaseReg::kGroupVirt];
//! Maps a register type to a signature that provides its group and size and
//! can be used to construct register operands.
RegInfo _regInfo[BaseReg::kTypeMax + 1];
//! Maps a register to type-id, see \ref Type::Id.
uint8_t _regTypeToTypeId[BaseReg::kTypeMax + 1];
//! Maps base TypeId values (from Type::_kIdBaseStart) to register types, see \ref Type::Id.
uint8_t _typeIdToRegType[32];
//! Resets all members to zeros.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \name Accessors
//! \{
//! Returns stack pointer register id.
inline constexpr uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id.
inline constexpr uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it.
inline constexpr uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it.
inline constexpr uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain
//! it would return the lowest alignment (1); however, some architectures may
//! constrain the alignment, for example AArch64 requires 16-byte alignment.
inline constexpr uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides link register, which is used across
//! function calls. If the link register is not provided then a function call
//! pushes the return address on stack (X86/X64).
inline constexpr bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
inline constexpr uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
inline constexpr uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
inline constexpr uint32_t isaFlags(uint32_t group) const noexcept { return _isaFlags[group]; }
//! Tests whether the given register `group` has the given `flag` set.
inline constexpr bool hasIsaFlag(uint32_t group, uint32_t flag) const noexcept { return (_isaFlags[group] & flag) != 0; }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
inline constexpr bool hasSwap(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeatureSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
inline constexpr bool hasPushPop(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeaturePushPop); }
inline uint32_t hasRegType(uint32_t rType) const noexcept {
return rType <= BaseReg::kTypeMax && _regInfo[rType].signature() != 0;
}
inline uint32_t regTypeToSignature(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].signature();
}
inline uint32_t regTypeToGroup(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].group();
}
inline uint32_t regTypeToSize(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].size();
}
inline uint32_t regTypeToTypeId(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regTypeToTypeId[rType];
}
//! \}
//! \name Statics
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
static inline const ArchTraits& byArch(uint32_t arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount];
inline const ArchTraits& ArchTraits::byArch(uint32_t arch) noexcept { return _archTraits[arch & ~Environment::kArchBigEndianMask]; }
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfo) noexcept;
} // {ArchUtils}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
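
For completeness, a small sketch (not part of this commit) that queries ArchTraits through the accessors declared above; again assuming the default ASMJIT_BUILD_X86 build.

#include <asmjit/core.h>
#include <cstdio>

int main() {
  using namespace asmjit;

  const ArchTraits& traits = ArchTraits::byArch(Environment::kArchX64);

  // X86/X64 has no link register (the return address lives on the stack).
  std::printf("has link register : %s\n", traits.hasLinkReg() ? "yes" : "no");
  std::printf("hw stack alignment: %u\n", traits.hwStackAlignment());
  std::printf("stack pointer id  : %u\n", traits.spRegId());
  return 0;
}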

@ -0,0 +1,409 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/codewriter_p.h"
#include "../core/constpool.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseAssembler - Construction / Destruction]
// ============================================================================
BaseAssembler::BaseAssembler() noexcept
: BaseEmitter(kTypeAssembler) {}
BaseAssembler::~BaseAssembler() noexcept {}
// ============================================================================
// [asmjit::BaseAssembler - Buffer Management]
// ============================================================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
_bufferPtr = _bufferData + offset;
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Section Management]
// ============================================================================
static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
uint8_t* p = section->_buffer._data;
self->_section = section;
self->_bufferData = p;
self->_bufferPtr = p + section->_buffer._size;
self->_bufferEnd = p + section->_buffer._capacity;
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
return reportError(DebugUtils::errored(kErrorInvalidSection));
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logf(".section %s {#%u}\n", section->name(), section->id());
#endif
BaseAssembler_initSection(this, section);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Label Management]
// ============================================================================
Label BaseAssembler::newLabel() {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newLabelEntry(&le);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err))
reportError(err);
else
labelId = le->id();
}
return Label(labelId);
}
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
Error err = _code->bindLabel(label, _section->id(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
EmitterUtils::logLabelBound(this, label);
#endif
resetInlineComment();
if (err)
return reportError(err);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Embed]
// ============================================================================
#ifndef ASMJIT_NO_LOGGING
struct DataSizeByPower {
char str[4];
};
static const DataSizeByPower dataSizeByPowerTable[] = {
{ "db" },
{ "dw" },
{ "dd" },
{ "dq" }
};
#endif
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (dataSize == 0)
return kErrorOk;
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
writer.emitData(data, dataSize);
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(data, dataSize);
#endif
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (itemCount == 0 || repeatCount == 0)
return kErrorOk;
uint32_t typeSize = Type::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
#ifndef ASMJIT_NO_LOGGING
const uint8_t* start = writer.cursor();
#endif
for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize);
}
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(start, totalSize);
#endif
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (ASMJIT_UNLIKELY(!isLabelValid(label)))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
pool.fill(writer.cursor());
#ifndef ASMJIT_NO_LOGGING
if (_logger)
_logger->logBinary(writer.cursor(), size);
#endif
writer.advance(size);
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
ASMJIT_ASSERT(_code != nullptr);
RelocEntry* re;
LabelEntry* le = _code->labelEntry(label);
if (ASMJIT_UNLIKELY(!le))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.appendFormat("%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str);
Formatter::formatLabel(sb, 0, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_format.resetToDataValue(uint32_t(dataSize));
if (le->isBound()) {
re->_targetSectionId = le->section()->id();
re->_payload = le->offset();
}
else {
OffsetFormat of;
of.resetToDataValue(uint32_t(dataSize));
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!link))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
link->relocId = re->id();
}
// Emit dummy DWORD/QWORD depending on the data size.
writer.emitZeros(dataSize);
writer.done(this);
return kErrorOk;
}
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return reportError(DebugUtils::errored(kErrorNotInitialized));
LabelEntry* labelEntry = _code->labelEntry(label);
LabelEntry* baseEntry = _code->labelEntry(base);
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (dataSize == 0)
dataSize = registerSize();
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str);
Formatter::formatLabel(sb, 0, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, 0, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
#endif
// If both labels are bound within the same section it means the delta can be calculated now.
if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
uint64_t delta = labelEntry->offset() - baseEntry->offset();
writer.emitValueLE(delta, dataSize);
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
exp->reset();
exp->opType = Expression::kOpSub;
exp->setValueAsLabel(0, labelEntry);
exp->setValueAsLabel(1, baseEntry);
re->_format.resetToDataValue(dataSize);
re->_sourceSectionId = _section->id();
re->_sourceOffset = offset();
re->_payload = (uint64_t)(uintptr_t)exp;
writer.emitZeros(dataSize);
}
writer.done(this);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Comment]
// ============================================================================
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
// Logger cannot be NULL if `kFlagLogComments` is set.
ASMJIT_ASSERT(_logger != nullptr);
_logger->log(data, size);
_logger->log("\n", 1);
return kErrorOk;
#else
DebugUtils::unused(data, size);
return kErrorOk;
#endif
}
// ============================================================================
// [asmjit::BaseAssembler - Events]
// ============================================================================
Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
// Attach to the end of the .text section.
BaseAssembler_initSection(this, code->_sections[0]);
return kErrorOk;
}
Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
}
ASMJIT_END_NAMESPACE
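
The embed API above is easiest to see end-to-end with a concrete assembler. A hedged sketch (not part of this commit), assuming the X86 backend and a JIT-capable host:

#include <asmjit/x86.h>

int main() {
  using namespace asmjit;

  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Assembler a(&code);
  Label data = a.newLabel();

  a.mov(x86::eax, x86::dword_ptr(data));  // RIP-relative load of the embedded dword
  a.ret();

  a.bind(data);                           // bind the label right after the code
  uint32_t value = 42;
  a.embed(&value, sizeof(value));         // embed raw bytes at the bound label

  int (*fn)(void) = nullptr;
  if (rt.add(&fn, &code) != kErrorOk)
    return 1;

  int result = fn();
  rt.release(fn);
  return result == 42 ? 0 : 1;
}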

@ -0,0 +1,152 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#include "../core/codeholder.h"
#include "../core/datatypes.h"
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [asmjit::BaseAssembler]
// ============================================================================
//! Base assembler.
//!
//! This is a base class that provides the interface used by architecture-specific
//! assembler implementations. The assembler doesn't hold any data; instead, it's
//! attached to \ref CodeHolder, which provides all the data that Assembler
//! needs and which can be altered by it.
//!
//! Check out architecture specific assemblers for more details and examples:
//!
//! - \ref x86::Assembler - X86/X64 assembler implementation.
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public:
ASMJIT_NONCOPYABLE(BaseAssembler)
typedef BaseEmitter Base;
//! Current section where the assembling happens.
Section* _section = nullptr;
//! Start of the CodeBuffer of the current section.
uint8_t* _bufferData = nullptr;
//! End (first invalid byte) of the current section.
uint8_t* _bufferEnd = nullptr;
//! Pointer in the CodeBuffer of the current section.
uint8_t* _bufferPtr = nullptr;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseAssembler` instance.
ASMJIT_API BaseAssembler() noexcept;
//! Destroys the `BaseAssembler` instance.
ASMJIT_API virtual ~BaseAssembler() noexcept;
//! \}
//! \name Code-Buffer Management
//! \{
//! Returns the capacity of the current CodeBuffer.
inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer.
inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer.
inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`.
//!
//! \note The `offset` cannot be greater than buffer size even if it's
//! within the buffer's capacity.
ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
inline uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section.
inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section.
inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \}
//! \name Section Management
//! \{
//! Returns the current section.
inline Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;
//! \}
//! \name Label Management
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
//! \name Embed
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
//! \}
//! \name Comment
//! \{
ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED
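
A small sketch (not part of this commit) of the code-buffer accessors declared above (offset(), currentSection()); the surrounding JIT setup assumes the X86 backend:

#include <asmjit/x86.h>
#include <cstdio>

int main() {
  using namespace asmjit;

  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Assembler a(&code);

  size_t before = a.offset();
  a.mov(x86::eax, 7);
  a.ret();

  // How many bytes the two instructions occupied in the current section.
  std::printf("emitted %zu bytes into '%s'\n",
              a.offset() - before, a.currentSection()->name());

  int (*fn)(void) = nullptr;
  if (rt.add(&fn, &code) != kErrorOk)
    return 1;

  int r = fn();
  rt.release(fn);
  return r == 7 ? 0 : 1;
}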

@ -0,0 +1,920 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_BUILDER
#include "../core/builder.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::PostponedErrorHandler (Internal)]
// ============================================================================
//! Postponed error handler that never throws. Used as a temporary error handler
//! while running passes. If an error occurs, the caller is notified and will
//! call the real error handler, which can throw.
class PostponedErrorHandler : public ErrorHandler {
public:
void handleError(Error err, const char* message, BaseEmitter* origin) override {
DebugUtils::unused(err, origin);
_message.assign(message);
}
StringTmp<128> _message;
};
// ============================================================================
// [asmjit::BaseBuilder - Utilities]
// ============================================================================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes)
pass->~Pass();
self->_passes.reset();
}
// ============================================================================
// [asmjit::BaseBuilder - Construction / Destruction]
// ============================================================================
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(kTypeBuilder),
_codeZone(32768 - Zone::kBlockOverhead),
_dataZone(16384 - Zone::kBlockOverhead),
_passZone(65536 - Zone::kBlockOverhead),
_allocator(&_codeZone) {}
BaseBuilder::~BaseBuilder() noexcept {
BaseBuilder_deletePasses(this);
}
// ============================================================================
// [asmjit::BaseBuilder - Node Management]
// ============================================================================
Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount) {
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
*out = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
return kErrorOk;
}
Error BaseBuilder::_newLabelNode(LabelNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment) {
*out = nullptr;
return _newNodeT<AlignNode>(out, alignMode, alignment);
}
Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
*out = nullptr;
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
uint32_t typeSize = Type::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
if (ASMJIT_UNLIKELY(of))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
node->_embed._typeId = uint8_t(typeId);
node->_embed._typeSize = uint8_t(typeSize);
node->_itemCount = itemCount;
node->_repeatCount = repeatCount;
uint8_t* dstData = node->_inlineData;
if (dataSize > EmbedDataNode::kInlineBufferSize) {
dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
if (ASMJIT_UNLIKELY(!dstData))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node->_externalData = dstData;
}
if (data)
memcpy(dstData, data, dataSize);
*out = node;
return kErrorOk;
}
Error BaseBuilder::_newConstPoolNode(ConstPoolNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newCommentNode(CommentNode** out, const char* data, size_t size) {
*out = nullptr;
if (data) {
if (size == SIZE_MAX)
size = strlen(data);
if (size > 0) {
data = static_cast<char*>(_dataZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
}
return _newNodeT<CommentNode>(out, data);
}
BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
ASMJIT_ASSERT(node);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
if (!_cursor) {
if (!_firstNode) {
_firstNode = node;
_lastNode = node;
}
else {
node->_next = _firstNode;
_firstNode->_prev = node;
_firstNode = node;
}
}
else {
BaseNode* prev = _cursor;
BaseNode* next = _cursor->next();
node->_prev = prev;
node->_next = next;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
}
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
_cursor = node;
return node;
}
BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(node);
ASMJIT_ASSERT(ref);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
BaseNode* prev = ref;
BaseNode* next = ref->next();
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
prev->_next = node;
if (next)
next->_prev = node;
else
_lastNode = node;
return node;
}
BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(!node->_prev);
ASMJIT_ASSERT(!node->_next);
ASMJIT_ASSERT(!node->isActive());
ASMJIT_ASSERT(ref != nullptr);
ASMJIT_ASSERT(ref->isActive());
BaseNode* prev = ref->prev();
BaseNode* next = ref;
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
next->_prev = node;
if (prev)
prev->_next = node;
else
_firstNode = node;
return node;
}
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive())
return node;
BaseNode* prev = node->prev();
BaseNode* next = node->next();
if (_firstNode == node)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == node)
_lastNode = prev;
else
next->_prev = prev;
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
if (node->isSection())
_dirtySectionLinks = true;
if (_cursor == node)
_cursor = prev;
return node;
}
void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
}
if (!first->isActive())
return;
BaseNode* prev = first->prev();
BaseNode* next = last->next();
if (_firstNode == first)
_firstNode = next;
else
prev->_next = next;
if (_lastNode == last)
_lastNode = prev;
else
next->_prev = prev;
BaseNode* node = first;
uint32_t didRemoveSection = false;
for (;;) {
next = node->next();
ASMJIT_ASSERT(next != nullptr);
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node)
_cursor = prev;
if (node == last)
break;
node = next;
}
if (didRemoveSection)
_dirtySectionLinks = true;
}
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
BaseNode* old = _cursor;
_cursor = node;
return old;
}
// ============================================================================
// [asmjit::BaseBuilder - Section]
// ============================================================================
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
return reportError(DebugUtils::errored(kErrorInvalidSection));
if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk))
return reportError(err);
}
SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size())
node = _sectionNodes[sectionId];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
// We have already reserved enough space, so this cannot fail now.
if (sectionId >= _sectionNodes.size())
_sectionNodes.resize(&_allocator, sectionId + 1);
_sectionNodes[sectionId] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::section(Section* section) {
SectionNode* node;
ASMJIT_PROPAGATE(sectionNodeOf(&node, section->id()));
if (!node->isActive()) {
// Insert the section at the end if it was not part of the code.
addAfter(node, lastNode());
_cursor = node;
}
else {
// This is a bit tricky. We cache section links to make sure that
// switching sections doesn't require traversing the linked list unless
// the position of the section has changed.
if (hasDirtySectionLinks())
updateSectionLinks();
if (node->_nextSection)
_cursor = node->_nextSection->_prev;
else
_cursor = _lastNode;
}
return kErrorOk;
}
void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks)
return;
BaseNode* node_ = _firstNode;
SectionNode* currentSection = nullptr;
while (node_) {
if (node_->isSection()) {
if (currentSection)
currentSection->_nextSection = node_->as<SectionNode>();
currentSection = node_->as<SectionNode>();
}
node_ = node_->next();
}
if (currentSection)
currentSection->_nextSection = nullptr;
_dirtySectionLinks = false;
}
// ============================================================================
// [asmjit::BaseBuilder - Labels]
// ============================================================================
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
return DebugUtils::errored(kErrorInvalidLabel);
if (index >= _labelNodes.size())
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
LabelNode* node = _labelNodes[index];
if (!node) {
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
_labelNodes[index] = node;
}
*out = node;
return kErrorOk;
}
Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
LabelEntry* le;
ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
uint32_t labelId = le->id();
// We just added one label entry, so the node array must be smaller than labelId + 1.
ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err))
return self->reportError(err);
LabelNode* node;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node;
node->_labelId = labelId;
return kErrorOk;
}
Label BaseBuilder::newLabel() {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newLabelEntry(&le) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
if (_code &&
_code->newNamedLabelEntry(&le, name, nameSize, type, parentId) == kErrorOk &&
BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
labelId = le->id();
}
return Label(labelId);
}
Error BaseBuilder::bind(const Label& label) {
LabelNode* node;
ASMJIT_PROPAGATE(labelNodeOf(&node, label));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Passes]
// ============================================================================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes)
if (strcmp(pass->name(), name) == 0)
return pass;
return nullptr;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is directly called by `addPassT()` we treat `null` argument
// as out-of-memory condition. Otherwise it would be API misuse.
return DebugUtils::errored(kErrorOutOfMemory);
}
else if (ASMJIT_UNLIKELY(pass->_cb)) {
// Kinda weird, but okay...
if (pass->_cb == this)
return kErrorOk;
return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
pass->_cb = this;
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(pass == nullptr))
return DebugUtils::errored(kErrorInvalidArgument);
if (pass->_cb != nullptr) {
if (pass->_cb != this)
return DebugUtils::errored(kErrorInvalidState);
uint32_t index = _passes.indexOf(pass);
ASMJIT_ASSERT(index != Globals::kNotFound);
pass->_cb = nullptr;
_passes.removeAt(index);
}
pass->~Pass();
return kErrorOk;
}
Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (_passes.empty())
return kErrorOk;
ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed;
Error err = kErrorOk;
setErrorHandler(&postponed);
for (Pass* pass : _passes) {
_passZone.reset();
err = pass->run(&_passZone, _logger);
if (err)
break;
}
_passZone.reset();
setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err))
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Emit]
// ============================================================================
Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
uint32_t options = instOptions() | forcedInstOptions();
if (options & BaseInst::kOptionReserved) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
if (hasValidationOption(kValidationOptionIntermediate)) {
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Error err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount);
if (ASMJIT_UNLIKELY(err)) {
resetInstOptions();
resetExtraReg();
resetInlineComment();
return reportError(err);
}
}
#endif
// Clear options that should never be part of `InstNode`.
options &= ~BaseInst::kOptionReserved;
}
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
if (ASMJIT_UNLIKELY(!node)) {
resetExtraReg();
return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node = new(node) InstNode(this, instId, options, opCount, opCapacity);
node->setExtraReg(extraReg());
node->setOp(0, o0);
node->setOp(1, o1);
node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++)
node->setOp(i, opExt[i - 3]);
node->resetOpRange(opCount, opCapacity);
if (comment)
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
addNode(node);
resetExtraReg();
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Align]
// ============================================================================
Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
AlignNode* node;
ASMJIT_PROPAGATE(_newAlignNode(&node, alignMode, alignment));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Embed]
// ============================================================================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, data, dataSize));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!isLabelValid(label))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, nullptr, pool.size()));
pool.fill(node->data());
addNode(node);
return kErrorOk;
}
// EmbedLabel / EmbedLabelDelta
// ----------------------------
//
// If dataSize is zero, the size defaults to the target register width.
// If a size is provided, validate that it's within the supported range.
static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
return !dataSize || (Support::isPowerOf2(dataSize) && dataSize <= 8);
}
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
if (!BaseBuilder_checkDataSize(dataSize))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
EmbedLabelDeltaNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Comment]
// ============================================================================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
CommentNode* node;
ASMJIT_PROPAGATE(_newCommentNode(&node, data, size));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Serialize]
// ============================================================================
Error BaseBuilder::serializeTo(BaseEmitter* dst) {
Error err = kErrorOk;
BaseNode* node_ = _firstNode;
Operand_ opArray[Globals::kMaxOpCount];
do {
dst->setInlineComment(node_->inlineComment());
if (node_->isInst()) {
InstNode* node = node_->as<InstNode>();
// NOTE: Inlined to remove one additional call per instruction.
dst->setInstOptions(node->instOptions());
dst->setExtraReg(node->extraReg());
const Operand_* op = node->operands();
const Operand_* opExt = EmitterUtils::noExt;
uint32_t opCount = node->opCount();
if (opCount > 3) {
uint32_t i = 4;
opArray[3] = op[3];
while (i < opCount) {
opArray[i].copyFrom(op[i]);
i++;
}
while (i < Globals::kMaxOpCount) {
opArray[i].reset();
i++;
}
opExt = opArray + 3;
}
err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
}
else if (node_->isLabel()) {
if (node_->isConstPool()) {
ConstPoolNode* node = node_->as<ConstPoolNode>();
err = dst->embedConstPool(node->label(), node->constPool());
}
else {
LabelNode* node = node_->as<LabelNode>();
err = dst->bind(node->label());
}
}
else if (node_->isAlign()) {
AlignNode* node = node_->as<AlignNode>();
err = dst->align(node->alignMode(), node->alignment());
}
else if (node_->isEmbedData()) {
EmbedDataNode* node = node_->as<EmbedDataNode>();
err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
}
else if (node_->isEmbedLabel()) {
EmbedLabelNode* node = node_->as<EmbedLabelNode>();
err = dst->embedLabel(node->label(), node->dataSize());
}
else if (node_->isEmbedLabelDelta()) {
EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
}
else if (node_->isSection()) {
SectionNode* node = node_->as<SectionNode>();
err = dst->section(_code->sectionById(node->id()));
}
else if (node_->isComment()) {
CommentNode* node = node_->as<CommentNode>();
err = dst->comment(node->inlineComment());
}
if (err) break;
node_ = node_->next();
} while (node_);
return err;
}
// ============================================================================
// [asmjit::BaseBuilder - Events]
// ============================================================================
Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
SectionNode* initialSection;
Error err = sectionNodeOf(&initialSection, 0);
if (!err)
err = _passes.willGrow(&_allocator, 8);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
_cursor = initialSection;
_firstNode = initialSection;
_lastNode = initialSection;
initialSection->setFlags(BaseNode::kFlagIsActive);
return kErrorOk;
}
Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
BaseBuilder_deletePasses(this);
_sectionNodes.reset();
_labelNodes.reset();
_allocator.reset(&_codeZone);
_codeZone.reset();
_dataZone.reset();
_passZone.reset();
_nodeFlags = 0;
_cursor = nullptr;
_firstNode = nullptr;
_lastNode = nullptr;
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::Pass - Construction / Destruction]
// ============================================================================
Pass::Pass(const char* name) noexcept
: _name(name) {}
Pass::~Pass() noexcept {}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_BUILDER
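// A minimal usage sketch (illustrative only, not part of the AsmJit sources),
// assuming the x86 backend and JIT support are compiled in: an x86::Builder
// records instructions and embedded data as nodes, and finalize() replays the
// node list into an attached Assembler via serializeTo() as shown above.

#include <asmjit/x86.h>
using namespace asmjit;

typedef int (*Func)(void);

int main() {
  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Builder cb(&code);
  cb.mov(x86::eax, 42);                   // recorded as an InstNode
  cb.ret();

  static const uint8_t payload[4] = { 1, 2, 3, 4 };
  cb.embed(payload, sizeof(payload));     // recorded as an EmbedDataNode

  cb.finalize();                          // serializes the node list into an Assembler

  Func fn;
  if (rt.add(&fn, &code) != kErrorOk)     // copy the generated code into executable memory
    return 1;
  int result = fn();                      // result == 42
  rt.release(fn);
  return result == 42 ? 0 : 1;
}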

File diff suppressed because it is too large

@ -0,0 +1,126 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CodeBuffer]
// ============================================================================
//! Code or data buffer.
struct CodeBuffer {
//! The content of the buffer (data).
uint8_t* _data;
//! Number of bytes of `data` used.
size_t _size;
//! Buffer capacity (in bytes).
size_t _capacity;
//! Buffer flags.
uint32_t _flags;
//! Code buffer flags.
enum Flags : uint32_t {
//! Buffer is external (not allocated by asmjit).
kFlagIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kFlagIsFixed = 0x00000002u
};
//! \name Overloaded Operators
//! \{
//! Returns a reference to the byte at the given `index`.
inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \overload
inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return _data[index];
}
//! \}
//! \name Accessors
//! \{
//! Returns code buffer flags, see \ref Flags.
inline uint32_t flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer cannot be reallocated and thus cannot grow.
inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users; it's never allocated or freed by AsmJit.
inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
inline bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
inline bool empty() const noexcept { return !_size; }
//! Returns the size of the data.
inline size_t size() const noexcept { return _size; }
//! Returns the capacity of the data.
inline size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references.
inline uint8_t* data() noexcept { return _data; }
//! \overload
inline const uint8_t* data() const noexcept { return _data; }
//! \}
//! \name Iterators
//! \{
inline uint8_t* begin() noexcept { return _data; }
inline const uint8_t* begin() const noexcept { return _data; }
inline uint8_t* end() noexcept { return _data + _size; }
inline const uint8_t* end() const noexcept { return _data + _size; }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED
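// A minimal usage sketch (illustrative only, not part of the AsmJit sources),
// assuming the x86 backend: every Section owned by a CodeHolder exposes a
// CodeBuffer via Section::buffer(), which gives access to the encoded bytes.

#include <asmjit/x86.h>
#include <cstdio>
using namespace asmjit;

int main() {
  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Assembler a(&code);
  a.mov(x86::eax, 1);
  a.ret();

  const CodeBuffer& buf = code.textSection()->buffer();
  std::printf("size=%zu capacity=%zu fixed=%d\n",
              buf.size(), buf.capacity(), int(buf.isFixed()));
  for (size_t i = 0; i < buf.size(); i++)
    std::printf("%02X ", buf[i]);         // raw encoded bytes of the .text section
  std::printf("\n");
  return 0;
}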

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,151 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
#include "../core/codewriter_p.h"
ASMJIT_BEGIN_NAMESPACE
bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t bitShift = format.immBitShift();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isInt32(offset64))
return false;
int32_t offset32 = int32_t(offset64);
if (!Support::isEncodableOffset32(offset32, bitCount))
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
*dst = (uint32_t(offset32) & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
return true;
}
case OffsetFormat::kTypeAArch64_ADR:
case OffsetFormat::kTypeAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
return false;
uint32_t immLo = uint32_t(offset32) & 0x3u;
uint32_t immHi = uint32_t(offset32 >> 2) & Support::lsbMask<uint32_t>(19);
*dst = (immLo << 29) | (immHi << 5);
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u)
return false;
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
return false;
offset64 >>= discardLsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount))
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
*dst = (uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
return true;
}
default:
return false;
}
}
bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
// Offset the destination by ValueOffset so the `dst` points to the
// patched word instead of the beginning of the patched region.
dst = static_cast<char*>(dst) + format.valueOffset();
switch (format.valueSize()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU8(dst, Support::readU8(dst) | mask);
return true;
}
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU16uLE(dst, Support::readU16uLE(dst) | mask);
return true;
}
case 4: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format))
return false;
Support::writeU32uLE(dst, Support::readU32uLE(dst) | mask);
return true;
}
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format))
return false;
Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
return true;
}
default:
return false;
}
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,208 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#include "../core/assembler.h"
#include "../core/codebuffer.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct OffsetFormat;
// ============================================================================
// [asmjit::CodeWriter]
// ============================================================================
//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
Error err = a->_code->growBuffer(&buffer, n);
if (ASMJIT_UNLIKELY(err))
return a->reportError(err);
_cursor = a->_bufferPtr;
}
return kErrorOk;
}
ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor += size_t(cond);
}
template<typename T>
ASMJIT_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t(v & 0xFFu);
v >>= 8;
}
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value);
for (uint32_t i = 0; i < size; i++) {
_cursor[i] = uint8_t(v >> (sizeof(T) * 8u - 8u));
v <<= 8;
}
_cursor += size;
}
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
while (++p != _cursor)
p[-1] = p[0];
_cursor--;
}
template<typename T>
ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
p[0] = p[-1];
p--;
}
*p = uint8_t(val & 0xFF);
_cursor++;
}
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
a->_bufferPtr = _cursor;
buffer._size = Support::max(buffer._size, newSize);
}
};
// ============================================================================
// [asmjit::CodeWriterUtils]
// ============================================================================
namespace CodeWriterUtils {
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
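// A minimal sketch of how this internal helper is meant to be used by an
// Assembler-side emitter (illustrative only; `emitOpcodeImm32` is an assumed
// name, not an AsmJit API, and this is a private header): reserve space, emit
// bytes through the cursor, then publish the new buffer size with done().

static Error emitOpcodeImm32(BaseAssembler* a, uint8_t opcode, uint32_t imm) noexcept {
  CodeWriter writer(a);                        // cursor starts at a->_bufferPtr
  ASMJIT_PROPAGATE(writer.ensureSpace(a, 5));  // grow the CodeBuffer if fewer than 5 bytes remain
  writer.emit8(opcode);                        // 1 opcode byte
  writer.emit32uLE(imm);                       // 4-byte little-endian immediate
  writer.done(a);                              // sync a->_bufferPtr and the buffer size
  return kErrorOk;
}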

@ -0,0 +1,628 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::GlobalConstPoolPass]
// ============================================================================
class GlobalConstPoolPass : public Pass {
typedef Pass Base;
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
DebugUtils::unused(zone, logger);
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
if (compiler->_globalConstPool) {
compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
compiler->_globalConstPool = nullptr;
}
return kErrorOk;
}
};
// ============================================================================
// [asmjit::BaseCompiler - Construction / Destruction]
// ============================================================================
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegZone(4096 - Zone::kBlockOverhead),
_vRegArray(),
_localConstPool(nullptr),
_globalConstPool(nullptr) {
_emitterType = uint8_t(kTypeCompiler);
_validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
}
BaseCompiler::~BaseCompiler() noexcept {}
// ============================================================================
// [asmjit::BaseCompiler - Function Management]
// ============================================================================
Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// If the Target guarantees greater stack alignment than required by the
// calling convention, then override it, as this avoids having to perform
// dynamic stack alignment.
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
}
ASMJIT_PROPAGATE(registerLabelNode(funcNode));
*out = funcNode;
return kErrorOk;
}
Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newFuncNode(out, signature));
addFunc(*out);
return kErrorOk;
}
Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
FuncRetNode* node;
ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
node->setOpCount(opCount);
node->setOp(0, o0);
node->setOp(1, o1);
node->resetOpRange(2, node->opCapacity());
*out = node;
return kErrorOk;
}
Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
addNode(*out);
return kErrorOk;
}
FuncNode* BaseCompiler::addFunc(FuncNode* func) {
ASMJIT_ASSERT(_func == nullptr);
_func = func;
addNode(func); // Function node.
BaseNode* prev = cursor(); // {CURSOR}.
addNode(func->exitNode()); // Function exit label.
addNode(func->endNode()); // Function end sentinel.
_setCursor(prev);
return func;
}
Error BaseCompiler::endFunc() {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
// Add the local constant pool at the end of the function (if exists).
if (_localConstPool) {
setCursor(func->endNode()->prev());
addNode(_localConstPool);
_localConstPool = nullptr;
}
// Mark as finished.
_func = nullptr;
SentinelNode* end = func->endNode();
setCursor(end);
return kErrorOk;
}
Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
return reportError(DebugUtils::errored(kErrorInvalidVirtId));
VirtReg* vReg = virtRegByReg(r);
func->setArg(argIndex, valueIndex, vReg);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseCompiler - Function Invocation]
// ============================================================================
Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));
node->setOpCount(1);
node->setOp(0, o0);
node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
if (!node->_args)
return reportError(DebugUtils::errored(kErrorOutOfMemory));
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
}
*out = node;
return kErrorOk;
}
Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
addNode(*out);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseCompiler - Virtual Registers]
// ============================================================================
static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
char buf[64];
int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}
Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
if (ASMJIT_UNLIKELY(!vReg))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
uint32_t size = Type::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0')
vReg->_name.setData(&_dataZone, name, SIZE_MAX);
else
BaseCompiler_assignGenericName(this, vReg);
#else
DebugUtils::unused(name);
#endif
_vRegArray.appendUnsafe(vReg);
*out = vReg;
return kErrorOk;
}
Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
RegInfo regInfo;
out->reset();
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
out->_initReg(regInfo.signature(), vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, typeId, sb.data());
}
Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
out->reset();
RegInfo regInfo;
uint32_t typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
// NOTE: It's possible to cast one register type to another if it's the
// same register group. However, VirtReg always contains the TypeId that
// was used to create the register. This means that in some cases we may
// end up having different size of `ref` and `vRef`. In such case we
// adjust the TypeId to match the `ref` register type instead of the
// original register type, which should be the expected behavior.
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t refSize = ref.size();
if (typeSize != refSize) {
if (Type::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = Type::kIdI8 | (typeId & 1); break;
case 2: typeId = Type::kIdI16 | (typeId & 1); break;
case 4: typeId = Type::kIdI32 | (typeId & 1); break;
case 8: typeId = Type::kIdI64 | (typeId & 1); break;
default: typeId = Type::kIdVoid; break;
}
}
else if (Type::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = Type::kIdMmx64;
}
else if (Type::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = Type::kIdMask8; break;
case 2: typeId = Type::kIdMask16; break;
case 4: typeId = Type::kIdMask32; break;
case 8: typeId = Type::kIdMask64; break;
default: typeId = Type::kIdVoid; break;
}
}
else {
// VEC register - change TypeId to match `ref` size, keep vector metadata.
uint32_t elementTypeId = Type::baseOf(typeId);
switch (refSize) {
case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
default: typeId = Type::kIdVoid; break;
}
}
if (typeId == Type::kIdVoid)
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
else {
typeId = ref.type();
}
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
out->_initReg(regInfo.signature(), vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, ref, sb.data());
}
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (size == 0)
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment == 0)
alignment = 1;
if (!Support::isPowerOf2(alignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (alignment > 64)
alignment = 64;
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
return kErrorOk;
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId))
return DebugUtils::errored(kErrorInvalidVirtId);
if (newAlignment && !Support::isPowerOf2(newAlignment))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (newAlignment > 64)
newAlignment = 64;
VirtReg* vReg = virtRegById(virtId);
if (newSize)
vReg->_virtSize = newSize;
if (newAlignment)
vReg->_alignment = uint8_t(newAlignment);
// This is required if the RAPass is already running. There is a chance that
// a stack-slot has already been allocated, and in that case it has to be
// updated as well, otherwise we would allocate the wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
workReg->_stackSlot->_alignment = vReg->_alignment;
}
return kErrorOk;
}
Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
out->reset();
ConstPoolNode** pPool;
if (scope == ConstPool::kScopeLocal)
pPool = &_localConstPool;
else if (scope == ConstPool::kScopeGlobal)
pPool = &_globalConstPool;
else
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (!*pPool)
ASMJIT_PROPAGATE(_newConstPoolNode(pPool));
ConstPoolNode* pool = *pPool;
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
*out = BaseMem(BaseMem::Decomposed {
Label::kLabelTag, // Base type.
pool->labelId(), // Base id.
0, // Index type.
0, // Index id.
int32_t(off), // Offset.
uint32_t(size), // Size.
0 // Flags.
});
return kErrorOk;
}
void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) return;
if (fmt && fmt[0] != '\0') {
char buf[128];
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
va_end(ap);
vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
}
else {
BaseCompiler_assignGenericName(this, vReg);
}
}
// ============================================================================
// [asmjit::BaseCompiler - Jump Annotations]
// ============================================================================
Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _allocator.allocT<JumpNode>();
uint32_t opCount = 1;
*out = node;
if (ASMJIT_UNLIKELY(!node))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
node->setOp(0, o0);
node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
return kErrorOk;
}
Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
uint32_t options = instOptions() | forcedInstOptions();
RegOnly extra = extraReg();
const char* comment = inlineComment();
resetInstOptions();
resetInlineComment();
resetExtraReg();
JumpNode* node;
ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));
node->setExtraReg(extra);
if (comment)
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
addNode(node);
return kErrorOk;
}
JumpAnnotation* BaseCompiler::newJumpAnnotation() {
if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
uint32_t id = _jumpAnnotations.size();
JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
if (!jumpAnnotation) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
return nullptr;
}
_jumpAnnotations.appendUnsafe(jumpAnnotation);
return jumpAnnotation;
}
// ============================================================================
// [asmjit::BaseCompiler - Events]
// ============================================================================
Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits.regTypeToSignature(nativeRegType));
Error err = addPassT<GlobalConstPoolPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
return err;
}
return kErrorOk;
}
Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
_func = nullptr;
_localConstPool = nullptr;
_globalConstPool = nullptr;
_vRegArray.reset();
_vRegZone.reset();
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::FuncPass - Construction / Destruction]
// ============================================================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
// ============================================================================
// [asmjit::FuncPass - Run]
// ============================================================================
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
if (!node) return kErrorOk;
do {
if (node->type() == BaseNode::kNodeFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `kNodeFunc`.
do {
node = node->next();
} while (node && node->type() != BaseNode::kNodeFunc);
} while (node);
return kErrorOk;
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
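// A minimal end-to-end sketch of the BaseCompiler API defined above, through
// the x86 backend (illustrative only, not part of the AsmJit sources; assumes
// JIT support is compiled in): virtual registers, argument mapping via
// setArg(), and endFunc()/finalize(), which runs register allocation.

#include <asmjit/x86.h>
using namespace asmjit;

typedef int (*AddFunc)(int, int);

int main() {
  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Compiler cc(&code);
  cc.addFunc(FuncSignatureT<int, int, int>(CallConv::kIdHost)); // FuncNode + exit label + end sentinel

  x86::Gp a = cc.newInt32("a");   // virtual registers backed by VirtReg
  x86::Gp b = cc.newInt32("b");
  cc.setArg(0, a);                // maps function arguments to virtual registers
  cc.setArg(1, b);

  cc.add(a, b);                   // operates on virtual registers
  cc.ret(a);                      // FuncRetNode
  cc.endFunc();                   // moves the cursor to the FuncEnd sentinel
  cc.finalize();                  // register allocation + serialization to machine code

  AddFunc fn;
  if (rt.add(&fn, &code) != kErrorOk)
    return 1;
  return fn(2, 3) == 5 ? 0 : 1;
}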

@ -0,0 +1,763 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
#define ASMJIT_CORE_COMPILER_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/assembler.h"
#include "../core/builder.h"
#include "../core/constpool.h"
#include "../core/compilerdefs.h"
#include "../core/func.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class JumpAnnotation;
class JumpNode;
class FuncNode;
class FuncRetNode;
class InvokeNode;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::BaseCompiler]
// ============================================================================
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register
//! allocation and automatic handling of function calling conventions. It was
//! primarily designed for merging multiple parts of code into a function
//! without worrying about registers and function calling conventions.
//!
//! BaseCompiler can be used, with minimum effort, to handle 32-bit and
//! 64-bit code generation within a single code base.
//!
//! BaseCompiler is based on BaseBuilder and contains all the features it
//! provides. This means that the code it stores can be modified (removed,
//! added, injected) and analyzed. When the code is finalized the compiler can
//! emit it into an Assembler to translate the abstract representation into
//! machine code.
//!
//! Check out the architecture-specific compilers for more details and examples:
//!
//! - \ref x86::Compiler - X86/X64 compiler implementation.
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public:
ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base;
//! Current function.
FuncNode* _func;
//! Allocates `VirtReg` objects.
Zone _vRegZone;
//! Stores array of `VirtReg` pointers.
ZoneVector<VirtReg*> _vRegArray;
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
//! Local constant pool, flushed at the end of each function.
ConstPoolNode* _localConstPool;
//! Global constant pool, flushed by `finalize()`.
ConstPoolNode* _globalConstPool;
//! \name Construction & Destruction
//! \{
//! Creates a new `BaseCompiler` instance.
ASMJIT_API BaseCompiler() noexcept;
//! Destroys the `BaseCompiler` instance.
ASMJIT_API virtual ~BaseCompiler() noexcept;
//! \}
//! \name Function Management
//! \{
//! Returns the current function.
inline FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode.
ASMJIT_API Error _newFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the compiler.
ASMJIT_API Error _addFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error _newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the compiler.
ASMJIT_API Error _addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
FuncNode* node;
_newFuncNode(&node, signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the
//! compiler by using the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
FuncNode* node;
_addFuncNode(&node, signature);
return node;
}
//! Adds a function `node` to the instruction stream.
ASMJIT_API FuncNode* addFunc(FuncNode* func);
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
ASMJIT_API Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
//! Sets a function argument at `argIndex` to `reg`.
inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
//! Sets a function argument at `argIndex` at `valueIndex` to `reg`.
inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }
inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_newRetNode(&node, o0, o1);
return node;
}
inline FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_addRetNode(&node, o0, o1);
return node;
}
//! \}
//! \name Function Invocation
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error _newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to Compiler.
ASMJIT_API Error _addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new `InvokeNode`.
inline InvokeNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_newInvokeNode(&node, instId, o0, signature);
return node;
}
//! Adds a new `InvokeNode`.
inline InvokeNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_addInvokeNode(&node, instId, o0, signature);
return node;
}
//! \}
//! \name Virtual Registers
//! \{
//! Creates a new virtual register representing the given `typeId` and `signature`.
//!
//! \note This function is public, but it's not generally recommended to be used
//! by AsmJit users, use architecture-specific `newReg()` functionality instead
//! or functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(BaseReg* out, uint32_t typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...);
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(BaseReg* out, const BaseReg& ref, const char* name = nullptr);
//! Creates a new virtual register compatible with the provided reference register `ref`.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
inline bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
inline bool isVirtRegValid(const BaseReg& reg) const noexcept {
return isVirtIdValid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between
//! id and its index is implemented by \ref Operand_::virtIdToIndex() and \ref
//! Operand_::indexToVirtId() functions.
inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
inline const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \}
//! \name Stack
//! \{
//! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
//!
//! \note `name` can be used to give the stack a name, for debugging purposes.
ASMJIT_API Error _newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name = nullptr);
//! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
//! Updates the stack size of a stack created by `_newStack()`.
inline Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
return setStackSize(mem.id(), newSize, newAlignment);
}
//! \}
//! \name Constants
//! \{
//! Creates a new constant of the given `scope` (see \ref ConstPool::Scope).
//!
//! This function adds a constant of the given `size` to the built-in \ref
//! ConstPool and stores the reference to that constant to the `out` operand.
ASMJIT_API Error _newConst(BaseMem* out, uint32_t scope, const void* data, size_t size);
//! \}
//! \name Miscellaneous
//! \{
//! Rename the given virtual register `reg` to a formatted string `fmt`.
ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
//! \}
//! \name Jump Annotations
//! \{
inline const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
ASMJIT_API Error newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate
//! possible targets of a jump where the target is not a label, for example
//! to implement jump tables.
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("alloc() has no effect, it will be removed in the future")
inline void alloc(BaseReg&) {}
ASMJIT_DEPRECATED("spill() has no effect, it will be removed in the future")
inline void spill(BaseReg&) {}
#endif // !ASMJIT_NO_DEPRECATED
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
//! \}
};
// ============================================================================
// [asmjit::JumpAnnotation]
// ============================================================================
//! Jump annotation used to annotate jumps.
//!
//! \ref BaseCompiler allows emitting jumps where the target is either a register
//! or a memory operand. Such jumps cannot be trivially inspected, so instead of
//! applying heuristics AsmJit allows annotating such jumps with their possible
//! targets. The register allocator then uses the annotation to construct the
//! control flow, which is then used by liveness analysis and other tools to
//! prepare the ground for register allocation.
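//!
//! A minimal illustrative sketch (not from the original documentation),
//! assuming the x86 backend, an `x86::Compiler cc`, labels `targetA` and
//! `targetB`, and a register `jumpTarget` that were created earlier:
//!
//! \code{.cpp}
//! JumpAnnotation* annotation = cc.newJumpAnnotation();
//! annotation->addLabel(targetA);  // possible target #1
//! annotation->addLabel(targetB);  // possible target #2
//! cc.emitAnnotatedJump(x86::Inst::kIdJmp, jumpTarget, annotation);
//! \endcode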
class JumpAnnotation {
public:
ASMJIT_NONCOPYABLE(JumpAnnotation)
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
uint32_t _annotationId;
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
//! Returns the compiler that owns this JumpAnnotation.
inline BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
inline uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump.
const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation.
inline bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! Adds the `label` to the list of targets of this JumpAnnotation.
inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
};
// ============================================================================
// [asmjit::JumpNode]
// ============================================================================
//! Jump instruction with \ref JumpAnnotation.
//!
//! \note This node should only be used to represent a jump whose target cannot
//! be deduced by examining the instruction operands, for example when the jump
//! target is a register or a memory location. This pattern is often used to
//! perform indirect jumps through a jump table, e.g. to implement a `switch`
//! statement.
class JumpNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(JumpNode)
JumpAnnotation* _annotation;
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNode(cc, instId, options, opCount, kBaseOpCapacity),
_annotation(annotation) {
setType(kNodeJump);
}
//! \}
//! \name Accessors
//! \{
//! Tests whether this JumpNode has associated a \ref JumpAnnotation.
inline bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
inline JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
inline void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
//! \}
};
// ============================================================================
// [asmjit::FuncNode]
// ============================================================================
//! Function node represents a function used by \ref BaseCompiler.
//!
//! A function is composed of the following:
//!
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit.
//! To get the entry, simply use \ref FuncNode::label(), which is the same
//! as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A
//! helper function \ref FuncNode::exitLabel() exists and returns an exit
//! label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of
//! a function - there should be no code that belongs to the function after
//! this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::args().
//!
//! In a node list, the function and its body looks like the following:
//!
//! \code{.unparsed}
//! [...] - Anything before the function.
//!
//! [FuncNode] - Entry point of the function, acts as a label as well.
//! <Prolog> - Prolog inserted by the register allocator.
//! {...} - Function body - user code basically.
//! [ExitLabel] - Exit label
//! <Epilog> - Epilog inserted by the register allocator.
//! <Return> - Return inserted by the register allocator.
//! {...} - Can contain data or user code (error handling, special cases, ...).
//! [FuncEnd] - End sentinel
//!
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the compiler by \ref BaseCompiler::addFunc() it
//! actually inserts 3 nodes (FuncNode, ExitLabel, and FuncEnd) and sets the
//! current cursor to the FuncNode. When \ref BaseCompiler::endFunc() is called
//! the cursor is set to FuncEnd. This guarantees that the user can use the
//! ExitLabel as a marker after which additional code or data can be placed,
//! which is a common practice.
class FuncNode : public LabelNode {
public:
ASMJIT_NONCOPYABLE(FuncNode)
//! Arguments pack.
struct ArgPack {
VirtReg* _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex] = nullptr;
}
inline VirtReg*& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline VirtReg* const& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
};
//! Function detail.
FuncDetail _funcDetail;
//! Function frame.
FuncFrame _frame;
//! Function exit label.
LabelNode* _exitNode;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
ArgPack* _args;
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
: LabelNode(cb),
_funcDetail(),
_frame(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr) {
setType(kNodeFunc);
}
//! \}
//! \name Accessors
//! \{
//! Returns function exit `LabelNode`.
inline LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label.
inline Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel.
inline SentinelNode* endNode() const noexcept { return _end; }
//! Returns function declaration.
inline FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function declaration.
inline const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
inline FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame.
inline const FuncFrame& frame() const noexcept { return _frame; }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns arguments count.
inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
inline ArgPack* argPacks() const noexcept { return _args; }
//! Returns argument pack at `argIndex`.
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, VirtReg* vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0] = vReg;
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, VirtReg* vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = vReg;
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex].reset();
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = nullptr;
}
//! Returns function attributes.
inline uint32_t attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
//! \}
};
// ============================================================================
// [asmjit::FuncRetNode]
// ============================================================================
//! Function return, used by \ref BaseCompiler.
class FuncRetNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(FuncRetNode)
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
_any._nodeType = kNodeFuncRet;
}
//! \}
};
// ============================================================================
// [asmjit::InvokeNode]
// ============================================================================
//! Function invocation, used by \ref BaseCompiler.
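//!
//! A minimal illustrative sketch (not from the original documentation),
//! assuming the x86 backend, an `x86::Compiler cc`, virtual registers `x`,
//! `y` and `sum`, and a native function pointer `targetPtr` of type
//! `int (*)(int, int)`:
//!
//! \code{.cpp}
//! InvokeNode* invokeNode = cc.addCall(
//!   x86::Inst::kIdCall, imm(uintptr_t(targetPtr)),
//!   FuncSignatureT<int, int, int>(CallConv::kIdHost));
//! invokeNode->setArg(0, x);    // operands feeding the call
//! invokeNode->setArg(1, y);
//! invokeNode->setRet(0, sum);  // operand receiving the return value
//! \endcode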
class InvokeNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(InvokeNode)
//! Operand pack provides multiple operands that can be associated with a
//! single return value or function argument. Sometimes this is necessary to
//! express an argument or return value that requires multiple registers, for
//! example a 64-bit value in 32-bit mode or passing / returning homogeneous
//! data structures.
struct OperandPack {
//! Operands.
Operand_ _data[Globals::kMaxValuePack];
//! Reset the pack by resetting all operands in the pack.
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
}
//! Returns an operand at the given `valueIndex`.
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
inline const Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
}
};
//! Function detail.
FuncDetail _funcDetail;
//! Function return value(s).
OperandPack _rets;
//! Function arguments.
OperandPack* _args;
//! \name Construction & Destruction
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
: InstNode(cb, instId, options, kBaseOpCapacity),
_funcDetail(),
_args(nullptr) {
setType(kNodeInvoke);
_resetOps();
_rets.reset();
addFlags(kFlagIsRemovable);
}
//! \}
//! \name Accessors
//! \{
//! Sets the function signature.
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
}
//! Returns the function detail.
inline FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail.
inline const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand.
inline Operand& target() noexcept { return _opArray[0].as<Operand>(); }
//! \overload
inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments.
inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s).
inline OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
inline const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
inline Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload
inline const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns an operand pack representing function argument(s) at `argIndex`.
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! \overload
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
}
//! Returns a function argument at the given `argIndex`.
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! \overload
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
}
//! Sets the function return value at `valueIndex` to `op`.
inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
//! Sets the function argument at `argIndex` and `valueIndex` to `op`.
inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = op;
}
//! Sets the function return value at `valueIndex` to `reg`.
inline void setRet(size_t valueIndex, const BaseReg& reg) noexcept { _setRet(valueIndex, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `reg`.
inline void setArg(size_t argIndex, const BaseReg& reg) noexcept { _setArg(argIndex, 0, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `imm`.
inline void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
//! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
//! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
inline void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
//! \}
};
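// A minimal sketch of wiring an InvokeNode (hedged: creation of the node via
// the concrete compiler, e.g. x86::Compiler::invoke(), and the registers
// `dst`, `a`, `b` are assumptions and not part of this header):
//
//   InvokeNode* invokeNode = ...;               // created by the compiler
//   invokeNode->setArg(0, a);                   // first argument
//   invokeNode->setArg(1, b);                   // second argument
//   invokeNode->setRet(0, dst);                 // where the return value goes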
// ============================================================================
// [asmjit::FuncPass]
// ============================================================================
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
typedef Pass Base;
//! \name Construction & Destruction
//! \{
ASMJIT_API FuncPass(const char* name) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the associated `BaseCompiler`.
inline BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \}
//! \name Run
//! \{
//! Calls `runOnFunction()` on each `FuncNode` node found.
ASMJIT_API Error run(Zone* zone, Logger* logger) override;
//! Called once per `FuncNode`.
virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) = 0;
//! \}
};
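// A sketch of a user-defined pass built on top of FuncPass (hedged: the class
// name and the empty body are illustrative only; registering the pass with the
// compiler is not shown here):
//
//   class MyFuncPass : public FuncPass {
//   public:
//     MyFuncPass() noexcept : FuncPass("MyFuncPass") {}
//     Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override {
//       DebugUtils::unused(zone, logger, func);
//       return kErrorOk;                        // inspect or transform `func` here
//     }
//   };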
//! \}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_COMPILER_H_INCLUDED

@ -0,0 +1,170 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/operand.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::VirtReg]
// ============================================================================
//! Virtual register data, managed by \ref BaseCompiler.
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register info (signature).
RegInfo _info = {};
//! Virtual register size (can be smaller than `regInfo._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
uint8_t _typeId = 0;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
uint8_t _isFixed : 1;
//! True if the virtual register is only used as a stack (never accessed as register).
uint8_t _isStack : 1;
uint8_t _reserved : 6;
//! Virtual register name (user provided or automatically generated).
ZoneString<16> _name {};
// -------------------------------------------------------------------------
// The following members are used exclusively by RAPass. They are initialized
// when the VirtReg is created to NULL pointers and then changed during RAPass
// execution. RAPass sets them back to NULL before it returns.
// -------------------------------------------------------------------------
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
//! \name Construction & Destruction
//! \{
inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
: _id(id),
_info { signature },
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(uint8_t(typeId)),
_isFixed(false),
_isStack(false),
_reserved(0) {}
//! \}
//! \name Accessors
//! \{
//! Returns the virtual register id.
inline uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
inline const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
inline uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns the register information that wraps the register signature.
inline const RegInfo& info() const noexcept { return _info; }
//! Returns a virtual register type (maps to the physical register type as well).
inline uint32_t type() const noexcept { return _info.type(); }
//! Returns a virtual register group (maps to the physical register group as well).
inline uint32_t group() const noexcept { return _info.group(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example, if this is a 128-bit SIMD register used for a scalar single
//! precision floating point value, then its virtSize would be 4; however, the
//! `regSize` would still say 16 (128 bits), because that is the smallest size
//! of that register type.
inline uint32_t regSize() const noexcept { return _info.size(); }
//! Returns a register signature of this virtual register.
inline uint32_t signature() const noexcept { return _info.signature(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register
//! needs to store its content. It can be smaller than the physical register
//! size, see `regSize()`.
inline uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
inline uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id, see `Type::Id`.
inline uint32_t typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it
//! as an explicit hint for alloc/spill decisions.
inline uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can
//! use it as an explicit hint for alloc/spill decisions and initial bin-packing.
inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed
//! physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
inline bool isFixed() const noexcept { return bool(_isFixed); }
//! Returns whether the virtual register is actually a stack area that only
//! uses the virtual register id to make it addressable.
//!
//! \note It's an error if a stack is accessed as a register.
inline bool isStack() const noexcept { return bool(_isStack); }
inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
inline RAWorkReg* workReg() const noexcept { return _workReg; }
inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
inline void resetWorkReg() noexcept { _workReg = nullptr; }
//! \}
};
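// A short usage sketch (hedged: virtual registers are normally created and
// owned by the compiler - the creation call is assumed here and only the
// accessors above are shown):
//
//   VirtReg* vReg = ...;                        // obtained from the compiler
//   if (vReg->regSize() > vReg->virtSize()) {
//     // e.g. a 128-bit SIMD register holding a 4-byte float
//   }
//   vReg->setWeight(10);                        // hint the register allocator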
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED

@ -0,0 +1,375 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/constpool.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_gaps[i] = nullptr;
dataSize <<= 1;
}
_gapPool = nullptr;
_size = 0;
_alignment = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap)
return self->_zone->allocT<ConstPool::Gap>();
self->_gapPool = gap->_next;
return gap;
}
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
ASMJIT_ASSERT(size > 0);
while (size > 0) {
size_t gapIndex;
size_t gapSize;
if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
}
else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapSize = 8;
}
else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapSize = 4;
}
else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapSize = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapSize = 1;
}
// We don't have to check for errors here; if this failed, nothing really
// happened (the gap just won't be visible) and the allocation will fail again
// at a place where the same check would generate a `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap)
return;
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_offset = offset;
gap->_size = gapSize;
offset += gapSize;
size -= gapSize;
}
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return DebugUtils::errored(kErrorInvalidArgument);
ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) {
dstOffset = node->_offset;
return kErrorOk;
}
// Before incrementing the current offset, check whether there is a gap that
// can be used for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
// Check if there is a gap.
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapSize = gap->_size;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0)
ConstPool_addGap(this, gapOffset, gapSize);
}
gapIndex++;
}
if (offset == ~size_t(0)) {
// Get how many bytes have to be skipped so the address is aligned according
// to `size`.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
_size += diff;
}
offset = _size;
_size += size;
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (!node) return DebugUtils::errored(kErrorOutOfMemory);
_tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
dstOffset = offset;
// Now create a bunch of shared constants that are based on the data pattern.
// We stop at size 4; it probably doesn't make sense to split constants down
// to 1 byte.
size_t pCount = 1;
while (size > 4) {
size >>= 1;
pCount <<= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += size) {
node = _tree[treeIndex].get(pData);
if (node) continue;
node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
_tree[treeIndex].insert(node);
}
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared)
memcpy(_dst + node->_offset, node->data(), _dataSize);
}
uint8_t* _dst;
size_t _dataSize;
};
void ConstPool::fill(void* dst) const noexcept {
// Clear possible gaps; asmjit should never emit garbage to the output.
memset(dst, 0, _size);
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].forEach(filler);
filler._dataSize <<= 1;
}
}
// ============================================================================
// [asmjit::ConstPool - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32384 - Zone::kBlockOverhead);
ConstPool pool(&zone);
uint32_t i;
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
INFO("Adding %u constants to the pool", kCount);
{
size_t prevOffset;
size_t curOffset;
uint64_t c = 0x0101010101010101u;
EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
EXPECT(prevOffset == 0);
for (i = 1; i < kCount; i++) {
c++;
EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
EXPECT(prevOffset + 8 == curOffset);
EXPECT(pool.size() == (i + 1) * 8);
prevOffset = curOffset;
}
EXPECT(pool.alignment() == 8);
}
INFO("Retrieving %u constants from the pool", kCount);
{
uint64_t c = 0x0101010101010101u;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk);
EXPECT(offset == i * 8);
c++;
}
}
INFO("Checking if the constants were split into 4-byte patterns");
{
uint32_t c = 0x01010101;
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT(pool.add(&c, 4, offset) == kErrorOk);
EXPECT(offset == i * 8);
c++;
}
}
INFO("Adding 2 byte constant to misalign the current offset");
{
uint16_t c = 0xFFFF;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk);
EXPECT(offset == kCount * 8);
EXPECT(pool.alignment() == 8);
}
INFO("Adding 8 byte constant to check if pool gets aligned again");
{
uint64_t c = 0xFFFFFFFFFFFFFFFFu;
size_t offset;
EXPECT(pool.add(&c, 8, offset) == kErrorOk);
EXPECT(offset == kCount * 8 + 8);
}
INFO("Adding 2 byte constant to verify the gap is filled");
{
uint16_t c = 0xFFFE;
size_t offset;
EXPECT(pool.add(&c, 2, offset) == kErrorOk);
EXPECT(offset == kCount * 8 + 2);
EXPECT(pool.alignment() == 8);
}
INFO("Checking reset functionality");
{
pool.reset(&zone);
zone.reset();
EXPECT(pool.size() == 0);
EXPECT(pool.alignment() == 0);
}
INFO("Checking pool alignment when combined constants are added");
{
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
EXPECT(pool.size() == 1);
EXPECT(pool.alignment() == 1);
EXPECT(offset == 0);
pool.add(bytes, 2, offset);
EXPECT(pool.size() == 4);
EXPECT(pool.alignment() == 2);
EXPECT(offset == 2);
pool.add(bytes, 4, offset);
EXPECT(pool.size() == 8);
EXPECT(pool.alignment() == 4);
EXPECT(offset == 4);
pool.add(bytes, 4, offset);
EXPECT(pool.size() == 8);
EXPECT(pool.alignment() == 4);
EXPECT(offset == 4);
pool.add(bytes, 32, offset);
EXPECT(pool.size() == 64);
EXPECT(pool.alignment() == 32);
EXPECT(offset == 32);
}
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,262 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
//! Constant pool.
class ConstPool {
public:
ASMJIT_NONCOPYABLE(ConstPool)
//! Constant pool scope.
enum Scope : uint32_t {
//! Local constant, always embedded right after the current function.
kScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kScopeGlobal = 1
};
//! \cond INTERNAL
//! Index of a given size in const-pool table.
enum Index : uint32_t {
kIndex1 = 0,
kIndex2 = 1,
kIndex4 = 2,
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndexCount = 6
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
struct Gap {
//! Pointer to the next gap.
Gap* _next;
//! Offset of the gap.
size_t _offset;
//! Remaining bytes of the gap (basically a gap size).
size_t _size;
};
//! Zone-allocated const-pool node.
class Node : public ZoneTreeNodeT<Node> {
public:
ASMJIT_NONCOPYABLE(Node)
//! True if this constant is shared with another.
uint32_t _shared : 1;
//! Data offset from the beginning of the pool.
uint32_t _offset;
inline Node(size_t offset, bool shared) noexcept
: ZoneTreeNodeT<Node>(),
_shared(shared),
_offset(uint32_t(offset)) {}
inline void* data() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
};
//! Data comparer used internally.
class Compare {
public:
size_t _dataSize;
inline Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
inline int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
}
inline int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
}
};
//! Zone-allocated const-pool tree.
struct Tree {
//! RB tree.
ZoneTree<Node> _tree;
//! Size of the tree (number of nodes).
size_t _size;
//! Size of the data.
size_t _dataSize;
inline explicit Tree(size_t dataSize = 0) noexcept
: _tree(),
_size(0),
_dataSize(dataSize) {}
inline void reset() noexcept {
_tree.reset();
_size = 0;
}
inline bool empty() const noexcept { return _size == 0; }
inline size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(empty());
_dataSize = dataSize;
}
inline Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
return _tree.get(data, cmp);
}
inline void insert(Node* node) noexcept {
Compare cmp(_dataSize);
_tree.insert(node, cmp);
_size++;
}
template<typename Visitor>
inline void forEach(Visitor& visitor) const noexcept {
Node* node = _tree.root();
if (!node) return;
Node* stack[Globals::kMaxTreeHeight];
size_t top = 0;
for (;;) {
Node* left = node->left();
if (left != nullptr) {
ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
stack[top++] = node;
node = left;
continue;
}
for (;;) {
visitor(node);
node = node->right();
if (node != nullptr)
break;
if (top == 0)
return;
node = stack[--top];
}
}
}
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (ASMJIT_UNLIKELY(!node)) return nullptr;
node = new(node) Node(offset, shared);
memcpy(node->data(), data, size);
return node;
}
};
//! \endcond
//! Zone allocator.
Zone* _zone;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gaps pool.
Gap* _gapPool;
//! Size of the pool (in bytes).
size_t _size;
//! Required pool alignment.
size_t _alignment;
//! \name Construction & Destruction
//! \{
ASMJIT_API ConstPool(Zone* zone) noexcept;
ASMJIT_API ~ConstPool() noexcept;
ASMJIT_API void reset(Zone* zone) noexcept;
//! \}
//! \name Accessors
//! \{
//! Tests whether the constant-pool is empty.
inline bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
inline size_t size() const noexcept { return _size; }
//! Returns minimum alignment.
inline size_t alignment() const noexcept { return _alignment; }
//! \}
//! \name Utilities
//! \{
//! Adds a constant to the constant pool.
//!
//! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
//! The constant is added to the pool only if it doesn't exist yet; otherwise
//! the offset of the cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add
//! 8-byte constant 0x1122334455667788 it will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used
//! frequently. However, AsmJit is not able to reallocate a constant that has
//! been already added. For example if you try to add 4-byte constant and then
//! 8-byte constant having the same 4-byte pattern as the previous one, two
//! independent slots will be generated by the pool.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
//! Fills the destination with the content of this constant pool.
ASMJIT_API void fill(void* dst) const noexcept;
};
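// A minimal usage sketch (hedged: buffer management is illustrative; in real
// code the pool is usually owned by the emitter / code holder):
//
//   Zone zone(4096);
//   ConstPool pool(&zone);
//
//   uint64_t c = 0x1122334455667788u;
//   size_t offset;
//   if (pool.add(&c, sizeof(c), offset) == kErrorOk) {
//     // `offset` is the constant's position; allocate pool.size() bytes
//     // aligned to pool.alignment() and call pool.fill(dst) to emit it.
//   }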
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED

@ -0,0 +1,97 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/cpuinfo.h"
#if !defined(_WIN32)
#include <errno.h>
#include <sys/utsname.h>
#include <unistd.h>
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::CpuInfo - Detect - CPU NumThreads]
// ============================================================================
#if defined(_WIN32)
static inline uint32_t detectHWThreadCount() noexcept {
SYSTEM_INFO info;
::GetSystemInfo(&info);
return info.dwNumberOfProcessors;
}
#elif defined(_SC_NPROCESSORS_ONLN)
static inline uint32_t detectHWThreadCount() noexcept {
long res = ::sysconf(_SC_NPROCESSORS_ONLN);
return res <= 0 ? uint32_t(1) : uint32_t(res);
}
#else
static inline uint32_t detectHWThreadCount() noexcept {
return 1;
}
#endif
// ============================================================================
// [asmjit::CpuInfo - Detect - CPU Features]
// ============================================================================
#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
#endif
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
#endif
// ============================================================================
// [asmjit::CpuInfo - Detect - Static Initializer]
// ============================================================================
static uint32_t cpuInfoInitialized;
static CpuInfo cpuInfoGlobal(Globals::NoInit);
const CpuInfo& CpuInfo::host() noexcept {
// This should never cause a problem as the resulting information should
// always be the same.
if (!cpuInfoInitialized) {
CpuInfo cpuInfoLocal;
#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
x86::detectCpu(cpuInfoLocal);
#endif
#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
arm::detectCpu(cpuInfoLocal);
#endif
cpuInfoLocal._hwThreadCount = detectHWThreadCount();
cpuInfoGlobal = cpuInfoLocal;
cpuInfoInitialized = 1;
}
return cpuInfoGlobal;
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,154 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
#define ASMJIT_CORE_CPUINFO_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/features.h"
#include "../core/globals.h"
#include "../core/string.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! CPU information.
class CpuInfo {
public:
//! Architecture.
uint8_t _arch;
//! Sub-architecture.
uint8_t _subArch;
//! Reserved for future use.
uint16_t _reserved;
//! CPU family ID.
uint32_t _familyId;
//! CPU model ID.
uint32_t _modelId;
//! CPU brand ID.
uint32_t _brandId;
//! CPU stepping.
uint32_t _stepping;
//! Processor type.
uint32_t _processorType;
//! Maximum number of addressable IDs for logical processors.
uint32_t _maxLogicalProcessors;
//! Cache line size (in bytes).
uint32_t _cacheLineSize;
//! Number of hardware threads.
uint32_t _hwThreadCount;
//! CPU vendor string.
FixedString<16> _vendor;
//! CPU brand string.
FixedString<64> _brand;
//! CPU features.
BaseFeatures _features;
//! \name Construction & Destruction
//! \{
inline CpuInfo() noexcept { reset(); }
inline CpuInfo(const CpuInfo& other) noexcept = default;
inline explicit CpuInfo(Globals::NoInit_) noexcept
: _features(Globals::NoInit) {};
//! Returns the host CPU information.
ASMJIT_API static const CpuInfo& host() noexcept;
//! Initializes CpuInfo to the given architecture, see \ref Environment.
inline void initArch(uint32_t arch, uint32_t subArch = 0u) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
}
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \}
//! \name Overloaded Operators
//! \{
inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
//! \}
//! \name Accessors
//! \{
//! Returns the CPU architecture id, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the CPU architecture sub-id, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns the CPU family ID.
inline uint32_t familyId() const noexcept { return _familyId; }
//! Returns the CPU model ID.
inline uint32_t modelId() const noexcept { return _modelId; }
//! Returns the CPU brand id.
inline uint32_t brandId() const noexcept { return _brandId; }
//! Returns the CPU stepping.
inline uint32_t stepping() const noexcept { return _stepping; }
//! Returns the processor type.
inline uint32_t processorType() const noexcept { return _processorType; }
//! Returns the number of maximum logical processors.
inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
//! Returns the size of a cache line flush.
inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
//! Returns number of hardware threads available.
inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
//! Returns the CPU vendor.
inline const char* vendor() const noexcept { return _vendor.str; }
//! Tests whether the CPU vendor is equal to `s`.
inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
//! Returns the CPU brand string.
inline const char* brand() const noexcept { return _brand.str; }
//! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
//! if needed.
template<typename T = BaseFeatures>
inline const T& features() const noexcept { return _features.as<T>(); }
//! Tests whether the CPU has the given `feature`.
inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `feature` to the list of this CpuInfo features.
inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
//! \}
};
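// A minimal usage sketch (hedged: the x86-specific feature query is only
// meaningful when the X86 backend is compiled in):
//
//   const CpuInfo& cpu = CpuInfo::host();
//   uint32_t threads = cpu.hwThreadCount();
//   #if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
//   bool avx2 = cpu.features<x86::Features>().hasAVX2();
//   #endif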
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_CPUINFO_H_INCLUDED

File diff suppressed because it is too large

@ -0,0 +1,351 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/emithelper_p.h"
#include "../core/formatter.h"
#include "../core/funcargscontext_p.h"
#include "../core/radefs_p.h"
// Can be used for debugging...
// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitHelper - Formatting]
// ============================================================================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, uint32_t arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
if (value.isIndirect())
sb.append('[');
if (value.isReg())
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
else if (value.isStack())
sb.appendFormat("[%d]", value.stackOffset());
else
sb.append("<none>");
if (value.isIndirect())
sb.append(']');
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var;
uint32_t arch = ctx.arch();
uint32_t varCount = ctx.varCount();
for (uint32_t i = 0; i < varCount; i++) {
const Var& var = ctx.var(i);
const FuncValue& dst = var.out;
const FuncValue& cur = var.cur;
sb.appendFormat("Var%u: ", i);
dumpFuncValue(sb, arch, dst);
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
if (var.isDone())
sb.append(" {Done}");
sb.append('\n');
}
}
#endif
// ============================================================================
// [asmjit::BaseEmitHelper - EmitArgsAssignment]
// ============================================================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var;
typedef FuncArgsContext::WorkData WorkData;
enum WorkFlags : uint32_t {
kWorkNone = 0x00,
kWorkDidSome = 0x01,
kWorkPending = 0x02,
kWorkPostponed = 0x04
};
uint32_t arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
RAConstraints constraints;
FuncArgsContext ctx;
ASMJIT_PROPAGATE(constraints.init(arch));
ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
{
String sb;
dumpAssignment(sb, ctx);
printf("%s\n", sb.data());
}
#endif
uint32_t varCount = ctx._varCount;
WorkData* workData = ctx._workData;
uint32_t saVarId = ctx._saVarId;
BaseReg sp = BaseReg::fromSignatureAndId(_emitter->_gpRegInfo.signature(), archTraits.spRegId());
BaseReg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP())
sa.setId(archTraits.fpRegId());
else
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// --------------------------------------------------------------------------
// Register to stack and stack to stack moves must be done first, as at this
// point we have the best chance of having as many unassigned registers as possible.
// --------------------------------------------------------------------------
if (ctx._stackDstMask) {
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem baseStackPtr(sp, 0);
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (!var.out.isStack())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
ASMJIT_ASSERT(cur.isReg() || cur.isStack());
BaseReg reg;
BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
if (cur.isIndirect()) {
if (cur.isStack()) {
// TODO: Indirect stack.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
srcStackPtr.setBaseId(cur.regId());
}
}
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[archTraits.regTypeToGroup(cur.regType())];
uint32_t rId = cur.regId();
reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), rId);
wd.unassign(varId, rId);
}
else {
// Stack to stack move - tricky because the move goes through a temporary
// register that we can choose. In general we follow the rule that IntToInt
// moves will use GP regs with the possibility to sign or zero extend, and all
// other moves will either use GP or VEC regs depending on the size of the move.
RegInfo rInfo = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!rInfo.isValid()))
return DebugUtils::errored(kErrorInvalidState);
WorkData& wd = workData[rInfo.group()];
uint32_t availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorInvalidState);
uint32_t rId = Support::ctz(availableRegs);
reg.setSignatureAndId(rInfo.signature(), rId);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg())
workData[BaseReg::kGroupGp].unassign(varId, cur.regId());
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
var.markDone();
}
}
// --------------------------------------------------------------------------
// Shuffle all registers that are currently assigned according to the target
// assignment.
// --------------------------------------------------------------------------
uint32_t workFlags = kWorkNone;
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg())
continue;
FuncValue& cur = var.cur;
FuncValue& out = var.out;
uint32_t curGroup = archTraits.regTypeToGroup(cur.regType());
uint32_t outGroup = archTraits.regTypeToGroup(out.regType());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
if (curGroup != outGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
else {
WorkData& wd = workData[outGroup];
if (!wd.isAssigned(outId)) {
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
wd.reassign(varId, outId, curId);
cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId())
var.markDone();
workFlags |= kWorkDidSome | kWorkPending;
}
else {
uint32_t altId = wd._physToVarId[outId];
Var& altVar = ctx._vars[altId];
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
// Only a few architectures provide swap operations, and only for a few register groups.
if (archTraits.hasSwap(curGroup)) {
uint32_t highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween<uint32_t>(highestType, BaseReg::kTypeGp8Lo, BaseReg::kTypeGp16))
highestType = BaseReg::kTypeGp32;
uint32_t signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE(
emitRegSwap(BaseReg::fromSignatureAndId(signature, outId),
BaseReg::fromSignatureAndId(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
if (altVar.out.isInitialized())
altVar.markDone();
workFlags |= kWorkDidSome;
}
else {
// If there is a scratch register it can be used to perform the swap.
uint32_t availableRegs = wd.availableRegs();
if (availableRegs) {
uint32_t inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs)
availableRegs &= ~inOutRegs;
outId = Support::ctz(availableRegs);
goto EmitMove;
}
else {
workFlags |= kWorkPending;
}
}
}
else {
workFlags |= kWorkPending;
}
}
}
}
if (!(workFlags & kWorkPending))
break;
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
return DebugUtils::errored(kErrorInvalidState);
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// --------------------------------------------------------------------------
// Load arguments passed by stack into registers. This is pretty simple and
// it never requires multiple iterations like the previous phase.
// --------------------------------------------------------------------------
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone())
continue;
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());
uint32_t outId = var.out.regId();
uint32_t outType = var.out.regType();
uint32_t group = archTraits.regTypeToGroup(outType);
WorkData& wd = ctx._workData[group];
if (outId == sa.id() && group == BaseReg::kGroupGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
continue;
}
wd.unassign(wd._physToVarId[outId], outId);
}
BaseReg dstReg = BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
ASMJIT_PROPAGATE(emitArgMove(
dstReg, var.out.typeId(),
srcMem, var.cur.typeId()));
wd.assign(varId, outId);
var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
}
}
}
}
return kErrorOk;
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,83 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseEmitHelper]
// ============================================================================
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
BaseEmitter* _emitter;
inline explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
: _emitter(emitter) {}
inline BaseEmitter* emitter() const noexcept { return _emitter; }
inline void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers of the same type or
//! between a register and its home slot. This function does not handle
//! register conversion.
virtual Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, uint32_t typeId, const char* comment = nullptr) = 0;
//! Emits swap between two registers.
virtual Error emitRegSwap(
const BaseReg& a,
const BaseReg& b, const char* comment = nullptr) = 0;
//! Emits move from a function argument (either register or stack) to a register.
//!
//! This function can handle the necessary conversion from one argument to
//! another, and from one register type to another, if possible. Any
//! attempted conversion that requires a third register of a different group
//! (for example conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const BaseReg& dst_, uint32_t dstTypeId,
const Operand_& src_, uint32_t srcTypeId, const char* comment = nullptr) = 0;
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};
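// A sketch of an architecture-specific helper derived from the class above
// (hedged: the overrides are placeholders; real backends such as x86::EmitHelper
// implement them with actual instruction selection):
//
//   class MyEmitHelper : public BaseEmitHelper {
//   public:
//     using BaseEmitHelper::BaseEmitHelper;
//     Error emitRegMove(const Operand_& dst, const Operand_& src,
//                       uint32_t typeId, const char* comment = nullptr) override;
//     Error emitRegSwap(const BaseReg& a, const BaseReg& b,
//                       const char* comment = nullptr) override;
//     Error emitArgMove(const BaseReg& dst, uint32_t dstTypeId,
//                       const Operand_& src, uint32_t srcTypeId,
//                       const char* comment = nullptr) override;
//   };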
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITHELPER_P_H_INCLUDED

@ -0,0 +1,416 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/emitterutils_p.h"
#include "../core/errorhandler.h"
#include "../core/logger.h"
#include "../core/support.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86emithelper_p.h"
#include "../x86/x86instdb_p.h"
#endif // ASMJIT_BUILD_X86
#ifdef ASMJIT_BUILD_ARM
#include "../arm/a64emithelper_p.h"
#include "../arm/a64instdb.h"
#endif // ASMJIT_BUILD_ARM
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitter - Construction / Destruction]
// ============================================================================
BaseEmitter::BaseEmitter(uint32_t emitterType) noexcept
: _emitterType(uint8_t(emitterType)) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(kFlagDestroyed);
_code->detach(this);
}
}
// ============================================================================
// [asmjit::BaseEmitter - Finalize]
// ============================================================================
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseEmitter - Internals]
// ============================================================================
static constexpr uint32_t kEmitterPreservedFlags = BaseEmitter::kFlagOwnLogger | BaseEmitter::kFlagOwnErrorHandler;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool emitComments = false;
bool hasValidationOptions = false;
if (self->emitterType() == BaseEmitter::kTypeAssembler) {
// Assembler: Don't emit comments if logger is not attached.
emitComments = self->_code != nullptr && self->_logger != nullptr;
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionAssembler);
}
else {
// Builder/Compiler: Always emit comments, we cannot assume they won't be used.
emitComments = self->_code != nullptr;
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionIntermediate);
}
if (emitComments)
self->_addEmitterFlags(BaseEmitter::kFlagLogComments);
else
self->_clearEmitterFlags(BaseEmitter::kFlagLogComments);
// The reserved option tells the emitter (Assembler/Builder/Compiler) that there
// may be either a border case (CodeHolder not attached, for example) or that
// logging or validation is required.
if (self->_code == nullptr || self->_logger || hasValidationOptions)
self->_forcedInstOptions |= BaseInst::kOptionReserved;
else
self->_forcedInstOptions &= ~BaseInst::kOptionReserved;
}
// ============================================================================
// [asmjit::BaseEmitter - Validation Options]
// ============================================================================
void BaseEmitter::addValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions | options);
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions & ~options);
BaseEmitter_updateForcedOptions(this);
}
// ============================================================================
// [asmjit::BaseEmitter - Logging]
// ============================================================================
void BaseEmitter::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(kFlagOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(kFlagOwnLogger);
if (_code)
_logger = _code->logger();
}
BaseEmitter_updateForcedOptions(this);
#else
DebugUtils::unused(logger);
#endif
}
// ============================================================================
// [asmjit::BaseEmitter - Error Handling]
// ============================================================================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(kFlagOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(kFlagOwnErrorHandler);
if (_code)
_errorHandler = _code->errorHandler();
}
}
Error BaseEmitter::reportError(Error err, const char* message) {
ErrorHandler* eh = _errorHandler;
if (eh) {
if (!message)
message = DebugUtils::errorAsString(err);
eh->handleError(err, message, this);
}
return err;
}
// ============================================================================
// [asmjit::BaseEmitter - Labels]
// ============================================================================
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
}
// ============================================================================
// [asmjit::BaseEmitter - Emit (Low-Level)]
// ============================================================================
using EmitterUtils::noExt;
Error BaseEmitter::_emitI(uint32_t instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
const Operand_* op = operands;
Operand_ opExt[3];
switch (opCount) {
case 0:
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
case 1:
return _emit(instId, op[0], noExt[1], noExt[2], noExt);
case 2:
return _emit(instId, op[0], op[1], noExt[2], noExt);
case 3:
return _emit(instId, op[0], op[1], op[2], noExt);
case 4:
opExt[0] = op[3];
opExt[1].reset();
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 5:
opExt[0] = op[3];
opExt[1] = op[4];
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
case 6:
return _emit(instId, op[0], op[1], op[2], op + 3);
default:
return DebugUtils::errored(kErrorInvalidArgument);
}
}
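// Illustration only (not part of the upstream sources): a short sketch of the
// operand-array form, assuming an `x86::Assembler a` attached to a CodeHolder.
// It is equivalent to calling `a.emit(x86::Inst::kIdAdd, x86::eax, 1)`:
//
//   Operand ops[2] = { x86::eax, Imm(1) };
//   a.emitOpArray(x86::Inst::kIdAdd, ops, 2);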
// ============================================================================
// [asmjit::BaseEmitter - Emit (High-Level)]
// ============================================================================
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitProlog(frame);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitProlog(frame);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitEpilog(frame);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitEpilog(frame);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifdef ASMJIT_BUILD_X86
if (environment().isFamilyX86()) {
x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
return emitHelper.emitArgsAssignment(frame, args);
}
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment().isArchAArch64()) {
a64::EmitHelper emitHelper(this);
return emitHelper.emitArgsAssignment(frame, args);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
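// Illustration only (not part of the upstream sources): a minimal sketch of
// how emitProlog(), emitArgsAssignment() and emitEpilog() are typically used
// together with FuncFrame, assuming an X86 build and an `x86::Assembler a`
// attached to a CodeHolder `code`:
//
//   FuncDetail func;
//   func.init(FuncSignatureT<int, int>(CallConv::kIdHost), code.environment());
//
//   FuncFrame frame;
//   frame.init(func);
//   frame.addDirtyRegs(x86::eax);
//
//   FuncArgsAssignment args(&func);
//   args.assignAll(x86::eax);
//   args.updateFuncFrame(frame);
//   frame.finalize();
//
//   a.emitProlog(frame);
//   a.emitArgsAssignment(frame, args);
//   a.add(x86::eax, 1);                // Function body.
//   a.emitEpilog(frame);               // Emits the epilog including 'ret'.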
// ============================================================================
// [asmjit::BaseEmitter - Comment]
// ============================================================================
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
va_list ap;
va_start(ap, fmt);
Error err = sb.appendVFormat(fmt, ap);
va_end(ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt);
return kErrorOk;
#endif
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
Error err = sb.appendVFormat(fmt, ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt, ap);
return kErrorOk;
#endif
}
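// Illustration only (not part of the upstream sources): comments reach the
// attached Logger only when comment logging is active; a sketch assuming an
// `x86::Assembler a` and annotation logging enabled on a FileLogger:
//
//   FileLogger logger(stdout);
//   logger.addFlags(FormatOptions::kFlagAnnotations);
//   a.setLogger(&logger);
//   a.commentf("generated %u stubs", unsigned(stubCount));   // `stubCount` is hypothetical.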
// ============================================================================
// [asmjit::BaseEmitter - Events]
// ============================================================================
Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
_code = code;
_environment = code->environment();
_addEmitterFlags(kFlagAttached);
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits._regInfo[nativeRegType].signature());
onSettingsUpdated();
return kErrorOk;
}
Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
DebugUtils::unused(code);
if (!hasOwnLogger())
_logger = nullptr;
if (!hasOwnErrorHandler())
_errorHandler = nullptr;
_clearEmitterFlags(~kEmitterPreservedFlags);
_forcedInstOptions = BaseInst::kOptionReserved;
_privateData = 0;
_environment.reset();
_gpRegInfo.reset();
_instOptions = 0;
_extraReg.reset();
_inlineComment = nullptr;
return kErrorOk;
}
void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger())
_logger = _code->logger();
if (!hasOwnErrorHandler())
_errorHandler = _code->errorHandler();
BaseEmitter_updateForcedOptions(this);
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,723 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
#define ASMJIT_CORE_EMITTER_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/codeholder.h"
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class ConstPool;
class FuncFrame;
class FuncArgsAssignment;
// ============================================================================
// [asmjit::BaseEmitter]
// ============================================================================
//! Provides a base foundation to emit code - specialized by `Assembler` and
//! `BaseBuilder`.
class ASMJIT_VIRTAPI BaseEmitter {
public:
ASMJIT_BASE_CLASS(BaseEmitter)
//! See \ref EmitterType.
uint8_t _emitterType = 0;
//! See \ref BaseEmitter::EmitterFlags.
uint8_t _emitterFlags = 0;
//! Validation flags in case validation is used, see \ref InstAPI::ValidationFlags.
//!
//! \note Validation flags are specific to the emitter; they are set up at
//! construction time and never changed afterwards.
uint8_t _validationFlags = 0;
//! Validation options, see \ref ValidationOptions.
uint8_t _validationOptions = 0;
//! Encoding options, see \ref EncodingOptions.
uint32_t _encodingOptions = 0;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
uint32_t _forcedInstOptions = BaseInst::kOptionReserved;
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
//! CodeHolder the emitter is attached to.
CodeHolder* _code = nullptr;
//! Attached \ref Logger.
Logger* _logger = nullptr;
//! Attached \ref ErrorHandler.
ErrorHandler* _errorHandler = nullptr;
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature and signature related information.
RegInfo _gpRegInfo {};
//! Next instruction options (affects the next instruction).
uint32_t _instOptions = 0;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
//! Emitter type.
enum EmitterType : uint32_t {
//! Unknown or uninitialized.
kTypeNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kTypeAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kTypeBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kTypeCompiler = 3,
//! Count of emitter types.
kTypeCount = 4
};
//! Emitter flags.
enum EmitterFlags : uint32_t {
//! Emitter is attached to CodeHolder.
kFlagAttached = 0x01u,
//! The emitter must emit comments.
kFlagLogComments = 0x08u,
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kFlagOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kFlagOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFlagFinalized = 0x40u,
//! The emitter was destroyed.
kFlagDestroyed = 0x80u
};
//! Encoding options.
enum EncodingOptions : uint32_t {
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try, where possible, to rewrite
//! instructions into operation-equivalent instructions that take fewer bytes
//! by taking advantage of implicit zero extension. For example, instructions
//! like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm`
//! and `and r32, imm` when the immediate constant is less than `2^31`.
kEncodingOptionOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! The default alignment sequence used by the X86 architecture is the one-byte
//! (0x90) opcode that disassemblers usually show as NOP. However, there are
//! more optimized alignment sequences of 2-11 bytes that may execute faster
//! on certain CPUs. If this feature is enabled, AsmJit will generate
//! specialized sequences for alignments between 2 and 11 bytes.
kEncodingOptionOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps while conditions usually use forward jumps.
//! However, this behavior can be overridden by using instruction prefixes.
//! If this option is enabled, these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take prediction hints into consideration was the Pentium 4. Newer
//! processors implement their own branch-prediction heuristics and ignore
//! static hints, so this feature can only be used for annotation purposes.
kEncodingOptionPredictedJumps = 0x00000010u
};
#ifndef ASMJIT_NO_DEPRECATED
enum EmitterOptions : uint32_t {
kOptionOptimizedForSize = kEncodingOptionOptimizeForSize,
kOptionOptimizedAlign = kEncodingOptionOptimizedAlign,
kOptionPredictedJumps = kEncodingOptionPredictedJumps
};
#endif
//! Validation options are used to tell emitters to perform strict validation
//! of instructions passed to \ref emit().
//!
//! By default, \ref BaseAssembler implementations perform only the basic checks
//! that are necessary to identify all variations of an instruction so the
//! correct encoding can be selected. This is fine for production-ready code,
//! as the assembler doesn't have to perform checks that would slow it down.
//! However, stricter checks are sometimes beneficial, especially while the
//! project that uses AsmJit is still in development and mistakes happen often.
//! To keep the experience of using AsmJit seamless, it offers validation
//! features that can be controlled by `ValidationOptions`.
enum ValidationOptions : uint32_t {
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded
//! into a binary representation. This flag is only relevant for \ref
//! BaseAssembler implementations, but it can be set on any other emitter type
//! as well. In that case, if the emitter needs to create an assembler of its
//! own for the purpose of \ref finalize(), it will propagate this flag to that
//! assembler so all instructions passed to it are explicitly validated.
//!
//! Default: false.
kValidationOptionAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref
//! BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref
//! InstNode representing the instruction is created by Builder or Compiler.
//!
//! Default: false.
kValidationOptionIntermediate = 0x00000002u
};
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(uint32_t emitterType) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
//! \name Cast
//! \{
template<typename T>
inline T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T>
inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \}
//! \name Emitter Type & Flags
//! \{
//! Returns the type of this emitter, see `EmitterType`.
inline uint32_t emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags, see `EmitterFlags`.
inline uint32_t emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
inline bool isAssembler() const noexcept { return _emitterType == kTypeAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
inline bool isBuilder() const noexcept { return _emitterType >= kTypeBuilder; }
//! Tests whether the emitter inherits from `BaseCompiler`.
inline bool isCompiler() const noexcept { return _emitterType == kTypeCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
inline bool hasEmitterFlag(uint32_t flag) const noexcept { return (_emitterFlags & flag) != 0; }
//! Tests whether the emitter is finalized.
inline bool isFinalized() const noexcept { return hasEmitterFlag(kFlagFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
inline bool isDestroyed() const noexcept { return hasEmitterFlag(kFlagDestroyed); }
inline void _addEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags | flags); }
inline void _clearEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags & ~flags); }
//! \}
//! \name Target Information
//! \{
//! Returns the CodeHolder this emitter is attached to.
inline CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment, see \ref Environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
inline const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit.
inline bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit.
inline bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
inline uint32_t arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
inline uint32_t subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
inline uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! \}
//! \name Initialization & Finalization
//! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
inline bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
//!
//! Materializes the content of the emitter by serializing it to the attached
//! \ref CodeHolder through an architecture specific \ref BaseAssembler. This
//! function won't do anything if the emitter inherits from \ref BaseAssembler
//! as assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder.
//! However, if this is an emitter that inherits from \ref BaseBuilder or \ref
//! BaseCompiler then these emitters need the materialization phase as they
//! store their content in a representation not visible to \ref CodeHolder.
ASMJIT_API virtual Error finalize();
//! \}
//! \name Logging
//! \{
//! Tests whether the emitter has a logger.
inline bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used
//! by \ref CodeHolder this emitter is attached to.
inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(kFlagOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or the logger
//! used by the \ref CodeHolder this emitter is attached to.
inline Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered
//! the emitter's own logger, see \ref hasOwnLogger() for more details. If the
//! given `logger` is null then the emitter will automatically use the logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will fall back to the logger attached to the \ref CodeHolder
//! this emitter is attached to, or to no logger at all if the \ref CodeHolder
//! doesn't have one.
inline void resetLogger() noexcept { return setLogger(nullptr); }
//! \}
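//! A usage sketch (illustration only, not from the upstream sources), assuming
//! an `x86::Assembler a` attached to an initialized \ref CodeHolder:
//!
//! ```
//! FileLogger fileLogger(stdout);   // Logs to standard output.
//! a.setLogger(&fileLogger);        // `a` now has its own logger.
//! a.resetLogger();                 // Falls back to CodeHolder's logger, if any.
//! ```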
//! \name Error Handling
//! \{
//! Tests whether the emitter has an error handler attached.
inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that
//! may be used by \ref CodeHolder this emitter is attached to.
inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(kFlagOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or
//! the error handler used by the \ref CodeHolder this emitter is attached to.
inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
//! Resets the error handler.
inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its
//! \ref ErrorHandler::handleError() member function first, and
//! then returns the error. The `handleError()` function may throw.
//! 2. If the emitter doesn't have an \ref ErrorHandler, the error is
//! simply returned.
ASMJIT_API Error reportError(Error err, const char* message = nullptr);
//! \}
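//! A sketch of a custom error handler (illustration only, not from the upstream
//! sources); the class name used here is hypothetical:
//!
//! ```
//! class PrintingErrorHandler : public ErrorHandler {
//! public:
//!   void handleError(Error err, const char* message, BaseEmitter* origin) override {
//!     printf("AsmJit error 0x%08X: %s\n", unsigned(err), message);
//!   }
//! };
//!
//! PrintingErrorHandler eh;
//! a.setErrorHandler(&eh);          // Errors reported by `a` now go through `eh`.
//! ```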
//! \name Encoding Options
//! \{
//! Returns encoding options, see \ref EncodingOptions.
inline uint32_t encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
inline bool hasEncodingOption(uint32_t option) const noexcept { return (_encodingOptions & option) != 0; }
//! Enables the given encoding `options`, see \ref EncodingOptions.
inline void addEncodingOptions(uint32_t options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`, see \ref EncodingOptions.
inline void clearEncodingOptions(uint32_t options) noexcept { _encodingOptions &= ~options; }
//! \}
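//! For example (illustration only, not from the upstream sources):
//!
//! ```
//! a.addEncodingOptions(BaseEmitter::kEncodingOptionOptimizeForSize);
//! a.emit(x86::Inst::kIdMov, x86::rax, 1);   // May be encoded as 'mov eax, 1'.
//! ```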
//! \name Validation Options
//! \{
//! Returns the emitter's validation options, see \ref ValidationOptions.
inline uint32_t validationOptions() const noexcept {
return _validationOptions;
}
//! Tests whether the given `option` is present in validation options.
inline bool hasValidationOption(uint32_t option) const noexcept {
return (_validationOptions & option) != 0;
}
//! Activates the given validation `options`, see \ref ValidationOptions.
//!
//! This function is used to activate explicit validation options that will
//! be then used by all emitter implementations. There are in general two
//! possibilities:
//!
//! - Architecture specific assembler is used. In this case a
//! \ref kValidationOptionAssembler can be used to turn on explicit
//! validation that will be used before an instruction is emitted.
//! This means that internally an extra step will be performed to
//! make sure that the instruction is correct. This is needed, because
//! by default assemblers prefer speed over strictness.
//!
//! This option should be used in debug builds as it's pretty expensive.
//!
//! - Architecture specific builder or compiler is used. In this case
//! the user can turn on \ref kValidationOptionIntermediate option
//! that adds explicit validation step before the Builder or Compiler
//! creates an \ref InstNode to represent an emitted instruction. Error
//! will be returned if the instruction is ill-formed. In addition,
//! also \ref kValidationOptionAssembler can be used, which would not be
//! consumed by Builder / Compiler directly, but it would be propagated
//! to an architecture specific \ref BaseAssembler implementation it
//! creates during \ref BaseEmitter::finalize().
ASMJIT_API void addValidationOptions(uint32_t options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addValidationOptions() and \ref ValidationOptions for more details.
ASMJIT_API void clearValidationOptions(uint32_t options) noexcept;
//! \}
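//! A sketch (illustration only, not from the upstream sources) of enabling
//! strict validation while developing:
//!
//! ```
//! #ifndef NDEBUG
//! a.addValidationOptions(BaseEmitter::kValidationOptionAssembler);
//! #endif
//! ```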
//! \name Instruction Options
//! \{
//! Returns forced instruction options.
//!
//! Forced instruction options are merged with the next instruction's options
//! before the instruction is encoded. Some bits of these options are reserved
//! and used for error handling, logging, and instruction validation; the
//! remaining options are global and affect every instruction.
inline uint32_t forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
inline uint32_t instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction.
inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
inline void resetInstOptions() noexcept { _instOptions = 0; }
//! Tests whether the extra register operand is valid.
inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
inline const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
inline void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction.
inline const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until then it has
//! to remain valid, as the emitter is not required to make a copy of it (and
//! doing that for each instruction would be slow).
inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
//! \}
//! \name Sections
//! \{
virtual Error section(Section* section) = 0;
//! \}
//! \name Labels
//! \{
//! Creates a new label.
virtual Label newLabel() = 0;
//! Creates a new named label.
virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
//! Creates a new external label.
inline Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) {
return newNamedLabel(name, nameSize, Label::kTypeExternal);
}
//! Returns `Label` by `name`.
//!
//! Returns an invalid Label if the name is invalid or the label was not found.
//!
//! \note This function doesn't trigger ErrorHandler if the name is invalid
//! or no such label exists. You must always check the validity of the returned `Label`.
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section.
//!
//! \note Attempt to bind the same label multiple times will return an error.
virtual Error bind(const Label& label) = 0;
//! Tests whether the label `id` is valid (i.e. registered).
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
//! \}
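//! A minimal sketch (illustration only, not from the upstream sources),
//! assuming an `x86::Assembler a`:
//!
//! ```
//! Label loop = a.newLabel();       // Create an (unbound) label.
//! a.mov(x86::ecx, 10);
//! a.bind(loop);                    // Bind it to the current position.
//! a.dec(x86::ecx);
//! a.jnz(loop);                     // Jump back while ecx != 0.
//! ```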
//! \name Emit
//! \{
// NOTE: These `emit()` helpers are designed to address a code-bloat generated
// by C++ compilers to call a function having many arguments. Each parameter to
// `_emit()` requires some code to pass it, which means that if we defaulted to
// 5 arguments in `_emit()` plus `instId`, the C++ compiler would have to generate
// a virtual function call having 5 parameters and an additional `this` argument,
// which is quite a lot. Since by default most instructions have 2 to 3 operands,
// it's better to introduce helpers that pass from 0 to 6 operands and help to
// reduce the size of each emit(...) call.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(uint32_t instId);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
template<typename... Args>
ASMJIT_INLINE Error emit(uint32_t instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
}
inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
}
inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
}
//! \cond INTERNAL
//! Emits an instruction - all 6 operands must be defined.
virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount);
//! \endcond
//! \}
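//! A sketch (illustration only, not from the upstream sources) of the
//! instruction-id based interface; architecture-specific emitters such as
//! `x86::Assembler` also provide typed wrappers like `mov()` on top of it:
//!
//! ```
//! a.emit(x86::Inst::kIdMov, x86::eax, 42);
//! a.emit(x86::Inst::kIdRet);
//! ```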
//! \name Emit Utilities
//! \{
ASMJIT_API Error emitProlog(const FuncFrame& frame);
ASMJIT_API Error emitEpilog(const FuncFrame& frame);
ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
//! \}
//! \name Align
//! \{
//! Aligns the current CodeBuffer position to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current location depends on the align `mode`, see \ref AlignMode.
virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
//! \}
//! \name Embed
//! \{
//! Embeds raw data into the \ref CodeBuffer.
virtual Error embed(const void* data, size_t dataSize) = 0;
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data, as it allows you to:
//! - Assign a `typeId` to the data, so the emitter knows the type of
//! items stored in `data`. Binary data should use \ref Type::kIdU8.
//! - Repeat the given data `repeatCount` times, so the data can be used
//! as a fill pattern for example, or as a pattern used by SIMD instructions.
virtual Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
//! Embeds int8_t `value` repeated by `repeatCount`.
inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(Type::kIdF32, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(Type::IdOfT<double>::kTypeId, &value, 1, repeatCount); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns by using kAlignData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the
//! size of the address data. If it's zero (default) the address size is
//! deduced from the target architecture (either 4 or 8 bytes).
virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
//! Embeds a delta (distance) between the `label` and `base` calculating it
//! as `label - base`. This function was designed to make it easier to embed
//! lookup tables where each entry is a relative distance between two labels.
virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
//! \}
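//! A sketch (illustration only, not from the upstream sources) that embeds a
//! small data table and then its absolute address:
//!
//! ```
//! Label table = a.newNamedLabel("table");
//! a.bind(table);
//! a.embedUInt32(0, 4);             // Four 32-bit zeros as a placeholder.
//! a.embedLabel(table);             // Absolute address of `table` as data.
//! ```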
//! \name Comment
//! \{
//! Emits a comment stored in `data` with an optional `size` parameter.
virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
//! Emits a formatted comment specified by `fmt` and variable number of arguments.
ASMJIT_API Error commentf(const char* fmt, ...);
//! Emits a formatted comment specified by `fmt` and `ap`.
ASMJIT_API Error commentv(const char* fmt, va_list ap);
//! \}
//! \name Events
//! \{
//! Called after the emitter was attached to `CodeHolder`.
virtual Error onAttach(CodeHolder* code) noexcept = 0;
//! Called after the emitter was detached from `CodeHolder`.
virtual Error onDetach(CodeHolder* code) noexcept = 0;
//! Called when \ref CodeHolder has updated an important setting, which
//! involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been
//! called).
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler()
//! has been called).
//!
//! This function ensures that the settings are properly propagated from
//! \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden; however, if you
//! do so, always call \ref BaseEmitter::onSettingsUpdated() within your
//! own implementation to ensure that the emitter is in a consistent state.
ASMJIT_API virtual void onSettingsUpdated() noexcept;
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use environment() instead")
inline CodeInfo codeInfo() const noexcept {
return CodeInfo(_environment, _code ? _code->baseAddress() : Globals::kNoBaseAddress);
}
ASMJIT_DEPRECATED("Use arch() instead")
inline uint32_t archId() const noexcept { return arch(); }
ASMJIT_DEPRECATED("Use registerSize() instead")
inline uint32_t gpSize() const noexcept { return registerSize(); }
ASMJIT_DEPRECATED("Use encodingOptions() instead")
inline uint32_t emitterOptions() const noexcept { return encodingOptions(); }
ASMJIT_DEPRECATED("Use addEncodingOptions() instead")
inline void addEmitterOptions(uint32_t options) noexcept { addEncodingOptions(options); }
ASMJIT_DEPRECATED("Use clearEncodingOptions() instead")
inline void clearEmitterOptions(uint32_t options) noexcept { clearEncodingOptions(options); }
ASMJIT_DEPRECATED("Use forcedInstOptions() instead")
inline uint32_t globalInstOptions() const noexcept { return forcedInstOptions(); }
#endif // !ASMJIT_NO_DEPRECATED
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTER_H_INCLUDED

@ -0,0 +1,150 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
size_t currentSize = sb.size();
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
ASMJIT_ASSERT(binSize >= dispSize);
const size_t kNoBinSize = SIZE_MAX;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
size_t align = kMaxInstLineSize;
char sep = ';';
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
size_t begin = sb.size();
ASMJIT_PROPAGATE(sb.padEnd(align));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
ASMJIT_PROPAGATE(sb.append(' '));
}
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
}
currentSize += sb.size() - begin;
align += kMaxBinarySize;
sep = '|';
}
}
return sb.append('\n');
}
void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append(':');
EmitterUtils::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
uint32_t flags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
Formatter::formatInstruction(sb, flags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if ((flags & FormatOptions::kFlagMachineCode) != 0)
EmitterUtils::formatLine(sb, self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else
EmitterUtils::formatLine(sb, nullptr, SIZE_MAX, 0, 0, self->inlineComment());
logger->log(sb);
}
Error logInstructionFailed(
BaseAssembler* self,
Error err,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Formatter::formatInstruction(sb, 0, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (self->inlineComment()) {
sb.append(" ; ");
sb.append(self->inlineComment());
}
self->resetInstOptions();
self->resetExtraReg();
self->resetInlineComment();
return self->reportError(err, sb.data());
}
#endif
} // {EmitterUtils}
ASMJIT_END_NAMESPACE

@ -0,0 +1,109 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#include "../core/emitter.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
class BaseAssembler;
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
namespace EmitterUtils {
static const Operand_ noExt[3] {};
enum kOpIndex {
kOp3 = 0,
kOp4 = 1,
kOp5 = 2
};
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
if (!o0.isNone()) opCount = 1;
if (!o1.isNone()) opCount = 2;
if (!o2.isNone()) opCount = 3;
}
else {
opCount = 4;
if (!opExt[kOp4].isNone()) {
opCount = 5 + uint32_t(!opExt[kOp5].isNone());
}
}
return opCount;
}
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
dst[3].copyFrom(opExt[kOp3]);
dst[4].copyFrom(opExt[kOp4]);
dst[5].copyFrom(opExt[kOp5]);
}
#ifndef ASMJIT_NO_LOGGING
enum : uint32_t {
// Has to be big to be able to hold all metadata compiler can assign to a
// single instruction.
kMaxInstLineSize = 44,
kMaxBinarySize = 26
};
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
Error logInstructionFailed(
BaseAssembler* self,
Error err, uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
#endif
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED

@ -0,0 +1,64 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/environment.h"
ASMJIT_BEGIN_NAMESPACE
// X86 Target
// ----------
//
// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
// stack alignment. Other operating systems are assumed to have
// 4-byte alignment by default for safety reasons.
// - 64-bit - stack must be aligned to 16 bytes.
//
// ARM Target
// ----------
//
// - 32-bit - Stack must be aligned to 8 bytes.
// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
uint32_t Environment::stackAlignment() const noexcept {
if (is64Bit()) {
// Assume 16-byte alignment on any 64-bit target.
return 16;
}
else {
// The following platforms use 16-byte alignment in 32-bit mode.
if (isPlatformLinux() ||
isPlatformBSD() ||
isPlatformApple() ||
isPlatformHaiku()) {
return 16u;
}
if (isFamilyARM())
return 8;
// Fall back to 4-byte alignment if we don't know.
return 4;
}
}
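// Illustration only (not part of the upstream sources): querying the stack
// alignment for a concrete environment.
//
//   Environment env(Environment::kArchX64, Environment::kSubArchUnknown,
//                   Environment::kVendorUnknown, Environment::kPlatformLinux);
//   uint32_t alignment = env.stackAlignment();   // 16 on any 64-bit target.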
ASMJIT_END_NAMESPACE

@ -0,0 +1,612 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#include "../core/globals.h"
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::Environment]
// ============================================================================
//! Represents an environment, which is usually related to a \ref Target.
//!
//! An environment usually has an 'arch-subarch-vendor-os-abi' format, which is
//! sometimes called a "Triple" (historically it had only 3 parts) or a
//! "Tuple", which is the convention used by Debian Linux.
//!
//! AsmJit doesn't support all possible combinations of architectures and ABIs;
//! however, it models the environment similarly to other compilers for future
//! extensibility.
class Environment {
public:
//! Architecture type, see \ref Arch.
uint8_t _arch;
//! Sub-architecture type, see \ref SubArch.
uint8_t _subArch;
//! Vendor type, see \ref Vendor.
uint8_t _vendor;
//! Platform type, see \ref Platform.
uint8_t _platform;
//! ABI type, see \ref Abi.
uint8_t _abi;
//! Object format, see \ref Format.
uint8_t _format;
//! Reserved for future use, must be zero.
uint16_t _reserved;
//! Architecture.
enum Arch : uint32_t {
//! Unknown or uninitialized architecture.
kArchUnknown = 0,
//! Mask used by 32-bit architectures (odd are 32-bit, even are 64-bit).
kArch32BitMask = 0x01,
//! Mask used by big-endian architectures.
kArchBigEndianMask = 0x80u,
//! 32-bit X86 architecture.
kArchX86 = 1,
//! 64-bit X86 architecture also known as X86_64 and AMD64.
kArchX64 = 2,
//! 32-bit RISC-V architecture.
kArchRISCV32 = 3,
//! 64-bit RISC-V architecture.
kArchRISCV64 = 4,
//! 32-bit ARM architecture (little endian).
kArchARM = 5,
//! 32-bit ARM architecture (big endian).
kArchARM_BE = kArchARM | kArchBigEndianMask,
//! 64-bit ARM architecture (little endian).
kArchAArch64 = 6,
//! 64-bit ARM architecture (big endian).
kArchAArch64_BE = kArchAArch64 | kArchBigEndianMask,
//! 32-bit ARM in Thumb mode (little endian).
kArchThumb = 7,
//! 32-bit ARM in Thumb mode (big endian).
kArchThumb_BE = kArchThumb | kArchBigEndianMask,
// 8 is not used, even numbers are 64-bit architectures.
//! 32-bit MIPS architecture (little endian).
kArchMIPS32_LE = 9,
//! 32-bit MIPS architecture (big endian).
kArchMIPS32_BE = kArchMIPS32_LE | kArchBigEndianMask,
//! 64-bit MIPS architecture (little endian).
kArchMIPS64_LE = 10,
//! 64-bit MIPS architecture (big endian).
kArchMIPS64_BE = kArchMIPS64_LE | kArchBigEndianMask,
//! Count of architectures.
kArchCount = 11
};
//! Sub-architecture.
enum SubArch : uint32_t {
//! Unknown or uninitialized architecture sub-type.
kSubArchUnknown = 0,
//! Count of sub-architectures.
kSubArchCount
};
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided
//! for future use, if required.
enum Vendor : uint32_t {
//! Unknown or uninitialized vendor.
kVendorUnknown = 0,
//! Count of vendor identifiers.
kVendorCount
};
//! Platform / OS.
enum Platform : uint32_t {
//! Unknown or uninitialized platform.
kPlatformUnknown = 0,
//! Windows OS.
kPlatformWindows,
//! Other platform, most likely POSIX based.
kPlatformOther,
//! Linux OS.
kPlatformLinux,
//! GNU/Hurd OS.
kPlatformHurd,
//! FreeBSD OS.
kPlatformFreeBSD,
//! OpenBSD OS.
kPlatformOpenBSD,
//! NetBSD OS.
kPlatformNetBSD,
//! DragonFly BSD OS.
kPlatformDragonFlyBSD,
//! Haiku OS.
kPlatformHaiku,
//! Apple OSX.
kPlatformOSX,
//! Apple iOS.
kPlatformIOS,
//! Apple TVOS.
kPlatformTVOS,
//! Apple WatchOS.
kPlatformWatchOS,
//! Emscripten platform.
kPlatformEmscripten,
//! Count of platform identifiers.
kPlatformCount
};
//! ABI.
enum Abi : uint32_t {
//! Unknown or uninitialized ABI.
kAbiUnknown = 0,
//! Microsoft ABI.
kAbiMSVC,
//! GNU ABI.
kAbiGNU,
//! Android Environment / ABI.
kAbiAndroid,
//! Cygwin ABI.
kAbiCygwin,
//! Count of known ABI types.
kAbiCount
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref kFormatUnknown and
//! \ref kFormatJIT at the moment. Object file formats are provided for
//! future extensibility and a possibility to generate object files at some
//! point.
enum Format : uint32_t {
//! Unknown or uninitialized object format.
kFormatUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kFormatJIT,
//! Executable and linkable format (ELF).
kFormatELF,
//! Common object file format.
kFormatCOFF,
//! Extended COFF object format.
kFormatXCOFF,
//! Mach object file format.
kFormatMachO,
//! Count of object format types.
kFormatCount
};
//! \name Environment Detection
//! \{
#ifdef _DOXYGEN
//! Architecture detected at compile-time (architecture of the host).
static constexpr Arch kArchHost = DETECTED_AT_COMPILE_TIME;
//! Sub-architecture detected at compile-time (sub-architecture of the host).
static constexpr SubArch kSubArchHost = DETECTED_AT_COMPILE_TIME;
//! Vendor detected at compile-time (vendor of the host).
static constexpr Vendor kVendorHost = DETECTED_AT_COMPILE_TIME;
//! Platform detected at compile-time (platform of the host).
static constexpr Platform kPlatformHost = DETECTED_AT_COMPILE_TIME;
//! ABI detected at compile-time (ABI of the host).
static constexpr Abi kAbiHost = DETECTED_AT_COMPILE_TIME;
#else
static constexpr Arch kArchHost =
ASMJIT_ARCH_X86 == 32 ? kArchX86 :
ASMJIT_ARCH_X86 == 64 ? kArchX64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kArchARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kArchARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kArchAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kArchAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kArchMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kArchMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kArchMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kArchMIPS64_BE :
kArchUnknown;
static constexpr SubArch kSubArchHost =
kSubArchUnknown;
static constexpr Vendor kVendorHost =
kVendorUnknown;
static constexpr Platform kPlatformHost =
#if defined(__EMSCRIPTEN__)
kPlatformEmscripten
#elif defined(_WIN32)
kPlatformWindows
#elif defined(__linux__)
kPlatformLinux
#elif defined(__gnu_hurd__)
kPlatformHurd
#elif defined(__FreeBSD__)
kPlatformFreeBSD
#elif defined(__OpenBSD__)
kPlatformOpenBSD
#elif defined(__NetBSD__)
kPlatformNetBSD
#elif defined(__DragonFly__)
kPlatformDragonFlyBSD
#elif defined(__HAIKU__)
kPlatformHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kPlatformOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kPlatformTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kPlatformWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kPlatformIOS
#else
kPlatformOther
#endif
;
static constexpr Abi kAbiHost =
#if defined(_MSC_VER)
kAbiMSVC
#elif defined(__CYGWIN__)
kAbiCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kAbiGNU
#elif defined(__ANDROID__)
kAbiAndroid
#else
kAbiUnknown
#endif
;
#endif
//! \}
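//! A sketch (illustration only, not from the upstream sources) of describing
//! the host environment by using the compile-time constants above:
//!
//! ```
//! Environment host(Environment::kArchHost, Environment::kSubArchHost,
//!                  Environment::kVendorHost, Environment::kPlatformHost,
//!                  Environment::kAbiHost);
//! ```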
//! \name Construction / Destruction
//! \{
inline Environment() noexcept :
_arch(uint8_t(kArchUnknown)),
_subArch(uint8_t(kSubArchUnknown)),
_vendor(uint8_t(kVendorUnknown)),
_platform(uint8_t(kPlatformUnknown)),
_abi(uint8_t(kAbiUnknown)),
_format(uint8_t(kFormatUnknown)),
_reserved(0) {}
inline Environment(const Environment& other) noexcept = default;
inline explicit Environment(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
init(arch, subArch, vendor, platform, abi, format);
}
//! \}
//! \name Overloaded Operators
//! \{
inline Environment& operator=(const Environment& other) noexcept = default;
inline bool operator==(const Environment& other) const noexcept { return equals(other); }
inline bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the environment is not set up.
//!
//! Returns true if all members are zero, and thus unknown.
inline bool empty() const noexcept {
// Unfortunately compilers won't optimize this well if the fields are checked one by one...
return _packed() == 0;
}
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
inline bool isInitialized() const noexcept {
return _arch != kArchUnknown;
}
inline uint64_t _packed() const noexcept {
uint64_t x;
memcpy(&x, this, 8);
return x;
}
//! Resets all members of the environment to zero / unknown.
inline void reset() noexcept {
_arch = uint8_t(kArchUnknown);
_subArch = uint8_t(kSubArchUnknown);
_vendor = uint8_t(kVendorUnknown);
_platform = uint8_t(kPlatformUnknown);
_abi = uint8_t(kAbiUnknown);
_format = uint8_t(kFormatUnknown);
_reserved = 0;
}
inline bool equals(const Environment& other) const noexcept {
return _packed() == other._packed();
}
//! Returns the architecture, see \ref Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the sub-architecture, see \ref SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns vendor, see \ref Vendor.
inline uint32_t vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system, see \ref Platform.
inline uint32_t platform() const noexcept { return _platform; }
//! Returns target's ABI, see \ref Abi.
inline uint32_t abi() const noexcept { return _abi; }
//! Returns target's object format, see \ref Format.
inline uint32_t format() const noexcept { return _format; }
inline void init(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
_vendor = uint8_t(vendor);
_platform = uint8_t(platform);
_abi = uint8_t(abi);
_format = uint8_t(format);
_reserved = 0;
}
inline bool isArchX86() const noexcept { return _arch == kArchX86; }
inline bool isArchX64() const noexcept { return _arch == kArchX64; }
inline bool isArchRISCV32() const noexcept { return _arch == kArchRISCV32; }
inline bool isArchRISCV64() const noexcept { return _arch == kArchRISCV64; }
inline bool isArchARM() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchARM; }
inline bool isArchThumb() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchThumb; }
inline bool isArchAArch64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchAArch64; }
inline bool isArchMIPS32() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS32_LE; }
inline bool isArchMIPS64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS64_LE; }
//! Tests whether the architecture is 32-bit.
inline bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit.
inline bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian.
inline bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian.
inline bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family.
inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether this architecture family is ARM, Thumb, or AArch64.
inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is MIPS32 or MIPS64.
inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether the environment platform is Windows.
inline bool isPlatformWindows() const noexcept { return _platform == kPlatformWindows; }
//! Tests whether the environment platform is Linux.
inline bool isPlatformLinux() const noexcept { return _platform == kPlatformLinux; }
//! Tests whether the environment platform is Hurd.
inline bool isPlatformHurd() const noexcept { return _platform == kPlatformHurd; }
//! Tests whether the environment platform is Haiku.
inline bool isPlatformHaiku() const noexcept { return _platform == kPlatformHaiku; }
//! Tests whether the environment platform is any BSD.
inline bool isPlatformBSD() const noexcept {
return _platform == kPlatformFreeBSD ||
_platform == kPlatformOpenBSD ||
_platform == kPlatformNetBSD ||
_platform == kPlatformDragonFlyBSD;
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
inline bool isPlatformApple() const noexcept {
return _platform == kPlatformOSX ||
_platform == kPlatformIOS ||
_platform == kPlatformTVOS ||
_platform == kPlatformWatchOS;
}
//! Tests whether the ABI is MSVC.
inline bool isAbiMSVC() const noexcept { return _abi == kAbiMSVC; }
//! Tests whether the ABI is GNU.
inline bool isAbiGNU() const noexcept { return _abi == kAbiGNU; }
//! Returns a calculated stack alignment for this environment.
ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture.
uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
//! Sets the sub-architecture to `subArch`.
inline void setSubArch(uint32_t subArch) noexcept { _subArch = uint8_t(subArch); }
//! Sets the vendor to `vendor`.
inline void setVendor(uint32_t vendor) noexcept { _vendor = uint8_t(vendor); }
//! Sets the platform to `platform`.
inline void setPlatform(uint32_t platform) noexcept { _platform = uint8_t(platform); }
//! Sets the ABI to `abi`.
inline void setAbi(uint32_t abi) noexcept { _abi = uint8_t(abi); }
//! Sets the object format to `format`.
inline void setFormat(uint32_t format) noexcept { _format = uint8_t(format); }
//! \}
//! \name Static Utilities
//! \{
static inline bool isValidArch(uint32_t arch) noexcept {
return (arch & ~kArchBigEndianMask) != 0 &&
(arch & ~kArchBigEndianMask) < kArchCount;
}
//! Tests whether the given architecture `arch` is 32-bit.
static inline bool is32Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == kArch32BitMask;
}
//! Tests whether the given architecture `arch` is 64-bit.
static inline bool is64Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
static inline bool isLittleEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == 0;
}
//! Tests whether the given architecture `arch` is big endian.
static inline bool isBigEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == kArchBigEndianMask;
}
//! Tests whether the given architecture is AArch64.
static inline bool isArchAArch64(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchAArch64;
}
//! Tests whether the given architecture family is X86 or X64.
static inline bool isFamilyX86(uint32_t arch) noexcept {
return arch == kArchX86 ||
arch == kArchX64;
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
static inline bool isFamilyRISCV(uint32_t arch) noexcept {
return arch == kArchRISCV32 ||
arch == kArchRISCV64;
}
//! Tests whether the given architecture family is ARM, Thumb, or AArch64.
static inline bool isFamilyARM(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchARM ||
arch == kArchAArch64 ||
arch == kArchThumb;
}
//! Tests whether the given architecture family is MIPS32 or MIPS64.
static inline bool isFamilyMIPS(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchMIPS32_LE ||
arch == kArchMIPS64_LE;
}
//! Returns a native general purpose register size from the given architecture.
static uint32_t registerSizeFromArch(uint32_t arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}
//! \}
};
//! Returns the host environment constructed from preprocessor macros defined
//! by the compiler.
//!
//! The returned environment should precisely match the target host architecture,
//! sub-architecture, platform, and ABI.
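//!
//! A minimal usage sketch (illustration only; every name used below is defined
//! in this header):
//!
//! ```
//! Environment env = hostEnvironment();
//!
//! if (env.isPlatformWindows() && env.is64Bit()) {
//!   // Native general purpose register size - 4 on 32-bit, 8 on 64-bit targets.
//!   uint32_t regSize = env.registerSize();
//! }
//! ```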
static ASMJIT_INLINE Environment hostEnvironment() noexcept {
return Environment(Environment::kArchHost,
Environment::kSubArchHost,
Environment::kVendorHost,
Environment::kPlatformHost,
Environment::kAbiHost,
Environment::kFormatUnknown);
}
static_assert(sizeof(Environment) == 8,
"Environment must occupy exactly 8 bytes.");
//! \}
#ifndef ASMJIT_NO_DEPRECATED
class ASMJIT_DEPRECATED_STRUCT("Use Environment instead") ArchInfo : public Environment {
public:
inline ArchInfo() noexcept : Environment() {}
inline ArchInfo(const Environment& other) noexcept : Environment(other) {}
inline explicit ArchInfo(uint32_t arch, uint32_t subArch = kSubArchUnknown) noexcept
: Environment(arch, subArch) {}
enum Id : uint32_t {
kIdNone = Environment::kArchUnknown,
kIdX86 = Environment::kArchX86,
kIdX64 = Environment::kArchX64,
kIdA32 = Environment::kArchARM,
kIdA64 = Environment::kArchAArch64,
kIdHost = Environment::kArchHost
};
enum SubType : uint32_t {
kSubIdNone = Environment::kSubArchUnknown
};
static inline ArchInfo host() noexcept { return ArchInfo(hostEnvironment()); }
};
#endif // !ASMJIT_NO_DEPRECATED
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED

@ -0,0 +1,37 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
ASMJIT_END_NAMESPACE

@ -0,0 +1,267 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_error_handling
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseEmitter;
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override
//! \ref ErrorHandler::handleError() to implement your own error handler.
//!
//! The following use-cases are supported:
//!
//! - Record the error and continue code generation. This is the simplest
//! approach that can be used to at least log possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but it's perfectly legal to throw an exception from
//! the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler,
//!   Builder and Compiler into a consistent state before calling \ref handleError(),
//! so `longjmp()` can be used without issues to cancel the code-generation if
//! an error occurred. This method can be used if exception handling in your
//! project is turned off and you still want some comfort. In most cases it
//! should be safe as AsmJit uses \ref Zone memory and the ownership of memory
//! it allocates always ends with the instance that allocated it. If using this
//! approach please never jump outside the life-time of \ref CodeHolder and
//! \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter;
//! a handler attached to the emitter takes priority. The example below uses an
//! error handler that just prints the error, but lets AsmJit continue:
//!
//! ```
//! // Error Handling #1 - Logging and returning Error.
//! #include <asmjit/x86.h>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that just prints the error and lets AsmJit ignore it.
//! class SimpleErrorHandler : public ErrorHandler {
//! public:
//! Error err;
//!
//! inline SimpleErrorHandler() : err(kErrorOk) {}
//!
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! this->err = err;
//! fprintf(stderr, "ERROR: %s\n", message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! SimpleErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! // Try to emit instruction that doesn't exist.
//! x86::Assembler a(&code);
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//!
//! if (eh.err) {
//! // Assembler failed!
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If an error happens during instruction emitting / encoding, the assembler
//! behaves transactionally - the output buffer won't advance if encoding fails,
//! so either a fully encoded instruction or nothing is emitted. The error handling
//! shown above is useful, but it's still not the best way of dealing with errors
//! in AsmJit. The following example shows how to use exception handling to handle
//! errors in a more C++ way:
//!
//! ```
//! // Error Handling #2 - Throwing an exception.
//! #include <asmjit/x86.h>
//! #include <exception>
//! #include <string>
//! #include <stdio.h>
//!
//! using namespace asmjit;
//!
//! // Error handler that throws a user-defined `AsmJitException`.
//! class AsmJitException : public std::exception {
//! public:
//! Error err;
//! std::string message;
//!
//! AsmJitException(Error err, const char* message) noexcept
//! : err(err),
//! message(message) {}
//!
//! const char* what() const noexcept override { return message.c_str(); }
//! };
//!
//! class ThrowableErrorHandler : public ErrorHandler {
//! public:
//!   // Throwing is possible; functions that use ErrorHandler are never 'noexcept'.
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! throw AsmJitException(err, message);
//! }
//! };
//!
//! int main() {
//! JitRuntime rt;
//! ThrowableErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! // Try to emit instruction that doesn't exist.
//! try {
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! catch (const AsmJitException& ex) {
//! printf("EXCEPTION THROWN: %s\n", ex.what());
//! return 1;
//! }
//!
//! return 0;
//! }
//! ```
//!
//! If C++ exceptions are not what you prefer, or your project disables them
//! completely, there is still a way of reducing the error handling to a minimum
//! by using a standard setjmp/longjmp approach. AsmJit is exception-safe and
//! cleans up everything before calling the ErrorHandler, so any approach is
//! safe. You can simply jump from the error handler without causing any
//! side-effects or memory leaks. The following example demonstrates how it
//! could be done:
//!
//! ```
//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
//! #include <asmjit/x86.h>
//! #include <setjmp.h>
//! #include <stdio.h>
//!
//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
//! public:
//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
//!
//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! this->err = err;
//! longjmp(state, 1);
//! }
//!
//! jmp_buf state;
//! asmjit::Error err;
//! };
//!
//! int main(int argc, char* argv[]) {
//! using namespace asmjit;
//!
//! JitRuntime rt;
//! LongJmpErrorHandler eh;
//!
//! CodeHolder code;
//!   code.init(rt.environment());
//! code.setErrorHandler(&eh);
//!
//! x86::Assembler a(&code);
//!
//! if (!setjmp(eh.state)) {
//! // Try to emit instruction that doesn't exist.
//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
//! }
//! else {
//! Error err = eh.err;
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
//! }
//!
//! return 0;
//! }
//! ```
class ASMJIT_VIRTAPI ErrorHandler {
public:
ASMJIT_BASE_CLASS(ErrorHandler)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Creates a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroys the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
// --------------------------------------------------------------------------
// [Handle Error]
// --------------------------------------------------------------------------
//! Error handler (must be reimplemented).
//!
//! The error handler is called after an error happens and before it's propagated
//! to the caller. There are multiple ways the error handler can be used:
//!
//! 1. User-based error handling without throwing exception or using C's
//! `longjmp()`. This is for users that don't use exceptions and want
//! customized error handling.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but you can throw exception from your error handler if
//! this way is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
//! `BaseEmitter` to a consistent state before calling `handleError()`
//! so `longjmp()` can be used without any issues to cancel the code
//! generation if an error occurred. There is no difference between
//! exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you
//! would leak memory.
virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ERRORHANDLER_H_INCLUDED

@ -0,0 +1,186 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FEATURES_H_INCLUDED
#define ASMJIT_CORE_FEATURES_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseFeatures]
// ============================================================================
//! Base class that provides information about CPU features.
//!
//! Internally each feature is represented by a single bit in an embedded
//! bit-array, however, feature bits are defined by an architecture specific
//! implementations, like \ref x86::Features.
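//!
//! A minimal sketch of the bit-set interface (the feature id used below is
//! hypothetical; real ids come from an architecture-specific class such as
//! \ref x86::Features, and the iteration assumes the hasNext()/next()
//! interface of \ref Support::BitVectorIterator):
//!
//! ```
//! BaseFeatures features;
//! uint32_t kSomeFeatureId = 42;            // Hypothetical feature id.
//!
//! features.add(kSomeFeatureId);            // Sets bit 42 in the bit-array.
//! bool present = features.has(kSomeFeatureId);
//!
//! // Iterate over all feature ids that are currently set.
//! BaseFeatures::Iterator it = features.iterator();
//! while (it.hasNext()) {
//!   uint32_t featureId = uint32_t(it.next());
//! }
//! ```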
class BaseFeatures {
public:
typedef Support::BitWord BitWord;
typedef Support::BitVectorIterator<BitWord> Iterator;
enum : uint32_t {
kMaxFeatures = 256,
kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
};
BitWord _bits[kNumBitWords];
//! \name Construction & Destruction
//! \{
inline BaseFeatures() noexcept { reset(); }
inline BaseFeatures(const BaseFeatures& other) noexcept = default;
inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
inline void reset() noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
_bits[i] = 0;
}
//! \}
//! \name Overloaded Operators
//! \{
inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
inline bool operator==(const BaseFeatures& other) const noexcept { return eq(other); }
inline bool operator!=(const BaseFeatures& other) const noexcept { return !eq(other); }
//! \}
//! \name Cast
//! \{
//! Casts this base class into a derived type `T`.
template<typename T>
inline T& as() noexcept { return static_cast<T&>(*this); }
//! Casts this base class into a derived type `T` (const).
template<typename T>
inline const T& as() const noexcept { return static_cast<const T&>(*this); }
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if (_bits[i])
return false;
return true;
}
//! Returns all features as array of bitwords (see \ref Support::BitWord).
inline BitWord* bits() noexcept { return _bits; }
//! Returns all features as array of bitwords (const).
inline const BitWord* bits() const noexcept { return _bits; }
//! Returns the number of BitWords returned by \ref bits().
inline size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns \ref Support::BitVectorIterator, which can be used to iterate over
//! all features efficiently.
inline Iterator iterator() const noexcept {
return Iterator(_bits, kNumBitWords);
}
//! Tests whether the feature `featureId` is present.
inline bool has(uint32_t featureId) const noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
return bool((_bits[idx] >> bit) & 0x1);
}
//! Tests whether all features as defined by `other` are present.
inline bool hasAll(const BaseFeatures& other) const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if ((_bits[i] & other._bits[i]) != other._bits[i])
return false;
return true;
}
//! \}
//! \name Utilities
//! \{
//! Adds the given CPU `featureId` to the list of features.
inline void add(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] |= BitWord(1) << bit;
}
template<typename... Args>
inline void add(uint32_t featureId, Args... otherIds) noexcept {
add(featureId);
add(otherIds...);
}
//! Removes the given CPU `featureId` from the list of features.
inline void remove(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] &= ~(BitWord(1) << bit);
}
template<typename... Args>
inline void remove(uint32_t featureId, Args... otherIds) noexcept {
remove(featureId);
remove(otherIds...);
}
inline bool eq(const BaseFeatures& other) const noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
if (_bits[i] != other._bits[i])
return false;
return true;
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FEATURES_H_INCLUDED

@ -0,0 +1,481 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/builder.h"
#include "../core/codeholder.h"
#include "../core/compiler.h"
#include "../core/emitter.h"
#include "../core/formatter.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86formatter_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armformatter_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_NO_COMPILER)
class VirtReg;
#endif
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
namespace Formatter {
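// Formats a type id into a short mnemonic, e.g. Type::kIdI32 => "i32". Vector
// types append an element count - a type whose size is 16 bytes and whose base
// is a 4-byte i32 is formatted as "i32x4".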
Error formatTypeId(String& sb, uint32_t typeId) noexcept {
if (typeId == Type::kIdVoid)
return sb.append("void");
if (!Type::isValid(typeId))
return sb.append("unknown");
const char* typeName = "unknown";
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t baseId = Type::baseOf(typeId);
switch (baseId) {
case Type::kIdIntPtr : typeName = "iptr" ; break;
case Type::kIdUIntPtr: typeName = "uptr" ; break;
case Type::kIdI8 : typeName = "i8" ; break;
case Type::kIdU8 : typeName = "u8" ; break;
case Type::kIdI16 : typeName = "i16" ; break;
case Type::kIdU16 : typeName = "u16" ; break;
case Type::kIdI32 : typeName = "i32" ; break;
case Type::kIdU32 : typeName = "u32" ; break;
case Type::kIdI64 : typeName = "i64" ; break;
case Type::kIdU64 : typeName = "u64" ; break;
case Type::kIdF32 : typeName = "f32" ; break;
case Type::kIdF64 : typeName = "f64" ; break;
case Type::kIdF80 : typeName = "f80" ; break;
case Type::kIdMask8 : typeName = "mask8" ; break;
case Type::kIdMask16 : typeName = "mask16"; break;
case Type::kIdMask32 : typeName = "mask32"; break;
case Type::kIdMask64 : typeName = "mask64"; break;
case Type::kIdMmx32 : typeName = "mmx32" ; break;
case Type::kIdMmx64 : typeName = "mmx64" ; break;
}
uint32_t baseSize = Type::sizeOf(baseId);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
}
else {
return sb.append(typeName);
}
}
Error formatFeature(
String& sb,
uint32_t arch,
uint32_t featureId) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatFeature(sb, featureId);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatFeature(sb, featureId);
#endif
return kErrorInvalidArch;
}
Error formatLabel(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
DebugUtils::unused(formatFlags);
const LabelEntry* le = emitter->code()->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!le))
return sb.appendFormat("<InvalidLabel:%u>", labelId);
if (le->hasName()) {
if (le->hasParent()) {
uint32_t parentId = le->parentId();
const LabelEntry* pe = emitter->code()->labelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe))
ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
else if (ASMJIT_UNLIKELY(!pe->hasName()))
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
else
ASMJIT_PROPAGATE(sb.append(pe->name()));
ASMJIT_PROPAGATE(sb.append('.'));
}
return sb.append(le->name());
}
else {
return sb.appendFormat("L%u", labelId);
}
}
Error formatRegister(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
uint32_t regId) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
#endif
return kErrorInvalidArch;
}
Error formatOperand(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const Operand_& op) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
#endif
return kErrorInvalidArch;
}
Error formatInstruction(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isFamilyARM(arch))
return arm::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
#endif
return kErrorInvalidArch;
}
#ifndef ASMJIT_NO_BUILDER
#ifndef ASMJIT_NO_COMPILER
static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
uint32_t typeId = value.typeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append('['));
// NOTE: It should be either reg or stack, but never both. We
// use two IFs on purpose so that if the FuncValue is both it
// would show in the logs.
if (value.isReg()) {
ASMJIT_PROPAGATE(formatRegister(sb, formatFlags, emitter, emitter->arch(), value.regType(), value.regId()));
}
if (value.isStack()) {
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
}
if (value.isIndirect())
ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk;
}
static Error formatFuncValuePack(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncValuePack& pack,
VirtReg* const* vRegs) noexcept {
size_t count = pack.count();
if (!count)
return sb.append("void");
if (count > 1)
sb.append('[');
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex];
if (!value)
break;
if (valueIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, value));
if (vRegs) {
static const char nullRet[] = "<none>";
ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[valueIndex] ? vRegs[valueIndex]->name() : nullRet));
}
}
if (count > 1)
sb.append(']');
return kErrorOk;
}
static Error formatFuncRets(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncDetail& fd) noexcept {
return formatFuncValuePack(sb, formatFlags, emitter, fd.retPack(), nullptr);
}
static Error formatFuncArgs(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
const FuncDetail& fd,
const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount();
if (!argCount)
return sb.append("void");
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, emitter, fd.argPack(argIndex), argPacks[argIndex]._data));
}
return kErrorOk;
}
#endif
Error formatNode(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && (formatFlags & FormatOptions::kFlagPositions) != 0)
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
switch (node->type()) {
case BaseNode::kNodeInst:
case BaseNode::kNodeJump: {
const InstNode* instNode = node->as<InstNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
break;
}
case BaseNode::kNodeSection: {
const SectionNode* sectionNode = node->as<SectionNode>();
if (builder->_code->isSectionValid(sectionNode->id())) {
const Section* section = builder->_code->sectionById(sectionNode->id());
ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
}
break;
}
case BaseNode::kNodeLabel: {
const LabelNode* labelNode = node->as<LabelNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, labelNode->labelId()));
ASMJIT_PROPAGATE(sb.append(":"));
break;
}
case BaseNode::kNodeAlign: {
const AlignNode* alignNode = node->as<AlignNode>();
ASMJIT_PROPAGATE(
sb.appendFormat("align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == kAlignCode ? "code" : "data"));
break;
}
case BaseNode::kNodeEmbedData: {
const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
ASMJIT_PROPAGATE(sb.append("embed "));
if (embedNode->repeatCount() != 1)
ASMJIT_PROPAGATE(sb.appendFormat("[repeat=%zu] ", size_t(embedNode->repeatCount())));
ASMJIT_PROPAGATE(sb.appendFormat("%u bytes", embedNode->dataSize()));
break;
}
case BaseNode::kNodeEmbedLabel: {
const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
ASMJIT_PROPAGATE(sb.append(".label "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
break;
}
case BaseNode::kNodeEmbedLabelDelta: {
const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
ASMJIT_PROPAGATE(sb.append(".label ("));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
ASMJIT_PROPAGATE(sb.append(" - "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case BaseNode::kNodeComment: {
const CommentNode* commentNode = node->as<CommentNode>();
ASMJIT_PROPAGATE(sb.appendFormat("; %s", commentNode->inlineComment()));
break;
}
case BaseNode::kNodeSentinel: {
const SentinelNode* sentinelNode = node->as<SentinelNode>();
const char* sentinelName = nullptr;
switch (sentinelNode->sentinelType()) {
case SentinelNode::kSentinelFuncEnd:
sentinelName = "[FuncEnd]";
break;
default:
sentinelName = "[Sentinel]";
break;
}
ASMJIT_PROPAGATE(sb.append(sentinelName));
break;
}
#ifndef ASMJIT_NO_COMPILER
case BaseNode::kNodeFunc: {
const FuncNode* funcNode = node->as<FuncNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case BaseNode::kNodeFuncRet: {
const FuncRetNode* retNode = node->as<FuncRetNode>();
ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
for (uint32_t i = 0; i < 2; i++) {
const Operand_& op = retNode->_opArray[i];
if (!op.isNone()) {
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, builder, builder->arch(), op));
}
}
break;
}
case BaseNode::kNodeInvoke: {
const InvokeNode* invokeNode = node->as<InvokeNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
break;
}
#endif
default: {
ASMJIT_PROPAGATE(sb.appendFormat("[UserNode:%u]", node->type()));
break;
}
}
return kErrorOk;
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder) noexcept {
return formatNodeList(sb, formatFlags, builder, builder->firstNode(), nullptr);
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept {
const BaseNode* node = begin;
while (node != end) {
ASMJIT_PROPAGATE(formatNode(sb, formatFlags, builder, node));
ASMJIT_PROPAGATE(sb.append('\n'));
node = node->next();
}
return kErrorOk;
}
#endif
} // {Formatter}
ASMJIT_END_NAMESPACE
#endif

@ -0,0 +1,256 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
#ifndef ASMJIT_NO_LOGGING
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseEmitter;
struct Operand_;
#ifndef ASMJIT_NO_BUILDER
class BaseBuilder;
class BaseNode;
#endif
#ifndef ASMJIT_NO_COMPILER
class BaseCompiler;
#endif
// ============================================================================
// [asmjit::FormatOptions]
// ============================================================================
//! Formatting options used by \ref Logger and \ref Formatter.
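//!
//! A minimal configuration sketch (all names used below are members of this
//! class):
//!
//! ```
//! FormatOptions opts;
//! opts.addFlags(FormatOptions::kFlagMachineCode | FormatOptions::kFlagHexImms);
//! opts.setIndentation(FormatOptions::kIndentationCode, 2);
//!
//! bool showsMachineCode = opts.hasFlag(FormatOptions::kFlagMachineCode); // true
//! ```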
class FormatOptions {
public:
//! Format flags, see \ref Flags.
uint32_t _flags;
//! Indentation by type, see \ref IndentationType.
uint8_t _indentation[4];
//! Flags can enable a logging feature.
enum Flags : uint32_t {
//! No flags.
kNoFlags = 0u,
//! Show also binary form of each logged instruction (Assembler).
kFlagMachineCode = 0x00000001u,
//! Show a text explanation of some immediate values.
kFlagExplainImms = 0x00000002u,
//! Use hexadecimal notation of immediate values.
kFlagHexImms = 0x00000004u,
//! Use hexadecimal notation of address offsets.
kFlagHexOffsets = 0x00000008u,
//! Show casts between virtual register types (Compiler).
kFlagRegCasts = 0x00000010u,
//! Show positions associated with nodes (Compiler).
kFlagPositions = 0x00000020u,
//! Annotate nodes that are lowered by passes.
kFlagAnnotations = 0x00000040u,
// TODO: These must go, keep this only for formatting.
//! Show an additional output from passes.
kFlagDebugPasses = 0x00000080u,
//! Show an additional output from RA.
kFlagDebugRA = 0x00000100u
};
//! Describes indentation type of code, label, or comment in logger output.
enum IndentationType : uint32_t {
//! Indentation used for instructions and directives.
kIndentationCode = 0u,
//! Indentation used for labels and function nodes.
kIndentationLabel = 1u,
//! Indentation used for comments (not inline comments).
kIndentationComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kIndentationReserved = 3u
//! \endcond
};
//! \name Construction & Destruction
//! \{
//! Creates a default-initialized FormatOptions.
constexpr FormatOptions() noexcept
: _flags(0),
_indentation { 0, 0, 0, 0 } {}
constexpr FormatOptions(const FormatOptions& other) noexcept = default;
inline FormatOptions& operator=(const FormatOptions& other) noexcept = default;
//! Resets FormatOptions to its default initialized state.
inline void reset() noexcept {
_flags = 0;
_indentation[0] = 0;
_indentation[1] = 0;
_indentation[2] = 0;
_indentation[3] = 0;
}
//! \}
//! \name Accessors
//! \{
//! Returns format flags.
constexpr uint32_t flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Resets all format flags to `flags`.
inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given `type`, see \ref IndentationType.
constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; }
//! Sets indentation for the given `type`, see \ref IndentationType.
inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); }
//! Resets indentation for the given `type` to zero.
inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); }
//! \}
};
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
//! Provides formatting functionality to format operands, instructions, and nodes.
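//!
//! For example, the whole node list of a \ref BaseBuilder can be dumped into a
//! \ref String (a sketch only; `cb` is assumed to be an already populated
//! builder or compiler):
//!
//! ```
//! String sb;
//! Formatter::formatNodeList(sb, FormatOptions::kFlagMachineCode, &cb);
//! printf("%s\n", sb.data());
//! ```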
namespace Formatter {
//! Appends a formatted `typeId` to the output string `sb`.
ASMJIT_API Error formatTypeId(
String& sb,
uint32_t typeId) noexcept;
//! Appends a formatted `featureId` to the output string `sb`.
//!
//! See \ref BaseFeatures.
ASMJIT_API Error formatFeature(
String& sb,
uint32_t arch,
uint32_t featureId) noexcept;
//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers,
//! which won't be formatted properly if the `emitter` is not provided.
ASMJIT_API Error formatRegister(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
uint32_t regId) noexcept;
//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels
//! properly; otherwise the label is formatted as if it were an anonymous label.
ASMJIT_API Error formatLabel(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept;
//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
ASMJIT_API Error formatOperand(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const Operand_& op) noexcept;
//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
ASMJIT_API Error formatInstruction(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
#ifndef ASMJIT_NO_BUILDER
//! Appends a formatted node to the output string `sb`.
//!
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error formatNode(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* node) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref formatNode(), but appends more nodes
//! to the output string, separating each node with a newline '\n' character.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept;
#endif
} // {Formatter}
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED

@ -0,0 +1,310 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/funcargscontext_p.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86func_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/armfunc_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::CallConv - Init / Reset]
// ============================================================================
ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId, const Environment& environment) noexcept {
reset();
#ifdef ASMJIT_BUILD_X86
if (environment.isFamilyX86())
return x86::FuncInternal::initCallConv(*this, ccId, environment);
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment.isFamilyARM())
return arm::FuncInternal::initCallConv(*this, ccId, environment);
#endif
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncDetail - Init / Reset]
// ============================================================================
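// Typical usage sketch (a hypothetical illustration assuming the
// FuncSignatureT<> helper from func.h and an Environment `env` obtained
// elsewhere, e.g. via hostEnvironment()):
//
//   FuncDetail func;
//   func.init(FuncSignatureT<int, int, int>(), env);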
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
uint32_t ccId = signature.callConv();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
return DebugUtils::errored(kErrorInvalidArgument);
CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment));
uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
const uint8_t* signatureArgs = signature.args();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
FuncValuePack& argPack = _args[argIndex];
argPack[0].initTypeId(Type::deabstract(signatureArgs[argIndex], deabstractDelta));
}
_argCount = uint8_t(argCount);
_vaIndex = uint8_t(signature.vaIndex());
uint32_t ret = signature.ret();
if (ret != Type::kIdVoid)
_rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
#ifdef ASMJIT_BUILD_X86
if (environment.isFamilyX86())
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
#ifdef ASMJIT_BUILD_ARM
if (environment.isFamilyARM())
return arm::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
// We should never get here, because if `cc.init()` succeeded then there has to
// be an implementation for the current architecture. However, stay safe.
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncFrame - Init / Finalize]
// ============================================================================
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
uint32_t arch = func.callConv().arch();
if (!Environment::isValidArch(arch))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Initializing FuncFrame means making a copy of some properties of `func`.
// Properties like `_localStackSize` will be set by the user before the frame
// is finalized.
reset();
_arch = uint8_t(arch);
_spRegId = uint8_t(archTraits.spRegId());
_saRegId = uint8_t(BaseReg::kIdBad);
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment)
minDynamicAlignment <<= 1;
_naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment);
_redZoneSize = uint8_t(func.redZoneSize());
_spillZoneSize = uint8_t(func.spillZoneSize());
_finalStackAlignment = uint8_t(_naturalStackAlignment);
if (func.hasFlag(CallConv::kFlagCalleePopsStack)) {
_calleeStackCleanup = uint16_t(func.argStackSize());
}
// Initial masks of dirty and preserved registers.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
_dirtyRegs[group] = func.usedRegs(group);
_preservedRegs[group] = func.preservedRegs(group);
}
// Exclude stack pointer - this register is never included in saved GP regs.
_preservedRegs[BaseReg::kGroupGp] &= ~Support::bitMask(archTraits.spRegId());
// The size and alignment of save/restore area of registers for each significant register group.
memcpy(_saveRestoreRegSize, func.callConv()._saveRestoreRegSize, sizeof(_saveRestoreRegSize));
memcpy(_saveRestoreAlignment, func.callConv()._saveRestoreAlignment, sizeof(_saveRestoreAlignment));
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch()))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch());
uint32_t registerSize = _saveRestoreRegSize[BaseReg::kGroupGp];
uint32_t vectorSize = _saveRestoreRegSize[BaseReg::kGroupVec];
uint32_t returnAddressSize = archTraits.hasLinkReg() ? 0u : registerSize;
// The final stack alignment must be updated according to the call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment,
_callStackAlignment,
_localStackAlignment));
bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment();
uint32_t kSp = archTraits.spRegId();
uint32_t kFp = archTraits.fpRegId();
uint32_t kLr = archTraits.linkRegId();
// Make frame pointer dirty if the function uses it.
if (hasFP) {
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kFp);
// Currently required by ARM; if this works differently across architectures
// we would most likely have to generalize this in CallConv.
if (kLr != BaseReg::kIdBad)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kLr);
}
// These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId;
if (saRegId == BaseReg::kIdBad)
saRegId = kSp;
// Fix stack arguments base-register from SP to FP in case it was not picked
// before and the function performs dynamic stack alignment.
if (hasDA && saRegId == kSp)
saRegId = kFp;
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(saRegId);
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
saveRestoreSizes[size_t(!archTraits.hasPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
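// Illustrative layout (hypothetical X64 numbers, not produced by this code):
// with stackAlignment=16, returnAddressSize=8, callStackSize=32,
// localStackSize=24, pushPopSaveSize=16 and no extra register save area, the
// arithmetic below yields localStackOffset=32, pushPopSaveOffset=56,
// stackAdjustment=56, finalStackSize=72 and saOffsetFromSP=80.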
uint32_t v = 0; // The beginning of the stack frame relative to SP after prolog.
v += callStackSize(); // Count 'callStackSize' <- This is used to call functions.
v = Support::alignUp(v, stackAlignment); // Align to function's stack alignment.
_localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
v += localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
// If the function's stack must be aligned, calculate the alignment necessary
// to store vector registers, and set `FuncFrame::kAttrAlignedVecSR` to inform
// PEI that it can use instructions that perform aligned stores/loads.
if (stackAlignment >= vectorSize && _extraRegSaveSize) {
addAttributes(FuncFrame::kAttrAlignedVecSR);
v = Support::alignUp(v, vectorSize); // Align 'extraRegSaveOffset'.
}
_extraRegSaveOffset = v; // Store 'extraRegSaveOffset' <- Non-GP save/restore starts here.
v += _extraRegSaveSize; // Count 'extraRegSaveSize' <- Non-GP save/restore ends here.
// Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
if (hasDA && !hasFP) {
_daOffset = v; // Store 'daOffset' <- DA pointer would be stored here.
v += registerSize; // Count 'daOffset'.
}
else {
_daOffset = FuncFrame::kTagInvalidOffset;
}
// Link Register
// -------------
//
// The stack is aligned after the function call as the return address is
// stored in a link register. Some architectures may require to always
// have aligned stack after PUSH/POP operation, which is represented by
// ArchTraits::stackAlignmentConstraint().
//
// No Link Register (X86/X64)
// --------------------------
//
// The return address should be stored after GP save/restore regs. It has
// the same size as `registerSize` (basically the native register/pointer
// size). We don't adjust it now as `v` now contains the exact size that the
// function requires to adjust (call frame + stack frame, vec stack size).
// The stack (if we consider this size) is misaligned now, as it's always
// aligned before the function call - when `call()` is executed it pushes
// the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes
// (depending on the architecture). So count the number of bytes needed to align
// it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize)
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg())
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA)
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
// Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
// Calculate where the function arguments start relative to FP or user-provided register.
_saOffsetFromSA = hasFP ? returnAddressSize + registerSize // Return address + frame pointer.
: returnAddressSize + _pushPopSaveSize; // Return address + all push/pop regs.
return kErrorOk;
}
// ============================================================================
// [asmjit::FuncArgsAssignment]
// ============================================================================
ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
uint32_t arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func)
return DebugUtils::errored(kErrorInvalidState);
RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch));
FuncArgsContext ctx;
ASMJIT_PROPAGATE(ctx.initWorkData(frame, *this, &constraints));
ASMJIT_PROPAGATE(ctx.markDstRegsDirty(frame));
ASMJIT_PROPAGATE(ctx.markScratchRegs(frame));
ASMJIT_PROPAGATE(ctx.markStackArgsReg(frame));
return kErrorOk;
}
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large

@ -0,0 +1,315 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/funcargscontext_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_workData[group].reset();
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
// The code has to be updated if this changes.
ASMJIT_ASSERT(BaseReg::kGroupVirt == 4);
uint32_t i;
uint32_t arch = frame.arch();
const FuncDetail& func = *args.funcDetail();
_archTraits = &ArchTraits::byArch(arch);
_constraints = constraints;
_arch = uint8_t(arch);
// Initialize `_archRegs`.
for (i = 0; i < BaseReg::kGroupVirt; i++)
_workData[i]._archRegs = _constraints->availableRegs(i);
if (frame.hasPreservedFP())
_workData[BaseReg::kGroupGp]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
for (uint32_t argIndex = 0; argIndex < Globals::kMaxFuncArgs; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& dst_ = args.arg(argIndex, valueIndex);
if (!dst_.isAssigned())
continue;
const FuncValue& src_ = func.arg(argIndex, valueIndex);
if (ASMJIT_UNLIKELY(!src_.isAssigned()))
return DebugUtils::errored(kErrorInvalidState);
Var& var = _vars[varId];
var.init(src_, dst_);
FuncValue& src = var.cur;
FuncValue& dst = var.out;
uint32_t dstGroup = 0xFFFFFFFFu;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;
// Not supported.
if (src.isIndirect())
return DebugUtils::errored(kErrorInvalidAssignment);
if (dst.isReg()) {
uint32_t dstType = dst.regType();
if (ASMJIT_UNLIKELY(!archTraits().hasRegType(dstType)))
return DebugUtils::errored(kErrorInvalidRegType);
// Copy TypeId from source if the destination doesn't have it. The RA
// used by BaseCompiler would never leave TypeId undefined, but users
// of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(archTraits().regTypeToTypeId(dst.regType()));
dstGroup = archTraits().regTypeToGroup(dstType);
if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);
dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
return DebugUtils::errored(kErrorInvalidPhysId);
if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
return DebugUtils::errored(kErrorOverlappedRegs);
dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());
RegInfo regInfo = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!regInfo.isValid()))
return DebugUtils::errored(kErrorInvalidState);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
}
if (src.isReg()) {
uint32_t srcId = src.regId();
uint32_t srcGroup = archTraits().regTypeToGroup(src.regType());
if (dstGroup == srcGroup) {
dstWd->assign(varId, srcId);
// The best case, register is allocated where it is expected to be.
if (dstId == srcId)
var.markDone();
}
else {
if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidState);
WorkData& srcData = _workData[srcGroup];
srcData.assign(varId, srcId);
}
}
else {
if (dstWd)
dstWd->_numStackArgs++;
_hasStackSrc = true;
}
varId++;
}
}
// Initialize WorkData::workRegs.
for (i = 0; i < BaseReg::kGroupVirt; i++) {
_workData[i]._workRegs = (_workData[i].archRegs() & (frame.dirtyRegs(i) | ~frame.preservedRegs(i))) | _workData[i].dstRegs() | _workData[i].assignedRegs();
}
// Create a variable that represents `SARegId` if necessary.
bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
WorkData& gpRegs = _workData[BaseReg::kGroupGp];
uint32_t saCurRegId = frame.saRegId();
uint32_t saOutRegId = args.saRegId();
if (saCurRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with input registers.
if (ASMJIT_UNLIKELY(gpRegs.isAssigned(saCurRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
}
if (saOutRegId != BaseReg::kIdBad) {
// Check if the provided `SARegId` doesn't collide with argument assignments.
if (ASMJIT_UNLIKELY(Support::bitTest(gpRegs.dstRegs(), saOutRegId)))
return DebugUtils::errored(kErrorOverlappedRegs);
saRegRequired = true;
}
if (saRegRequired) {
uint32_t ptrTypeId = Environment::is32Bit(arch) ? Type::kIdU32 : Type::kIdU64;
uint32_t ptrRegType = Environment::is32Bit(arch) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_saVarId = uint8_t(varId);
_hasPreservedFP = frame.hasPreservedFP();
Var& var = _vars[varId];
var.reset();
if (saCurRegId == BaseReg::kIdBad) {
if (saOutRegId != BaseReg::kIdBad && !gpRegs.isAssigned(saOutRegId)) {
saCurRegId = saOutRegId;
}
else {
uint32_t availableRegs = gpRegs.availableRegs();
if (!availableRegs)
availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorNoMorePhysRegs);
saCurRegId = Support::ctz(availableRegs);
}
}
var.cur.initReg(ptrRegType, saCurRegId, ptrTypeId);
gpRegs.assign(varId, saCurRegId);
gpRegs._workRegs |= Support::bitMask(saCurRegId);
if (saOutRegId != BaseReg::kIdBad) {
var.out.initReg(ptrRegType, saOutRegId, ptrTypeId);
gpRegs._dstRegs |= Support::bitMask(saOutRegId);
gpRegs._workRegs |= Support::bitMask(saOutRegId);
}
else {
var.markDone();
}
varId++;
}
_varCount = varId;
// Detect register swaps.
for (varId = 0; varId < _varCount; varId++) {
Var& var = _vars[varId];
if (var.cur.isReg() && var.out.isReg()) {
uint32_t srcId = var.cur.regId();
uint32_t dstId = var.out.regId();
uint32_t group = archTraits().regTypeToGroup(var.cur.regType());
if (group != archTraits().regTypeToGroup(var.out.regType()))
continue;
WorkData& wd = _workData[group];
if (wd.isAssigned(dstId)) {
Var& other = _vars[wd._physToVarId[dstId]];
if (archTraits().regTypeToGroup(other.out.regType()) == group && other.out.regId() == srcId) {
wd._numSwaps++;
_regSwapsMask = uint8_t(_regSwapsMask | Support::bitMask(group));
}
}
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++) {
WorkData& wd = _workData[i];
uint32_t regs = wd.usedRegs() | wd._dstShuf;
wd._workRegs |= regs;
frame.addDirtyRegs(i, regs);
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexcept {
uint32_t groupMask = 0;
// Handle stack to stack moves.
groupMask |= _stackDstMask;
// Handle register swaps.
groupMask |= _regSwapsMask & ~Support::bitMask(BaseReg::kGroupGp);
if (!groupMask)
return kErrorOk;
// Selects one dirty register per affected group that can be used as a scratch register.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
if (Support::bitTest(groupMask, group)) {
WorkData& wd = _workData[group];
// Initially, pick some clobbered or dirty register.
uint32_t workRegs = wd.workRegs();
uint32_t regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
// If that didn't work out pick some register which is not in 'used'.
if (!regs)
regs = workRegs & ~wd.usedRegs();
// If that didn't work out pick any other register that is allocable.
// This last resort case will, however, result in marking one more
// register dirty.
if (!regs)
regs = wd.archRegs() & ~workRegs;
// If that didn't work out we will have to use XORs instead of MOVs.
if (!regs)
continue;
uint32_t regMask = Support::blsi(regs);
wd._workRegs |= regMask;
frame.addDirtyRegs(group, regMask);
}
}
return kErrorOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markStackArgsReg(FuncFrame& frame) noexcept {
if (_saVarId != kVarIdNone) {
const Var& var = _vars[_saVarId];
frame.setSARegId(var.cur.regId());
}
else if (frame.hasPreservedFP()) {
frame.setSARegId(archTraits().fpRegId());
}
return kErrorOk;
}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
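// A sketch of the two shuffle strategies tracked above (X86 used as an example,
// concrete registers are placeholders): a swap inside the GP group needs no scratch
// register, which is why markScratchRegs() strips kGroupGp from _regSwapsMask.
//
//   ; wanted:  arg0 -> rdi, arg1 -> rsi    current: arg0 in rsi, arg1 in rdi
//   xchg rdi, rsi                          ; single instruction, no third register
//
//   ; the same situation in a vector group (xmm0 <-> xmm1) has no xchg form, so one
//   ; extra register of that group is marked dirty and used as a scratch:
//   movaps xmm2, xmm0
//   movaps xmm0, xmm1
//   movaps xmm1, xmm2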

@ -0,0 +1,224 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#define ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/environment.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/radefs_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [TODO: Place somewhere else]
// ============================================================================
static inline RegInfo getSuitableRegForMemToMemMove(uint32_t arch, uint32_t dstTypeId, uint32_t srcTypeId) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
uint32_t dstSize = Type::sizeOf(dstTypeId);
uint32_t srcSize = Type::sizeOf(srcTypeId);
uint32_t maxSize = Support::max<uint32_t>(dstSize, srcSize);
uint32_t regSize = Environment::registerSizeFromArch(arch);
uint32_t signature = 0;
if (maxSize <= regSize || (Type::isInt(dstTypeId) && Type::isInt(srcTypeId)))
signature = maxSize <= 4 ? archTraits.regTypeToSignature(BaseReg::kTypeGp32)
: archTraits.regTypeToSignature(BaseReg::kTypeGp64);
else if (maxSize <= 8 && archTraits.hasRegType(BaseReg::kTypeVec64))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec64);
else if (maxSize <= 16 && archTraits.hasRegType(BaseReg::kTypeVec128))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec128);
else if (maxSize <= 32 && archTraits.hasRegType(BaseReg::kTypeVec256))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec256);
else if (maxSize <= 64 && archTraits.hasRegType(BaseReg::kTypeVec512))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec512);
return RegInfo { signature };
}
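// For example, on x86_64 (register size 8) the helper above resolves to:
//
//   dst/src type        picked register class
//   i32   / i32      -> Gp32   (maxSize <= 4)
//   f64   / f64      -> Gp64   (maxSize <= regSize, bounced through a GP register)
//   f32x4 / f32x4    -> Vec128 (xmm)
//
// i.e. a plain GP register is preferred whenever the value fits, and vector
// registers are only used for payloads wider than the native word.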
// ============================================================================
// [asmjit::FuncArgsContext]
// ============================================================================
class FuncArgsContext {
public:
enum VarId : uint32_t {
kVarIdNone = 0xFF
};
//! Contains information about a single argument or SA register that may need shuffling.
struct Var {
FuncValue cur;
FuncValue out;
inline void init(const FuncValue& cur_, const FuncValue& out_) noexcept {
cur = cur_;
out = out_;
}
//! Reset the value to its unassigned state.
inline void reset() noexcept {
cur.reset();
out.reset();
}
inline bool isDone() const noexcept { return cur.isDone(); }
inline void markDone() noexcept { cur.addFlags(FuncValue::kFlagIsDone); }
};
struct WorkData {
//! All allocable registers provided by the architecture.
uint32_t _archRegs;
//! All registers that can be used by the shuffler.
uint32_t _workRegs;
//! Registers used by the shuffler (all).
uint32_t _usedRegs;
//! Assigned registers.
uint32_t _assignedRegs;
//! Destination registers assigned to arguments or SA.
uint32_t _dstRegs;
//! Destination registers that require shuffling.
uint32_t _dstShuf;
//! Number of register swaps.
uint8_t _numSwaps;
//! Number of stack loads.
uint8_t _numStackArgs;
//! Reserved (only used as padding).
uint8_t _reserved[6];
//! Physical ID to variable ID mapping.
uint8_t _physToVarId[32];
inline void reset() noexcept {
_archRegs = 0;
_workRegs = 0;
_usedRegs = 0;
_assignedRegs = 0;
_dstRegs = 0;
_dstShuf = 0;
_numSwaps = 0;
_numStackArgs = 0;
memset(_reserved, 0, sizeof(_reserved));
memset(_physToVarId, kVarIdNone, 32);
}
inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId);
}
inline void assign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(!isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == kVarIdNone);
_physToVarId[regId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(regId);
}
inline void reassign(uint32_t varId, uint32_t newId, uint32_t oldId) noexcept {
ASMJIT_ASSERT( isAssigned(oldId));
ASMJIT_ASSERT(!isAssigned(newId));
ASMJIT_ASSERT(_physToVarId[oldId] == varId);
ASMJIT_ASSERT(_physToVarId[newId] == kVarIdNone);
_physToVarId[oldId] = uint8_t(kVarIdNone);
_physToVarId[newId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(newId) ^ Support::bitMask(oldId);
}
inline void swap(uint32_t aVarId, uint32_t aRegId, uint32_t bVarId, uint32_t bRegId) noexcept {
ASMJIT_ASSERT(isAssigned(aRegId));
ASMJIT_ASSERT(isAssigned(bRegId));
ASMJIT_ASSERT(_physToVarId[aRegId] == aVarId);
ASMJIT_ASSERT(_physToVarId[bRegId] == bVarId);
_physToVarId[aRegId] = uint8_t(bVarId);
_physToVarId[bRegId] = uint8_t(aVarId);
}
inline void unassign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == varId);
DebugUtils::unused(varId);
_physToVarId[regId] = uint8_t(kVarIdNone);
_assignedRegs ^= Support::bitMask(regId);
}
inline uint32_t archRegs() const noexcept { return _archRegs; }
inline uint32_t workRegs() const noexcept { return _workRegs; }
inline uint32_t usedRegs() const noexcept { return _usedRegs; }
inline uint32_t assignedRegs() const noexcept { return _assignedRegs; }
inline uint32_t dstRegs() const noexcept { return _dstRegs; }
inline uint32_t availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
};
//! Architecture traits.
const ArchTraits* _archTraits = nullptr;
const RAConstraints* _constraints = nullptr;
//! Architecture identifier.
uint8_t _arch = 0;
//! Has arguments passed via stack (SRC).
bool _hasStackSrc = false;
//! Has preserved frame-pointer (FP).
bool _hasPreservedFP = false;
//! Has arguments assigned to stack (DST).
uint8_t _stackDstMask = 0;
//! Register swap groups (bit-mask).
uint8_t _regSwapsMask = 0;
uint8_t _saVarId = kVarIdNone;
uint32_t _varCount = 0;
WorkData _workData[BaseReg::kGroupVirt];
Var _vars[Globals::kMaxFuncArgs * Globals::kMaxValuePack + 1];
FuncArgsContext() noexcept;
inline const ArchTraits& archTraits() const noexcept { return *_archTraits; }
inline uint32_t arch() const noexcept { return _arch; }
inline uint32_t varCount() const noexcept { return _varCount; }
inline size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
inline Var& var(size_t varId) noexcept { return _vars[varId]; }
inline const Var& var(size_t varId) const noexcept { return _vars[varId]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;
Error markScratchRegs(FuncFrame& frame) noexcept;
Error markDstRegsDirty(FuncFrame& frame) noexcept;
Error markStackArgsReg(FuncFrame& frame) noexcept;
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
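// A small worked example of the WorkData bookkeeping above (ids are hypothetical):
//
//   WorkData wd;
//   wd.reset();
//   wd.assign(0, 3);        // _assignedRegs = 0b00001000, _physToVarId[3] = 0
//   wd.reassign(0, 5, 3);   // bits 3 and 5 toggled in one XOR -> 0b00100000
//   wd.unassign(0, 5);      // back to zero
//
// availableRegs() is always `workRegs & ~assignedRegs`, i.e. the registers the
// argument shuffler may still write to without clobbering a live value.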

@ -0,0 +1,146 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#ifndef ASMJIT_NO_TEXT
// @EnumStringBegin{"enum": "ErrorCode", "output": "sError", "strip": "kError"}@
static const char sErrorString[] =
"Ok\0"
"OutOfMemory\0"
"InvalidArgument\0"
"InvalidState\0"
"InvalidArch\0"
"NotInitialized\0"
"AlreadyInitialized\0"
"FeatureNotEnabled\0"
"TooManyHandles\0"
"TooLarge\0"
"NoCodeGenerated\0"
"InvalidDirective\0"
"InvalidLabel\0"
"TooManyLabels\0"
"LabelAlreadyBound\0"
"LabelAlreadyDefined\0"
"LabelNameTooLong\0"
"InvalidLabelName\0"
"InvalidParentLabel\0"
"NonLocalLabelCannotHaveParent\0"
"InvalidSection\0"
"TooManySections\0"
"InvalidSectionName\0"
"TooManyRelocations\0"
"InvalidRelocEntry\0"
"RelocOffsetOutOfRange\0"
"InvalidAssignment\0"
"InvalidInstruction\0"
"InvalidRegType\0"
"InvalidRegGroup\0"
"InvalidPhysId\0"
"InvalidVirtId\0"
"InvalidElementIndex\0"
"InvalidPrefixCombination\0"
"InvalidLockPrefix\0"
"InvalidXAcquirePrefix\0"
"InvalidXReleasePrefix\0"
"InvalidRepPrefix\0"
"InvalidRexPrefix\0"
"InvalidExtraReg\0"
"InvalidKMaskUse\0"
"InvalidKZeroUse\0"
"InvalidBroadcast\0"
"InvalidEROrSAE\0"
"InvalidAddress\0"
"InvalidAddressIndex\0"
"InvalidAddressScale\0"
"InvalidAddress64Bit\0"
"InvalidAddress64BitZeroExtension\0"
"InvalidDisplacement\0"
"InvalidSegment\0"
"InvalidImmediate\0"
"InvalidOperandSize\0"
"AmbiguousOperandSize\0"
"OperandSizeMismatch\0"
"InvalidOption\0"
"OptionAlreadyDefined\0"
"InvalidTypeId\0"
"InvalidUseOfGpbHi\0"
"InvalidUseOfGpq\0"
"InvalidUseOfF80\0"
"NotConsecutiveRegs\0"
"IllegalVirtReg\0"
"TooManyVirtRegs\0"
"NoMorePhysRegs\0"
"OverlappedRegs\0"
"OverlappingStackRegWithRegArg\0"
"ExpressionLabelNotBound\0"
"ExpressionOverflow\0"
"FailedToOpenAnonymousMemory\0"
"<Unknown>\0";
static const uint16_t sErrorIndex[] = {
0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
247, 264, 283, 313, 328, 344, 363, 382, 400, 422, 440, 459, 474, 490, 504,
518, 538, 563, 581, 603, 625, 642, 659, 675, 691, 707, 724, 739, 754, 774,
794, 814, 847, 867, 882, 899, 918, 939, 959, 973, 994, 1008, 1026, 1042,
1058, 1077, 1092, 1108, 1123, 1138, 1168, 1192, 1211, 1239
};
// @EnumStringEnd@
return sErrorString + sErrorIndex[Support::min<Error>(err, kErrorCount)];
#else
DebugUtils::unused(err);
static const char noMessage[] = "";
return noMessage;
#endif
}
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
::fputs(str, stderr);
#endif
}
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,
"[asmjit] Assertion failed at %s (line %d):\n"
"[asmjit] %s\n", file, line, msg);
debugOutput(str);
::abort();
}
ASMJIT_END_NAMESPACE
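// The lookup above packs every error name into one NUL-separated blob indexed by
// sErrorIndex, so a query is a single table load plus a pointer offset. Using the
// tables as defined:
//
//   DebugUtils::errorAsString(kErrorOk);               // &sErrorString[0]  -> "Ok"
//   DebugUtils::errorAsString(kErrorInvalidArgument);  // &sErrorString[15] -> "InvalidArgument"
//   DebugUtils::errorAsString(Error(0xFFFF));          // clamped to kErrorCount -> "<Unknown>"
//
// ("Ok\0" occupies bytes 0..2 and "OutOfMemory\0" bytes 3..14, hence index 15.)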

@ -0,0 +1,462 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
#define ASMJIT_CORE_GLOBALS_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Support]
// ============================================================================
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
namespace Support {
//! Helper cast used to convert between function and void* pointers.
template<typename Dst, typename Src>
static inline Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; }
} // {Support}
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_INLINE void* operator new(size_t n) noexcept { \
return Support::operatorNew(n); \
} \
\
ASMJIT_INLINE void operator delete(void* p) noexcept { \
Support::operatorDelete(p); \
} \
\
ASMJIT_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
ASMJIT_INLINE void operator delete(void*, void*) noexcept {}
#else
#define ASMJIT_BASE_CLASS(TYPE)
#endif
//! \}
//! \endcond
// ============================================================================
// [asmjit::Globals]
// ============================================================================
//! \addtogroup asmjit_core
//! \{
//! Contains typedefs, constants, and variables used globally by AsmJit.
namespace Globals {
// ============================================================================
// [asmjit::Globals::<global>]
// ============================================================================
//! Host memory allocator overhead.
static constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
//! Host memory allocator alignment.
static constexpr uint32_t kAllocAlignment = 8;
//! Aggressive growing strategy threshold.
static constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
//! Maximum depth of RB-Tree is:
//!
//! `2 * log2(n + 1)`
//!
//! Size of RB node is at least two pointers (without data),
//! so a theoretical architecture limit would be:
//!
//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
//!
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch.
//! The final value was adjusted by +1 for safety reasons.
static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
//! Maximum number of operands of a single instruction.
static constexpr uint32_t kMaxOpCount = 6;
//! Maximum arguments of a function supported by the Compiler / Function API.
static constexpr uint32_t kMaxFuncArgs = 16;
//! The number of values that can be assigned to a single function argument or
//! return value.
static constexpr uint32_t kMaxValuePack = 4;
//! Maximum number of physical registers AsmJit can use per register group.
static constexpr uint32_t kMaxPhysRegs = 32;
//! Maximum alignment.
static constexpr uint32_t kMaxAlignment = 64;
//! Maximum label or symbol size in bytes.
static constexpr uint32_t kMaxLabelNameSize = 2048;
//! Maximum section name size.
static constexpr uint32_t kMaxSectionNameSize = 35;
//! Maximum size of comment.
static constexpr uint32_t kMaxCommentSize = 1024;
//! Invalid identifier.
static constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
static constexpr uint32_t kNotFound = 0xFFFFFFFFu;
//! Invalid base address.
static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
// ============================================================================
// [asmjit::Globals::ResetPolicy]
// ============================================================================
//! Reset policy used by most `reset()` functions.
enum ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
kResetSoft = 0,
//! Hard reset, releases all memory used, if any.
kResetHard = 1
};
// ============================================================================
// [asmjit::Globals::Link]
// ============================================================================
enum Link : uint32_t {
kLinkLeft = 0,
kLinkRight = 1,
kLinkPrev = 0,
kLinkNext = 1,
kLinkFirst = 0,
kLinkLast = 1,
kLinkCount = 2
};
struct Init_ {};
struct NoInit_ {};
static const constexpr Init_ Init {};
static const constexpr NoInit_ NoInit {};
} // {Globals}
// ============================================================================
// [asmjit::ByteOrder]
// ============================================================================
//! Byte order.
namespace ByteOrder {
enum : uint32_t {
kLE = 0,
kBE = 1,
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
}
// ============================================================================
// [asmjit::ptr_as_func / func_as_ptr]
// ============================================================================
template<typename Func>
static inline Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
template<typename Func>
static inline void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
//! \}
// ============================================================================
// [asmjit::Error]
// ============================================================================
//! \addtogroup asmjit_error_handling
//! \{
//! AsmJit error type (uint32_t).
typedef uint32_t Error;
//! AsmJit error codes.
enum ErrorCode : uint32_t {
// @EnumValuesBegin{"enum": "ErrorCode"}@
//! No error (success).
kErrorOk = 0,
//! Out of memory.
kErrorOutOfMemory,
//! Invalid argument.
kErrorInvalidArgument,
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something
//! wrong or AsmJit caught itself by doing something wrong. This error should
//! never be ignored.
kErrorInvalidState,
//! Invalid or incompatible architecture.
kErrorInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
//! The object is already initialized.
kErrorAlreadyInitialized,
//! Built-in feature was disabled at compile time and it's not available.
kErrorFeatureNotEnabled,
//! Too many handles (Windows) or file descriptors (Unix/Posix).
kErrorTooManyHandles,
//! Code generated is larger than allowed.
kErrorTooLarge,
//! No code generated.
//!
//! Returned by runtime if the \ref CodeHolder contains no code.
kErrorNoCodeGenerated,
//! Invalid directive.
kErrorInvalidDirective,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
//! Label index overflow - a single \ref BaseAssembler instance can hold
//! almost 2^32 (4 billion) labels. If there is an attempt to create more
//! labels then this error is returned.
kErrorTooManyLabels,
//! Label is already bound.
kErrorLabelAlreadyBound,
//! Label is already defined (named labels).
kErrorLabelAlreadyDefined,
//! Label name is too long.
kErrorLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to \ref CodeHolder::newNamedLabelEntry() was invalid.
kErrorInvalidParentLabel,
//! Parent id specified for a non-local (global) label.
kErrorNonLocalLabelCannotHaveParent,
//! Invalid section.
kErrorInvalidSection,
//! Too many sections (section index overflow).
kErrorTooManySections,
//! Invalid section name (most probably too long).
kErrorInvalidSectionName,
//! Relocation index overflow (too many relocations).
kErrorTooManyRelocations,
//! Invalid relocation entry.
kErrorInvalidRelocEntry,
//! Reloc entry contains address that is out of range (unencodable).
kErrorRelocOffsetOutOfRange,
//! Invalid assignment to a register, function argument, or function return value.
kErrorInvalidAssignment,
//! Invalid instruction.
kErrorInvalidInstruction,
//! Invalid register type.
kErrorInvalidRegType,
//! Invalid register group.
kErrorInvalidRegGroup,
//! Invalid physical register id.
kErrorInvalidPhysId,
//! Invalid virtual register id.
kErrorInvalidVirtId,
//! Invalid element index (ARM).
kErrorInvalidElementIndex,
//! Invalid prefix combination (X86|X64).
kErrorInvalidPrefixCombination,
//! Invalid LOCK prefix (X86|X64).
kErrorInvalidLockPrefix,
//! Invalid XACQUIRE prefix (X86|X64).
kErrorInvalidXAcquirePrefix,
//! Invalid XRELEASE prefix (X86|X64).
kErrorInvalidXReleasePrefix,
//! Invalid REP prefix (X86|X64).
kErrorInvalidRepPrefix,
//! Invalid REX prefix (X86|X64).
kErrorInvalidRexPrefix,
//! Invalid {...} register (X86|X64).
kErrorInvalidExtraReg,
//! Invalid {k} use (not supported by the instruction) (X86|X64).
kErrorInvalidKMaskUse,
//! Invalid {k}{z} use (not supported by the instruction) (X86|X64).
kErrorInvalidKZeroUse,
//! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox} (X86|X64).
kErrorInvalidBroadcast,
//! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512) (X86|X64).
kErrorInvalidEROrSAE,
//! Invalid address used (not encodable).
kErrorInvalidAddress,
//! Invalid index register used in memory address (not encodable).
kErrorInvalidAddressIndex,
//! Invalid address scale (not encodable).
kErrorInvalidAddressScale,
//! Invalid use of 64-bit address.
kErrorInvalidAddress64Bit,
//! Invalid use of 64-bit address that require 32-bit zero-extension (X64).
kErrorInvalidAddress64BitZeroExtension,
//! Invalid displacement (not encodable).
kErrorInvalidDisplacement,
//! Invalid segment (X86).
kErrorInvalidSegment,
//! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
kErrorInvalidImmediate,
//! Invalid operand size.
kErrorInvalidOperandSize,
//! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
kErrorAmbiguousOperandSize,
//! Mismatching operand size (size of multiple operands doesn't match the operation size).
kErrorOperandSizeMismatch,
//! Invalid option.
kErrorInvalidOption,
//! Option already defined.
kErrorOptionAlreadyDefined,
//! Invalid TypeId.
kErrorInvalidTypeId,
//! Invalid use of an 8-bit GPB-HIGH register.
kErrorInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
//! Invalid use of an 80-bit float (\ref Type::kIdF80).
kErrorInvalidUseOfF80,
//! Some registers in the instruction must be consecutive (some ARM and AVX-512
//! neural-net instructions).
kErrorNotConsecutiveRegs,
//! Illegal virtual register - reported by instruction validation.
kErrorIllegalVirtReg,
//! AsmJit cannot create more virtual registers.
kErrorTooManyVirtRegs,
//! AsmJit requires a physical register, but none is available.
kErrorNoMorePhysRegs,
//! A variable has been assigned more than once to a function argument (BaseCompiler).
kErrorOverlappedRegs,
//! Invalid register to hold stack arguments offset.
kErrorOverlappingStackRegWithRegArg,
//! Unbound label cannot be evaluated by expression.
kErrorExpressionLabelNotBound,
//! Arithmetic overflow during expression evaluation.
kErrorExpressionOverflow,
//! Failed to open anonymous memory handle or file descriptor.
kErrorFailedToOpenAnonymousMemory,
// @EnumValuesEnd@
//! Count of AsmJit error codes.
kErrorCount
};
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
//! Debugging utilities.
namespace DebugUtils {
//! \cond INTERNAL
//! Used to silence warnings about unused arguments or variables.
template<typename... Args>
static ASMJIT_INLINE void unused(Args&&...) noexcept {}
//! \endcond
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
//! help with tracing the origin of any error reported / returned by AsmJit.
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures a breakpoint can be put
//! at \ref assertionFailed() function (asmjit/core/globals.cpp). A call stack
//! will be available when such assertion failure is triggered. AsmJit always
//! returns errors on failures, assertions are a last resort and usually mean
//! unrecoverable state due to out of range array access or totally invalid
//! arguments like nullptr where a valid pointer should be provided, etc...
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
//! \def ASMJIT_ASSERT(...)
//!
//! AsmJit's own assert macro used in AsmJit code-base.
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_ASSERT(...) \
do { \
if (ASMJIT_LIKELY(__VA_ARGS__)) \
break; \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} while (0)
#else
#define ASMJIT_ASSERT(...) ((void)0)
#endif
//! \def ASMJIT_PROPAGATE(...)
//!
//! Propagates a possible `Error` produced by `...` to the caller by returning
//! the error immediately. Used by AsmJit internally, but kept public for users
//! that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \
return _err; \
} while (0)
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_GLOBALS_H_INCLUDED
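// A short sketch of the error plumbing declared above (the helper name is a
// placeholder): wrapping literals in DebugUtils::errored() gives one spot for a
// breakpoint, and ASMJIT_PROPAGATE keeps Error-returning call chains flat.
//
//   static asmjit::Error initForHost(asmjit::CodeHolder& code, const asmjit::Environment& env) {
//     using namespace asmjit;
//     if (env.arch() == Environment::kArchUnknown)
//       return DebugUtils::errored(kErrorInvalidArch);
//     ASMJIT_PROPAGATE(code.init(env));   // propagates kErrorOutOfMemory etc. to the caller.
//     return kErrorOk;
//   }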

@ -0,0 +1,139 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_ARM)
#include "../core/archtraits.h"
#include "../core/inst.h"
#ifdef ASMJIT_BUILD_X86
#include "../x86/x86instapi_p.h"
#endif
#ifdef ASMJIT_BUILD_ARM
#include "../arm/a64instapi_p.h"
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::InstAPI - Text]
// ============================================================================
#ifndef ASMJIT_NO_TEXT
Error InstAPI::instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::InstInternal::instIdToString(arch, instId, output);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isArchAArch64(arch))
return a64::InstInternal::instIdToString(arch, instId, output);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t InstAPI::stringToInstId(uint32_t arch, const char* s, size_t len) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::InstInternal::stringToInstId(arch, s, len);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isArchAArch64(arch))
return a64::InstInternal::stringToInstId(arch, s, len);
#endif
return 0;
}
#endif // !ASMJIT_NO_TEXT
// ============================================================================
// [asmjit::InstAPI - Validate]
// ============================================================================
#ifndef ASMJIT_NO_VALIDATION
Error InstAPI::validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isArchAArch64(arch))
return a64::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_VALIDATION
// ============================================================================
// [asmjit::InstAPI - QueryRWInfo]
// ============================================================================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount))
return DebugUtils::errored(kErrorInvalidArgument);
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isArchAArch64(arch))
return a64::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
// ============================================================================
// [asmjit::InstAPI - QueryFeatures]
// ============================================================================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept {
#ifdef ASMJIT_BUILD_X86
if (Environment::isFamilyX86(arch))
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
#endif
#ifdef ASMJIT_BUILD_ARM
if (Environment::isArchAArch64(arch))
return a64::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
#endif
return DebugUtils::errored(kErrorInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
ASMJIT_END_NAMESPACE
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_ARM
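// A dispatch-level usage sketch (assuming an X86 build; "add" is an arbitrary
// example): each InstAPI entry point above simply routes to the backend matching
// the `arch` argument.
//
//   using namespace asmjit;
//   uint32_t instId = InstAPI::stringToInstId(Environment::kArchX64, "add", SIZE_MAX);
//   String name;
//   InstAPI::instIdToString(Environment::kArchX64, instId, name);   // round-trips to "add".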

@ -0,0 +1,559 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_INST_H_INCLUDED
#define ASMJIT_CORE_INST_H_INCLUDED
#include "../core/cpuinfo.h"
#include "../core/operand.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_instruction_db
//! \{
// ============================================================================
// [asmjit::BaseInst]
// ============================================================================
//! Instruction id, options, and extraReg in a single structure. This structure
//! exists mainly to simplify analysis and validation API that requires `BaseInst`
//! and `Operand[]` array.
class BaseInst {
public:
//! Instruction id, see \ref BaseInst::Id or {arch-specific}::Inst::Id.
uint32_t _id;
//! Instruction options, see \ref BaseInst::Options or {arch-specific}::Inst::Options.
uint32_t _options;
//! Extra register used by instruction (either REP register or AVX-512 selector).
RegOnly _extraReg;
enum Id : uint32_t {
//! Invalid or uninitialized instruction id.
kIdNone = 0x00000000u,
//! Abstract instruction (BaseBuilder and BaseCompiler).
kIdAbstract = 0x80000000u
};
enum Options : uint32_t {
//! Used internally by emitters for handling errors and rare cases.
kOptionReserved = 0x00000001u,
//! Prevents following a jump during compilation (BaseCompiler).
kOptionUnfollow = 0x00000002u,
//! Overwrite the destination operand(s) (BaseCompiler).
//!
//! Hint that is important for register liveness analysis. It tells the
//! compiler that the destination operand will be overwritten now or by
//! adjacent instructions. BaseCompiler knows when a register is completely
//! overwritten by a single instruction - for example you don't have to
//! mark "movaps" or "pxor x, x". However, if a pair of instructions is
//! used and the first of them doesn't completely overwrite the content
//! of the destination, BaseCompiler fails to mark that register as dead.
//!
//! X86 Specific
//! ------------
//!
//! - All instructions that always overwrite at least the size of the
//! register the virtual-register uses, for example "mov", "movq",
//! "movaps" don't need the overwrite option to be used - conversion,
//! shuffle, and other miscellaneous instructions included.
//!
//! - All instructions that clear the destination register if all operands
//! are the same, for example "xor x, x", "pcmpeqb x x", etc...
//!
//! - Consecutive instructions that partially overwrite the variable until
//! there is no old content require `BaseCompiler::overwrite()` to be used.
//! Some examples (not always the best use cases though):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If an allocated variable is used temporarily for scalar operations. For
//! example if you allocate a full vector like `x86::Compiler::newXmm()`
//! and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only LO element of `x` is changed, if you don't
//! use HI elements, use `compiler.overwrite().sqrtss(x, y)`.
kOptionOverwrite = 0x00000004u,
//! Emit short-form of the instruction.
kOptionShortForm = 0x00000010u,
//! Emit long-form of the instruction.
kOptionLongForm = 0x00000020u,
//! Conditional jump is likely to be taken.
kOptionTaken = 0x00000040u,
//! Conditional jump is unlikely to be taken.
kOptionNotTaken = 0x00000080u
};
//! Control type.
enum ControlType : uint32_t {
//! No control type (doesn't jump).
kControlNone = 0u,
//! Unconditional jump.
kControlJump = 1u,
//! Conditional jump (branch).
kControlBranch = 2u,
//! Function call.
kControlCall = 3u,
//! Function return.
kControlReturn = 4u
};
//! \name Construction & Destruction
//! \{
//! Creates a new BaseInst instance with `id` and `options` set.
//!
//! Default values of `id` and `options` are zero, which means no instruction.
//! Such an instruction is guaranteed to never exist for any architecture supported
//! by AsmJit.
inline explicit BaseInst(uint32_t id = 0, uint32_t options = 0) noexcept
: _id(id),
_options(options),
_extraReg() {}
inline BaseInst(uint32_t id, uint32_t options, const RegOnly& extraReg) noexcept
: _id(id),
_options(options),
_extraReg(extraReg) {}
inline BaseInst(uint32_t id, uint32_t options, const BaseReg& extraReg) noexcept
: _id(id),
_options(options),
_extraReg { extraReg.signature(), extraReg.id() } {}
//! \}
//! \name Instruction ID
//! \{
//! Returns the instruction id.
inline uint32_t id() const noexcept { return _id; }
//! Sets the instruction id to the given `id`.
inline void setId(uint32_t id) noexcept { _id = id; }
//! Resets the instruction id to zero, see \ref kIdNone.
inline void resetId() noexcept { _id = 0; }
//! \}
//! \name Instruction Options
//! \{
inline uint32_t options() const noexcept { return _options; }
inline bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; }
inline void setOptions(uint32_t options) noexcept { _options = options; }
inline void addOptions(uint32_t options) noexcept { _options |= options; }
inline void clearOptions(uint32_t options) noexcept { _options &= ~options; }
inline void resetOptions() noexcept { _options = 0; }
//! \}
//! \name Extra Register
//! \{
inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
inline RegOnly& extraReg() noexcept { return _extraReg; }
inline const RegOnly& extraReg() const noexcept { return _extraReg; }
inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
inline void resetExtraReg() noexcept { _extraReg.reset(); }
//! \}
};
// ============================================================================
// [asmjit::OpRWInfo]
// ============================================================================
//! Read/Write information related to a single operand, used by \ref InstRWInfo.
struct OpRWInfo {
//! Read/Write flags, see \ref OpRWInfo::Flags.
uint32_t _opFlags;
//! Physical register index, if required.
uint8_t _physId;
//! Size of a possible memory operand that can replace a register operand.
uint8_t _rmSize;
//! Reserved for future use.
uint8_t _reserved[2];
//! Read bit-mask where each bit represents one byte read from Reg/Mem.
uint64_t _readByteMask;
//! Write bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _writeByteMask;
//! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _extendByteMask;
//! Flags describe how the operand is accessed and some additional information.
enum Flags : uint32_t {
//! Operand is read.
kRead = 0x00000001u,
//! Operand is written.
kWrite = 0x00000002u,
//! Operand is both read and written.
kRW = 0x00000003u,
//! Register operand can be replaced by a memory operand.
kRegMem = 0x00000004u,
//! The `extendByteMask()` represents a zero extension.
kZExt = 0x00000010u,
//! Register operand must use \ref physId().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref physId().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
//!
//! X86 Specific
//! ------------
//!
//! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
kMemFake = 0x00000400u,
//! Base register of the memory operand will be read.
kMemBaseRead = 0x00001000u,
//! Base register of the memory operand will be written.
kMemBaseWrite = 0x00002000u,
//! Base register of the memory operand will be read & written.
kMemBaseRW = 0x00003000u,
//! Index register of the memory operand will be read.
kMemIndexRead = 0x00004000u,
//! Index register of the memory operand will be written.
kMemIndexWrite = 0x00008000u,
//! Index register of the memory operand will be read & written.
kMemIndexRW = 0x0000C000u,
//! Base register of the memory operand will be modified before the operation.
kMemBasePreModify = 0x00010000u,
//! Base register of the memory operand will be modified after the operation.
kMemBasePostModify = 0x00020000u
};
// Don't remove these asserts. Read/Write flags are used extensively
// by Compiler and they must always be compatible with constants below.
static_assert(kRead == 0x1, "OpRWInfo::kRead flag must be 0x1");
static_assert(kWrite == 0x2, "OpRWInfo::kWrite flag must be 0x2");
static_assert(kRegMem == 0x4, "OpRWInfo::kRegMem flag must be 0x4");
//! \name Reset
//! \{
//! Resets this operand information to all zeros.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! Resets this operand info (resets all members) and sets common information
//! to the given `opFlags`, `regSize`, and possibly `physId`.
inline void reset(uint32_t opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
_opFlags = opFlags;
_physId = uint8_t(physId);
_rmSize = uint8_t((opFlags & kRegMem) ? regSize : uint32_t(0));
_resetReserved();
uint64_t mask = Support::lsbMask<uint64_t>(regSize);
_readByteMask = opFlags & kRead ? mask : uint64_t(0);
_writeByteMask = opFlags & kWrite ? mask : uint64_t(0);
_extendByteMask = 0;
}
inline void _resetReserved() noexcept {
memset(_reserved, 0, sizeof(_reserved));
}
//! \}
//! \name Operand Flags
//! \{
//! Returns operand flags, see \ref Flags.
inline uint32_t opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`.
inline bool hasOpFlag(uint32_t flag) const noexcept { return (_opFlags & flag) != 0; }
//! Adds the given `flags` to operand flags.
inline void addOpFlags(uint32_t flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags.
inline void clearOpFlags(uint32_t flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from.
inline bool isRead() const noexcept { return hasOpFlag(kRead); }
//! Tests whether this operand is written to.
inline bool isWrite() const noexcept { return hasOpFlag(kWrite); }
//! Tests whether this operand is both read and write.
inline bool isReadWrite() const noexcept { return (_opFlags & kRW) == kRW; }
//! Tests whether this operand is read only.
inline bool isReadOnly() const noexcept { return (_opFlags & kRW) == kRead; }
//! Tests whether this operand is write only.
inline bool isWriteOnly() const noexcept { return (_opFlags & kRW) == kWrite; }
//! Tests whether this operand is Reg/Mem.
//!
//! Reg/Mem operands can use either register or memory.
inline bool isRm() const noexcept { return hasOpFlag(kRegMem); }
//! Tests whether the operand will be zero extended.
inline bool isZExt() const noexcept { return hasOpFlag(kZExt); }
//! \}
//! \name Memory Flags
//! \{
//! Tests whether this is a fake memory operand, which is only used because of
//! encoding. Fake memory operands do not access any memory; they are only used
//! to encode registers.
inline bool isMemFake() const noexcept { return hasOpFlag(kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
inline bool isMemBaseUsed() const noexcept { return (_opFlags & kMemBaseRW) != 0; }
//! Tests whether the instruction reads from its BASE registers.
inline bool isMemBaseRead() const noexcept { return hasOpFlag(kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
inline bool isMemBaseWrite() const noexcept { return hasOpFlag(kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
inline bool isMemBaseReadWrite() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
inline bool isMemBaseReadOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
inline bool isMemBaseWriteOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses
//! it to calculate the target address.
inline bool isMemBasePreModify() const noexcept { return hasOpFlag(kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses
//! it to calculate the target address.
inline bool isMemBasePostModify() const noexcept { return hasOpFlag(kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
inline bool isMemIndexUsed() const noexcept { return (_opFlags & kMemIndexRW) != 0; }
//! Tests whether the instruction reads the INDEX registers.
inline bool isMemIndexRead() const noexcept { return hasOpFlag(kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
inline bool isMemIndexWrite() const noexcept { return hasOpFlag(kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
inline bool isMemIndexReadWrite() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
inline bool isMemIndexReadOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
inline bool isMemIndexWriteOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexWrite; }
//! \}
//! \name Physical Register ID
//! \{
//! Returns a physical id of the register that is fixed for this operand.
//!
//! Returns \ref BaseReg::kIdBad if any register can be used.
inline uint32_t physId() const noexcept { return _physId; }
//! Tests whether \ref physId() would return a valid physical register id.
inline bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand.
inline void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns Reg/Mem size of the operand.
inline uint32_t rmSize() const noexcept { return _rmSize; }
//! Sets Reg/Mem size of the operand.
inline void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
//! \}
//! \name Read & Write Masks
//! \{
//! Returns read mask.
inline uint64_t readByteMask() const noexcept { return _readByteMask; }
//! Returns write mask.
inline uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Returns extend mask.
inline uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets read mask.
inline void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
//! Sets write mask.
inline void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
//! Sets extend mask.
inline void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
//! \}
};
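// For example, describing an operand that is read and written, is 4 bytes wide, and
// may be replaced by a memory operand (values follow directly from reset() above):
//
//   OpRWInfo op;
//   op.reset(OpRWInfo::kRW | OpRWInfo::kRegMem, 4);
//   // op.rmSize()         == 4
//   // op.readByteMask()   == 0x0F   (low 4 bytes read)
//   // op.writeByteMask()  == 0x0F   (low 4 bytes written)
//   // op.extendByteMask() == 0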
// ============================================================================
// [asmjit::InstRWInfo]
// ============================================================================
//! Read/Write information of an instruction.
struct InstRWInfo {
//! Instruction flags (there are no flags at the moment, this field is reserved).
uint32_t _instFlags;
//! Mask of CPU flags read.
uint32_t _readFlags;
//! Mask of CPU flags written.
uint32_t _writeFlags;
//! Count of operands.
uint8_t _opCount;
//! CPU feature required for replacing register operand with memory operand.
uint8_t _rmFeature;
//! Reserved for future use.
uint8_t _reserved[18];
//! Read/Write info of extra register (rep{} or kz{}).
OpRWInfo _extraReg;
//! Read/Write info of instruction operands.
OpRWInfo _operands[Globals::kMaxOpCount];
//! \name Commons
//! \{
//! Resets this RW information to all zeros.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \}
//! \name Instruction Flags
//!
//! \{
inline uint32_t instFlags() const noexcept { return _instFlags; }
inline bool hasInstFlag(uint32_t flag) const noexcept { return (_instFlags & flag) != 0; }
//! \}
//! \name CPU Flags Read/Write Information
//! \{
//! Returns read flags of the instruction.
inline uint32_t readFlags() const noexcept { return _readFlags; }
//! Returns write flags of the instruction.
inline uint32_t writeFlags() const noexcept { return _writeFlags; }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns the CPU feature required to replace a register operand with a memory
//! operand. If the returned feature is zero (none) then this instruction either
//! doesn't provide a memory operand combination or no extra CPU feature is
//! required.
//!
//! X86 Specific
//! ------------
//!
//! Some AVX+ instructions may require extra features for replacing registers
//! with memory operands, for example VPSLLDQ instruction only supports
//! 'reg/reg/imm' combination on AVX/AVX2 capable CPUs and requires AVX-512 for
//! 'reg/mem/imm' combination.
inline uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \}
//! \name Operand Read/Write Information
//! \{
//! Returns RW information of extra register operand (extraReg).
inline const OpRWInfo& extraReg() const noexcept { return _extraReg; }
//! Returns RW information of all instruction's operands.
inline const OpRWInfo* operands() const noexcept { return _operands; }
//! Returns RW information of the operand at the given `index`.
inline const OpRWInfo& operand(size_t index) const noexcept {
ASMJIT_ASSERT(index < Globals::kMaxOpCount);
return _operands[index];
}
//! Returns the number of operands this instruction has.
inline uint32_t opCount() const noexcept { return _opCount; }
//! \}
};
// ============================================================================
// [asmjit::InstAPI]
// ============================================================================
//! Instruction API.
namespace InstAPI {
//! Validation flags that can be used with \ref InstAPI::validate().
enum ValidationFlags : uint32_t {
//! Allow virtual registers in the instruction.
kValidationFlagVirtRegs = 0x01u
};
#ifndef ASMJIT_NO_TEXT
//! Appends the name of the instruction specified by `instId` and `instOptions`
//! into the `output` string.
//!
//! \note Instruction options would only affect instruction prefix & suffix,
//! other options would be ignored. If `instOptions` is zero then only raw
//! instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept;
//! Parses an instruction name in the given string `s`. The length is specified
//! by the `len` argument, which can be `SIZE_MAX` if `s` is known to be null
//! terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such
//! instruction exists.
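//!
//! A minimal round-trip sketch (illustrative only; the `arch` value and the
//! "add" mnemonic are assumptions supplied by the caller):
//!
//! ```
//! String name;
//! uint32_t instId = InstAPI::stringToInstId(arch, "add", SIZE_MAX);
//!
//! if (instId != BaseInst::kIdNone)
//!   InstAPI::instIdToString(arch, instId, name);
//! ```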
ASMJIT_API uint32_t stringToInstId(uint32_t arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the validation `flags`, see
//! \ref ValidationFlags.
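//!
//! Illustrative use (a sketch only; `arch`, `inst`, `operands`, and `opCount`
//! are assumed to be provided by the caller):
//!
//! ```
//! Error err = InstAPI::validate(arch, inst, operands, opCount);
//! if (err != kErrorOk) {
//!   // The instruction or its operand combination failed validation.
//! }
//! ```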
ASMJIT_API Error validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags = 0) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
//! Gets Read/Write information of the given instruction.
ASMJIT_API Error queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
//! Gets CPU features required by the given instruction.
ASMJIT_API Error queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstAPI}
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_INST_H_INCLUDED

File diff suppressed because it is too large

@ -0,0 +1,278 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/globals.h"
#include "../core/virtmem.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::JitAllocator]
// ============================================================================
//! A simple implementation of a memory manager that uses `asmjit::VirtMem`
//! functions to manage virtual memory for JIT compiled code.
//!
//! Implementation notes:
//!
//! - Granularity of allocated blocks is different from the granularity of a
//!   typical C malloc. In addition, the allocator can use several memory pools
//!   having different granularities to minimize the maintenance overhead. The
//!   multiple-pools feature requires the `kOptionUseMultiplePools` flag to be set.
//!
//! - The allocator doesn't store any information in executable memory, instead,
//! the implementation uses two bit-vectors to manage allocated memory of each
//! allocator-block. The first bit-vector called 'used' is used to track used
//! memory (where each bit represents memory size defined by granularity) and
//! the second bit vector called 'stop' is used as a sentinel to mark where
//! the allocated area ends.
//!
//! - Internally, the allocator also uses an RB tree to keep track of all blocks
//!   across all pools. Each inserted block is added to the tree so it can be
//!   matched quickly during `release()` and `shrink()`.
class JitAllocator {
public:
ASMJIT_NONCOPYABLE(JitAllocator)
struct Impl {
//! Allocator options, see \ref JitAllocator::Options.
uint32_t options;
//! Base block size (0 if the allocator is not initialized).
uint32_t blockSize;
//! Base granularity (0 if the allocator is not initialized).
uint32_t granularity;
//! A pattern that is used to fill unused memory if secure mode is enabled.
uint32_t fillPattern;
};
//! Allocator implementation (private).
Impl* _impl;
enum Options : uint32_t {
//! Enables the use of an anonymous memory-mapped memory that is mapped into
//! two buffers having a different pointer. The first buffer has read and
//! execute permissions and the second buffer has read+write permissions.
//!
//! See \ref VirtMem::allocDualMapping() for more details about this feature.
kOptionUseDualMapping = 0x00000001u,
//! Enables the use of multiple pools with increasing granularity instead of
//! a single pool. This flag enables 3 internal pools in total, having 64,
//! 128, and 256 byte granularity.
//!
//! This feature is only recommended for users that generate a lot of code
//! and would like to minimize the overhead of `JitAllocator` itself by
//! having blocks of different allocation granularities. Using this feature
//! only for a few allocations won't pay off as the allocator may need to
//! create more blocks initially before it can take advantage of variable
//! block granularity.
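//!
//! A small illustrative sketch (not from the original header) of enabling
//! this option through \ref CreateParams:
//!
//! ```
//! JitAllocator::CreateParams params {};
//! params.options = JitAllocator::kOptionUseMultiplePools;
//!
//! JitAllocator allocator(&params);
//! ```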
kOptionUseMultiplePools = 0x00000002u,
//! Always fills reserved memory with a fill pattern.
//!
//! Causes a new block to be cleared with the fill pattern and freshly
//! released memory to be cleared before being made ready for another use.
kOptionFillUnusedMemory = 0x00000004u,
//! When this flag is set the allocator immediately releases unused blocks
//! during `release()` or `reset()`. When this flag is not set the allocator
//! keeps one empty block in each pool to prevent excessive virtual memory
//! allocations and deallocations in border cases, which involve constantly
//! allocating and deallocating a single block caused by repetitively calling
//! `alloc()` and `release()` when the allocator has either no blocks or has
//! all blocks fully occupied.
kOptionImmediateRelease = 0x00000008u,
//! Use a custom fill pattern, must be combined with `kOptionFillUnusedMemory`.
kOptionCustomFillPattern = 0x10000000u
};
//! \name Construction & Destruction
//! \{
//! Parameters that can be passed to `JitAllocator` constructor.
//!
//! Use it like this:
//!
//! ```
//! // Zero initialize (zero means the default value) and change what you need.
//! JitAllocator::CreateParams params {};
//! params.blockSize = 1024 * 1024;
//!
//! // Create the allocator.
//! JitAllocator allocator(&params);
//! ```
struct CreateParams {
//! Allocator options, see \ref JitAllocator::Options.
//!
//! No options are used by default.
uint32_t options;
//! Base size of a single block in bytes (default 64kB).
//!
//! \remarks Block size must be equal to or greater than the page size and
//! must be a power of 2. If the input is not valid then the default block
//! size will be used instead.
uint32_t blockSize;
//! Base granularity (and also natural alignment) of allocations in bytes
//! (default 64).
//!
//! Since the `JitAllocator` uses bit-arrays to mark used memory, the
//! granularity also specifies how many bytes correspond to a single bit in
//! such a bit-array. Higher granularity means more waste of virtual memory
//! (as it increases the natural alignment), but smaller bit-arrays, as fewer
//! bits are required per block.
uint32_t granularity;
//! Pattern used to fill unused memory.
//!
//! Only used if \ref kOptionCustomFillPattern is set.
uint32_t fillPattern;
//! Resets the content of `CreateParams`.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
};
//! Creates a `JitAllocator` instance.
explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
//! Destroys the `JitAllocator` instance and releases all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
//! Tests whether the allocator was created successfully (a non-initialized allocator has a zero block size).
inline bool isInitialized() const noexcept { return _impl->blockSize != 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
//!
//! \remarks This function is not thread-safe as it's designed to be used when
//! nobody else is using the allocator. The reason is that there is no point
//! in calling `reset()` when the allocator is still in use.
ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns allocator options, see \ref JitAllocator::Options.
inline uint32_t options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; }
//! Returns the base block size (the minimum size of a block the allocator would allocate).
inline uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator.
inline uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns the pattern that is used to fill unused memory if `kOptionFillUnusedMemory` is set.
inline uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \}
//! \name Alloc & Release
//! \{
//! Allocates `size` bytes of virtual memory.
//!
//! \remarks This function is thread-safe.
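//!
//! Illustrative sketch (not from the original documentation; error handling
//! is minimal and the written machine code is left out):
//!
//! ```
//! JitAllocator allocator;
//! void* roPtr;
//! void* rwPtr;
//!
//! if (allocator.alloc(&roPtr, &rwPtr, 64) == kErrorOk) {
//!   // Write machine code through `rwPtr`, execute it through `roPtr`.
//!   allocator.release(roPtr);
//! }
//! ```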
ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept;
//! Releases memory returned by `alloc()`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error release(void* roPtr) noexcept;
//! Frees extra memory allocated at `roPtr` by shrinking the allocation to `newSize` bytes.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error shrink(void* roPtr, size_t newSize) noexcept;
//! \}
//! \name Statistics
//! \{
//! Statistics about `JitAllocator`.
struct Statistics {
//! Number of blocks `JitAllocator` maintains.
size_t _blockCount;
//! How many bytes are currently used / allocated.
size_t _usedSize;
//! How many bytes are currently reserved by the allocator.
size_t _reservedSize;
//! Allocation overhead (in bytes) required to maintain all blocks.
size_t _overheadSize;
inline void reset() noexcept {
_blockCount = 0;
_usedSize = 0;
_reservedSize = 0;
_overheadSize = 0;
}
//! Returns count of blocks managed by `JitAllocator` at the moment.
inline size_t blockCount() const noexcept { return _blockCount; }
//! Returns how many bytes are currently used.
inline size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment.
inline size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
//! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
inline size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory.
inline size_t overheadSize() const noexcept { return _overheadSize; }
inline double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
inline double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
inline double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
};
//! Returns JIT allocator statistics.
//!
//! \remarks This function is thread-safe.
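//!
//! For example (illustrative only; `allocator` is an existing instance and
//! `printf` is used purely for demonstration):
//!
//! ```
//! JitAllocator::Statistics stats = allocator.statistics();
//! printf("used %zu of %zu reserved bytes\n", stats.usedSize(), stats.reservedSize());
//! ```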
ASMJIT_API Statistics statistics() const noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif

@ -0,0 +1,128 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
#include "../core/cpuinfo.h"
#include "../core/jitruntime.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::JitRuntime - Utilities]
// ============================================================================
// Only useful on non-x86 architectures.
static inline void JitRuntime_flushInstructionCache(const void* p, size_t size) noexcept {
#if ASMJIT_ARCH_X86
DebugUtils::unused(p, size);
#else
# if defined(_WIN32)
// Windows has built-in support in `kernel32.dll`.
::FlushInstructionCache(::GetCurrentProcess(), p, size);
# elif defined(__GNUC__)
char* start = static_cast<char*>(const_cast<void*>(p));
char* end = start + size;
__builtin___clear_cache(start, end);
# else
DebugUtils::unused(p, size);
# endif
#endif
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
: _allocator(params) {
_environment = hostEnvironment();
_environment.setFormat(Environment::kFormatJIT);
}
JitRuntime::~JitRuntime() noexcept {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
*dst = nullptr;
ASMJIT_PROPAGATE(code->flatten());
ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
size_t estimatedCodeSize = code->codeSize();
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
return DebugUtils::errored(kErrorNoCodeGenerated);
uint8_t* ro;
uint8_t* rw;
ASMJIT_PROPAGATE(_allocator.alloc((void**)&ro, (void**)&rw, estimatedCodeSize));
// Relocate the code.
Error err = code->relocateToBase(uintptr_t((void*)ro));
if (ASMJIT_UNLIKELY(err)) {
_allocator.release(ro);
return err;
}
// Recalculate the final code size and shrink the memory we allocated for it
// in case that some relocations didn't require records in an address table.
size_t codeSize = code->codeSize();
for (Section* section : code->_sections) {
size_t offset = size_t(section->offset());
size_t bufferSize = size_t(section->bufferSize());
size_t virtualSize = size_t(section->virtualSize());
ASMJIT_ASSERT(offset + bufferSize <= codeSize);
memcpy(rw + offset, section->data(), bufferSize);
if (virtualSize > bufferSize) {
ASMJIT_ASSERT(offset + virtualSize <= codeSize);
memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
}
}
if (codeSize < estimatedCodeSize)
_allocator.shrink(ro, codeSize);
flush(ro, codeSize);
*dst = ro;
return kErrorOk;
}
Error JitRuntime::_release(void* p) noexcept {
return _allocator.release(p);
}
void JitRuntime::flush(const void* p, size_t size) noexcept {
JitRuntime_flushInstructionCache(p, size);
}
ASMJIT_END_NAMESPACE
#endif

@ -0,0 +1,126 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/codeholder.h"
#include "../core/jitallocator.h"
#include "../core/target.h"
ASMJIT_BEGIN_NAMESPACE
class CodeHolder;
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::JitRuntime]
// ============================================================================
//! JIT execution runtime is a special `Target` that is designed to store and
//! execute the generated code.
class ASMJIT_VIRTAPI JitRuntime : public Target {
public:
ASMJIT_NONCOPYABLE(JitRuntime)
//! Virtual memory allocator.
JitAllocator _allocator;
//! \name Construction & Destruction
//! \{
//! Creates a `JitRuntime` instance.
explicit ASMJIT_API JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
//! Destroys the `JitRuntime` instance.
ASMJIT_API virtual ~JitRuntime() noexcept;
inline void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept {
_allocator.reset(resetPolicy);
}
//! \}
//! \name Accessors
//! \{
//! Returns the associated `JitAllocator`.
inline JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
//! \}
//! \name Utilities
//! \{
// NOTE: To allow passing function pointers to `add()` and `release()` the
// virtual methods are prefixed with `_` and called from templates instead.
//! Allocates the memory needed for the code stored in `CodeHolder` and
//! relocates the code to the allocated memory.
//!
//! The beginning of the memory allocated for the function is returned in `dst`.
//! If the call fails an `Error` code is returned and `dst` is explicitly set to
//! `nullptr` (this means that you don't have to set it to null before calling `add()`).
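//!
//! A minimal, illustrative flow (a sketch only; it assumes `code` is populated
//! by an emitter attached to it, and \ref CodeHolder::init() comes from
//! another header):
//!
//! ```
//! typedef int (*Func)(void);
//!
//! JitRuntime rt;
//! CodeHolder code;
//! code.init(rt.environment());
//! // ... attach an emitter and generate code ...
//!
//! Func fn;
//! if (rt.add(&fn, &code) == kErrorOk) {
//!   fn();
//!   rt.release(fn);
//! }
//! ```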
template<typename Func>
inline Error add(Func* dst, CodeHolder* code) noexcept {
return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
}
//! Releases `p` which was obtained by calling `add()`.
template<typename Func>
inline Error release(Func p) noexcept {
return _release(Support::ptr_cast_impl<void*, Func>(p));
}
//! Type-unsafe version of `add()`.
ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
//! Type-unsafe version of `release()`.
ASMJIT_API virtual Error _release(void* p) noexcept;
//! Flushes an instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor's cache.
//!
//! Flushing is basically a NOP under X86, but is needed by architectures
//! that do not have a transparent instruction cache like ARM.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif

@ -0,0 +1,124 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/logger.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
Logger::Logger() noexcept
: _options() {}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
Error Logger::logf(const char* fmt, ...) noexcept {
Error err;
va_list ap;
va_start(ap, fmt);
err = logv(fmt, ap);
va_end(ap);
return err;
}
Error Logger::logv(const char* fmt, va_list ap) noexcept {
StringTmp<2048> sb;
ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap));
return log(sb);
}
Error Logger::logBinary(const void* data, size_t size) noexcept {
static const char prefix[] = "db ";
StringTmp<256> sb;
sb.append(prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
size_t i = size;
const uint8_t* s = static_cast<const uint8_t*>(data);
while (i) {
uint32_t n = uint32_t(Support::min<size_t>(i, 16));
sb.truncate(ASMJIT_ARRAY_SIZE(prefix) - 1);
sb.appendHex(s, n);
sb.append('\n');
ASMJIT_PROPAGATE(log(sb));
s += n;
i -= n;
}
return kErrorOk;
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
FileLogger::FileLogger(FILE* file) noexcept
: _file(file) {}
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file)
return kErrorOk;
if (size == SIZE_MAX)
size = strlen(data);
fwrite(data, 1, size, _file);
return kErrorOk;
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
Error StringLogger::_log(const char* data, size_t size) noexcept {
return _content.append(data, size);
}
ASMJIT_END_NAMESPACE
#endif

@ -0,0 +1,228 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_LOGGING_H_INCLUDED
#define ASMJIT_CORE_LOGGING_H_INCLUDED
#include "../core/inst.h"
#include "../core/string.h"
#include "../core/formatter.h"
#ifndef ASMJIT_NO_LOGGING
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
// ============================================================================
// [asmjit::Logger]
// ============================================================================
//! Logging interface.
//!
//! This class can be inherited and reimplemented to fit into your own logging
//! needs. When reimplementing a logger, use the \ref Logger::_log() method to
//! customize the output (a short sketch follows the list below).
//!
//! There are two `Logger` implementations offered by AsmJit:
//! - \ref FileLogger - logs into a `FILE*`.
//! - \ref StringLogger - concatenates all logs into a \ref String.
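//!
//! A short illustrative sketch of attaching a logger (the \ref CodeHolder and
//! \ref JitRuntime APIs referenced here live in other headers and are
//! assumptions of this example, not part of this file):
//!
//! ```
//! JitRuntime rt;
//! FileLogger logger(stdout);
//!
//! CodeHolder code;
//! code.init(rt.environment());
//! code.setLogger(&logger);
//! ```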
class ASMJIT_VIRTAPI Logger {
public:
ASMJIT_BASE_CLASS(Logger)
ASMJIT_NONCOPYABLE(Logger)
//! Format options.
FormatOptions _options;
//! \name Construction & Destruction
//! \{
//! Creates a `Logger` instance.
ASMJIT_API Logger() noexcept;
//! Destroys the `Logger` instance.
ASMJIT_API virtual ~Logger() noexcept;
//! \}
//! \name Format Options
//! \{
//! Returns \ref FormatOptions of this logger.
inline FormatOptions& options() noexcept { return _options; }
//! \overload
inline const FormatOptions& options() const noexcept { return _options; }
//! Returns formatting flags, see \ref FormatOptions::Flags.
inline uint32_t flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled.
inline bool hasFlag(uint32_t flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`, see \ref FormatOptions::Flags.
inline void setFlags(uint32_t flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`, see \ref FormatOptions::Flags.
inline void addFlags(uint32_t flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`, see \ref FormatOptions::Flags.
inline void clearFlags(uint32_t flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of `type`, see \ref FormatOptions::IndentationType.
inline uint32_t indentation(uint32_t type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `type` to `n` spaces, see \ref
//! FormatOptions::IndentationType.
inline void setIndentation(uint32_t type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `type` to 0 spaces.
inline void resetIndentation(uint32_t type) noexcept { _options.resetIndentation(type); }
//! \}
//! \name Logging Interface
//! \{
//! Logs `data` - must be reimplemented.
//!
//! The function can accept either a null terminated string if `size` is
//! `SIZE_MAX` or a non-null terminated string of the given `size`. The
//! function cannot assume that the data is null terminated and must handle
//! non-null terminated inputs.
virtual Error _log(const char* data, size_t size) noexcept = 0;
//! Logs the string `data`, which is either null terminated or of the given `size`.
inline Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); }
//! Logs the content of a \ref String `str`.
inline Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
//! Formats the message by using `snprintf()` and then passes the formatted
//! string to \ref _log().
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Formats the message by using `vsnprintf()` and then passes the formatted
//! string to \ref _log().
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! Logs binary `data` of the given `size`.
ASMJIT_API Error logBinary(const void* data, size_t size) noexcept;
//! \}
};
// ============================================================================
// [asmjit::FileLogger]
// ============================================================================
//! Logger that can log to a `FILE*`.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(FileLogger)
FILE* _file;
//! \name Construction & Destruction
//! \{
//! Creates a new `FileLogger` that logs to `FILE*`.
ASMJIT_API FileLogger(FILE* file = nullptr) noexcept;
//! Destroys the `FileLogger`.
ASMJIT_API virtual ~FileLogger() noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the logging output stream or null if the logger has no output
//! stream.
inline FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null.
//!
//! \note If the `file` is null the logging will be disabled. When a logger
//! is attached to `CodeHolder` or any emitter the logging API will always
//! be called regardless of the output file. This means that if you really
//! want to disable logging at emitter level you must not attach a logger
//! to it.
inline void setFile(FILE* file) noexcept { _file = file; }
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
// ============================================================================
// [asmjit::StringLogger]
// ============================================================================
//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:
ASMJIT_NONCOPYABLE(StringLogger)
//! Logger data as string.
String _content;
//! \name Construction & Destruction
//! \{
//! Creates a new `StringLogger`.
ASMJIT_API StringLogger() noexcept;
//! Destroys the `StringLogger`.
ASMJIT_API virtual ~StringLogger() noexcept;
//! \}
//! \name Logger Data Accessors
//! \{
//! Returns the content of the logger as \ref String.
//!
//! It can be moved, if desired.
inline String& content() noexcept { return _content; }
//! \overload
inline const String& content() const noexcept { return _content; }
//! Returns aggregated logger data as `char*` pointer.
//!
//! The pointer is owned by `StringLogger`, it can't be modified or freed.
inline const char* data() const noexcept { return _content.data(); }
//! Returns size of the data returned by `data()`.
inline size_t dataSize() const noexcept { return _content.size(); }
//! \}
//! \name Logger Data Manipulation
//! \{
//! Clears the accumulated logger data.
inline void clear() noexcept { _content.clear(); }
//! \}
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_LOGGING_H_INCLUDED

@ -0,0 +1,51 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
#define ASMJIT_CORE_MISC_P_H_INCLUDED
#include "../core/api-config.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
#define ASMJIT_LOOKUP_TABLE_4(T, I) T((I)), T((I+1)), T((I+2)), T((I+3))
#define ASMJIT_LOOKUP_TABLE_8(T, I) ASMJIT_LOOKUP_TABLE_4(T, I), ASMJIT_LOOKUP_TABLE_4(T, I + 4)
#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8)
#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16)
#define ASMJIT_LOOKUP_TABLE_40(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16), ASMJIT_LOOKUP_TABLE_8(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32)
#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64)
#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128)
#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256)
#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512)
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_MISC_P_H_INCLUDED

@ -0,0 +1,143 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Operand - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(operand) {
INFO("Checking operand sizes");
EXPECT(sizeof(Operand) == 16);
EXPECT(sizeof(BaseReg) == 16);
EXPECT(sizeof(BaseMem) == 16);
EXPECT(sizeof(Imm) == 16);
EXPECT(sizeof(Label) == 16);
INFO("Checking basic functionality of Operand");
Operand a, b;
Operand dummy;
EXPECT(a.isNone() == true);
EXPECT(a.isReg() == false);
EXPECT(a.isMem() == false);
EXPECT(a.isImm() == false);
EXPECT(a.isLabel() == false);
EXPECT(a == b);
EXPECT(a._data[0] == 0);
EXPECT(a._data[1] == 0);
INFO("Checking basic functionality of Label");
Label label;
EXPECT(label.isValid() == false);
EXPECT(label.id() == Globals::kInvalidId);
INFO("Checking basic functionality of BaseReg");
EXPECT(BaseReg().isReg() == true);
EXPECT(BaseReg().isValid() == false);
EXPECT(BaseReg()._data[0] == 0);
EXPECT(BaseReg()._data[1] == 0);
EXPECT(dummy.as<BaseReg>().isValid() == false);
// Create some register (not specific to any architecture).
uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift ) |
(2 << Operand::kSignatureRegGroupShift) |
(8 << Operand::kSignatureSizeShift ) ;
BaseReg r1 = BaseReg::fromSignatureAndId(rSig, 5);
EXPECT(r1.isValid() == true);
EXPECT(r1.isReg() == true);
EXPECT(r1.isReg(1) == true);
EXPECT(r1.isPhysReg() == true);
EXPECT(r1.isVirtReg() == false);
EXPECT(r1.signature() == rSig);
EXPECT(r1.type() == 1);
EXPECT(r1.group() == 2);
EXPECT(r1.size() == 8);
EXPECT(r1.id() == 5);
EXPECT(r1.isReg(1, 5) == true); // RegType and Id.
EXPECT(r1._data[0] == 0);
EXPECT(r1._data[1] == 0);
// The same type of register having different id.
BaseReg r2(r1, 6);
EXPECT(r2.isValid() == true);
EXPECT(r2.isReg() == true);
EXPECT(r2.isReg(1) == true);
EXPECT(r2.isPhysReg() == true);
EXPECT(r2.isVirtReg() == false);
EXPECT(r2.signature() == rSig);
EXPECT(r2.type() == r1.type());
EXPECT(r2.group() == r1.group());
EXPECT(r2.size() == r1.size());
EXPECT(r2.id() == 6);
EXPECT(r2.isReg(1, 6) == true);
r1.reset();
EXPECT(!r1.isReg());
EXPECT(!r1.isValid());
INFO("Checking basic functionality of BaseMem");
BaseMem m;
EXPECT(m.isMem());
EXPECT(m == BaseMem());
EXPECT(m.hasBase() == false);
EXPECT(m.hasIndex() == false);
EXPECT(m.hasOffset() == false);
EXPECT(m.isOffset64Bit() == true);
EXPECT(m.offset() == 0);
m.setOffset(-1);
EXPECT(m.offsetLo32() == -1);
EXPECT(m.offset() == -1);
int64_t x = int64_t(0xFF00FF0000000001u);
int32_t xHi = int32_t(0xFF00FF00u);
m.setOffset(x);
EXPECT(m.offset() == x);
EXPECT(m.offsetLo32() == 1);
EXPECT(m.offsetHi32() == xHi);
INFO("Checking basic functionality of Imm");
Imm immValue(-42);
EXPECT(immValue.type() == Imm::kTypeInteger);
EXPECT(Imm(-1).value() == -1);
EXPECT(imm(-1).value() == -1);
EXPECT(immValue.value() == -42);
EXPECT(imm(0xFFFFFFFF).value() == int64_t(0xFFFFFFFF));
Imm immDouble(0.4);
EXPECT(immDouble.type() == Imm::kTypeDouble);
EXPECT(immDouble.valueAs<double>() == 0.4);
EXPECT(immDouble == imm(0.4));
}
#endif
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large

@ -0,0 +1,106 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/osutils.h"
#include "../core/support.h"
#if defined(_WIN32)
#include <atomic>
#elif defined(__APPLE__)
#include <mach/mach_time.h>
#else
#include <time.h>
#include <unistd.h>
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::OSUtils - GetTickCount]
// ============================================================================
uint32_t OSUtils::getTickCount() noexcept {
#if defined(_WIN32)
enum HiResStatus : uint32_t {
kHiResUnknown = 0,
kHiResAvailable = 1,
kHiResNotAvailable = 2
};
static std::atomic<uint32_t> _hiResStatus(kHiResUnknown);
static volatile double _hiResFreq(0);
uint32_t status = _hiResStatus.load();
LARGE_INTEGER now, qpf;
if (status != kHiResNotAvailable && ::QueryPerformanceCounter(&now)) {
double freq = _hiResFreq;
if (status == kHiResUnknown) {
// Detects the availability of high resolution counter.
if (::QueryPerformanceFrequency(&qpf)) {
freq = double(qpf.QuadPart) / 1000.0;
_hiResFreq = freq;
_hiResStatus.compare_exchange_strong(status, kHiResAvailable);
status = kHiResAvailable;
}
else {
// High resolution not available.
_hiResStatus.compare_exchange_strong(status, kHiResNotAvailable);
}
}
if (status == kHiResAvailable)
return uint32_t(uint64_t(int64_t(double(now.QuadPart) / freq)) & 0xFFFFFFFFu);
}
// Fall back to `GetTickCount()` if we cannot use the high-resolution counter.
return ::GetTickCount();
#elif defined(__APPLE__)
// See Apple's QA1398.
static mach_timebase_info_data_t _machTime;
uint32_t denom = _machTime.denom;
if (ASMJIT_UNLIKELY(!denom)) {
if (mach_timebase_info(&_machTime) != KERN_SUCCESS || !(denom = _machTime.denom))
return 0;
}
// `mach_absolute_time()` returns nanoseconds, we want milliseconds.
uint64_t t = mach_absolute_time() / 1000000u;
t = (t * _machTime.numer) / _machTime.denom;
return uint32_t(t & 0xFFFFFFFFu);
#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
struct timespec ts;
if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0))
return 0;
uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
return uint32_t(t & 0xFFFFFFFFu);
#else
#pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.")
return 0;
#endif
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,87 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::OSUtils]
// ============================================================================
//! Operating system utilities.
namespace OSUtils {
//! Gets the current CPU tick count, used for benchmarking (1ms resolution).
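//!
//! For example (an illustrative benchmarking sketch; the measured work is
//! left out):
//!
//! ```
//! uint32_t start = OSUtils::getTickCount();
//! // ... code to measure ...
//! uint32_t elapsedMs = OSUtils::getTickCount() - start;
//! ```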
ASMJIT_API uint32_t getTickCount() noexcept;
};
// ============================================================================
// [asmjit::Lock]
// ============================================================================
//! \cond INTERNAL
//! Lock.
//!
//! Lock is internal; it cannot be used outside of AsmJit. However, its internal
//! layout is exposed as it's used by some other classes, which are public.
class Lock {
public:
ASMJIT_NONCOPYABLE(Lock)
#if defined(_WIN32)
#pragma pack(push, 8)
struct ASMJIT_MAY_ALIAS Handle {
void* DebugInfo;
long LockCount;
long RecursionCount;
void* OwningThread;
void* LockSemaphore;
unsigned long* SpinCount;
};
Handle _handle;
#pragma pack(pop)
#elif !defined(__EMSCRIPTEN__)
typedef pthread_mutex_t Handle;
Handle _handle;
#endif
inline Lock() noexcept;
inline ~Lock() noexcept;
inline void lock() noexcept;
inline void unlock() noexcept;
};
//! \endcond
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_H_INCLUDED

@ -0,0 +1,94 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#include "../core/osutils.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::Lock]
// ============================================================================
#if defined(_WIN32)
// Windows implementation.
static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");
inline Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
#elif !defined(__EMSCRIPTEN__)
// PThread implementation.
#ifdef PTHREAD_MUTEX_INITIALIZER
inline Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
#else
inline Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
#endif
inline Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
inline void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
inline void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
#else
// Dummy implementation - Emscripten or other unsupported platform.
inline Lock::Lock() noexcept {}
inline Lock::~Lock() noexcept {}
inline void Lock::lock() noexcept {}
inline void Lock::unlock() noexcept {}
#endif
// ============================================================================
// [asmjit::LockGuard]
// ============================================================================
//! Scoped lock.
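//!
//! Illustrative use (internal only; `lock` and `doWork()` are assumed names
//! invented for this sketch):
//!
//! ```
//! static Lock lock;
//!
//! void doWork() noexcept {
//!   LockGuard guard(lock);
//!   // ... critical section, unlocked automatically when `guard` goes out of scope ...
//! }
//! ```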
class LockGuard {
public:
ASMJIT_NONCOPYABLE(LockGuard)
Lock& _target;
inline LockGuard(Lock& target) noexcept
: _target(target) { _target.lock(); }
inline ~LockGuard() noexcept { _target.unlock(); }
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_OSUTILS_P_H_INCLUDED

@ -0,0 +1,408 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/radefs_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RAAssignment]
// ============================================================================
class RAAssignment {
ASMJIT_NONCOPYABLE(RAAssignment)
public:
enum Ids : uint32_t {
kPhysNone = 0xFF,
kWorkNone = RAWorkReg::kIdNone
};
enum DirtyBit : uint32_t {
kClean = 0,
kDirty = 1
};
struct Layout {
//! Index of architecture registers per group.
RARegIndex physIndex;
//! Count of architecture registers per group.
RARegCount physCount;
//! Count of physical registers of all groups.
uint32_t physTotal;
//! Count of work registers.
uint32_t workCount;
//! WorkRegs data (vector).
const RAWorkRegs* workRegs;
inline void reset() noexcept {
physIndex.reset();
physCount.reset();
physTotal = 0;
workCount = 0;
workRegs = nullptr;
}
};
struct PhysToWorkMap {
//! Assigned registers (each bit represents one physical reg).
RARegMask assigned;
//! Dirty registers (spill slot out of sync or no spill slot).
RARegMask dirty;
//! PhysReg to WorkReg mapping.
uint32_t workIds[1 /* ... */];
static inline size_t sizeOf(size_t count) noexcept {
return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t);
}
inline void reset(size_t count) noexcept {
assigned.reset();
dirty.reset();
for (size_t i = 0; i < count; i++)
workIds[i] = kWorkNone;
}
inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
memcpy(this, other, size);
}
};
struct WorkToPhysMap {
//! WorkReg to PhysReg mapping
uint8_t physIds[1 /* ... */];
static inline size_t sizeOf(size_t count) noexcept {
return size_t(count) * sizeof(uint8_t);
}
inline void reset(size_t count) noexcept {
for (size_t i = 0; i < count; i++)
physIds[i] = kPhysNone;
}
inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept {
size_t size = sizeOf(count);
if (ASMJIT_LIKELY(size))
memcpy(this, other, size);
}
};
//! Physical registers layout.
Layout _layout;
//! WorkReg to PhysReg mapping.
WorkToPhysMap* _workToPhysMap;
//! PhysReg to WorkReg mapping and assigned/dirty bits.
PhysToWorkMap* _physToWorkMap;
//! Optimization to translate PhysRegs to WorkRegs faster.
uint32_t* _physToWorkIds[BaseReg::kGroupVirt];
//! \name Construction & Destruction
//! \{
inline RAAssignment() noexcept {
_layout.reset();
resetMaps();
}
inline void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr);
_layout.physIndex.buildIndexes(physCount);
_layout.physCount = physCount;
_layout.physTotal = uint32_t(_layout.physIndex[BaseReg::kGroupVirt - 1]) +
uint32_t(_layout.physCount[BaseReg::kGroupVirt - 1]) ;
_layout.workCount = workRegs.size();
_layout.workRegs = &workRegs;
}
inline void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
inline void resetMaps() noexcept {
_physToWorkMap = nullptr;
_workToPhysMap = nullptr;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_physToWorkIds[group] = nullptr;
}
//! \}
//! \name Accessors
//! \{
inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
inline RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
inline const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
inline uint32_t assigned(uint32_t group) const noexcept { return _physToWorkMap->assigned[group]; }
inline RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
inline const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
inline uint32_t dirty(uint32_t group) const noexcept { return _physToWorkMap->dirty[group]; }
inline uint32_t workToPhysId(uint32_t group, uint32_t workId) const noexcept {
DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone);
ASMJIT_ASSERT(workId < _layout.workCount);
return _workToPhysMap->physIds[workId];
}
inline uint32_t physToWorkId(uint32_t group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId];
}
inline bool isPhysAssigned(uint32_t group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId);
}
inline bool isPhysDirty(uint32_t group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId);
}
//! \}
//! \name Assignment
//! \{
// These are low-level allocation helpers that are used to update the current
// mappings between physical and virt/work registers and also to update masks
// that represent allocated and dirty registers. These functions don't emit
// any code; they are only used to update and keep all mappings in sync.
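//
// For example (illustrative only; not taken from the register allocator
// itself), assigning a work register to a physical register, syncing its
// spill slot, and finally freeing it could be expressed as:
//
//   ra.assign(group, workId, physId, RAAssignment::kDirty);
//   ra.makeClean(group, workId, physId);   // spill slot is now in sync
//   ra.unassign(group, workId, physId);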
//! Assign [VirtReg/WorkReg] to a physical register.
ASMJIT_INLINE void assign(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
ASMJIT_ASSERT(!isPhysAssigned(group, physId));
ASMJIT_ASSERT(!isPhysDirty(group, physId));
_workToPhysMap->physIds[workId] = uint8_t(physId);
_physToWorkIds[group][physId] = workId;
uint32_t regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] |= regMask;
_physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
verify();
}
//! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
ASMJIT_INLINE void reassign(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
ASMJIT_ASSERT(dstPhysId != srcPhysId);
ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true);
ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false);
_workToPhysMap->physIds[workId] = uint8_t(dstPhysId);
_physToWorkIds[group][srcPhysId] = kWorkNone;
_physToWorkIds[group][dstPhysId] = workId;
uint32_t srcMask = Support::bitMask(srcPhysId);
uint32_t dstMask = Support::bitMask(dstPhysId);
uint32_t dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
uint32_t regMask = dstMask | srcMask;
_physToWorkMap->assigned[group] ^= regMask;
_physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
verify();
}
ASMJIT_INLINE void swap(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
ASMJIT_ASSERT(aPhysId != bPhysId);
ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId);
ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId);
ASMJIT_ASSERT(isPhysAssigned(group, aPhysId));
ASMJIT_ASSERT(isPhysAssigned(group, bPhysId));
_workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId);
_workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId);
_physToWorkIds[group][aPhysId] = bWorkId;
_physToWorkIds[group][bPhysId] = aWorkId;
uint32_t aMask = Support::bitMask(aPhysId);
uint32_t bMask = Support::bitMask(bPhysId);
uint32_t flipMask = Support::bitMaskFromBool<uint32_t>(
((_physToWorkMap->dirty[group] & aMask) != 0) ^
((_physToWorkMap->dirty[group] & bMask) != 0));
uint32_t regMask = aMask | bMask;
_physToWorkMap->dirty[group] ^= regMask & flipMask;
verify();
}
//! Unassign [VirtReg/WorkReg] from a physical register.
ASMJIT_INLINE void unassign(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
ASMJIT_ASSERT(isPhysAssigned(group, physId));
_workToPhysMap->physIds[workId] = kPhysNone;
_physToWorkIds[group][physId] = kWorkNone;
uint32_t regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] &= ~regMask;
_physToWorkMap->dirty[group] &= ~regMask;
verify();
}
inline void makeClean(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
uint32_t regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] &= ~regMask;
}
inline void makeDirty(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
uint32_t regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] |= regMask;
}
//! \}
//! \name Utilities
//! \{
inline void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap);
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
std::swap(_physToWorkIds[group], other._physToWorkIds[group]);
}
inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount));
}
inline void copyFrom(const RAAssignment& other) noexcept {
copyFrom(other.physToWorkMap(), other.workToPhysMap());
}
// Not really useful outside of debugging.
bool equals(const RAAssignment& other) const noexcept {
// Layout should always match.
if (_layout.physIndex != other._layout.physIndex ||
_layout.physCount != other._layout.physCount ||
_layout.physTotal != other._layout.physTotal ||
_layout.workCount != other._layout.workCount ||
_layout.workRegs != other._layout.workRegs)
return false;
uint32_t physTotal = _layout.physTotal;
uint32_t workCount = _layout.workCount;
for (uint32_t physId = 0; physId < physTotal; physId++) {
uint32_t thisWorkId = _physToWorkMap->workIds[physId];
uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
if (thisWorkId != otherWorkId)
return false;
}
for (uint32_t workId = 0; workId < workCount; workId++) {
uint32_t thisPhysId = _workToPhysMap->physIds[workId];
uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
if (thisPhysId != otherPhysId)
return false;
}
if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
_physToWorkMap->dirty != other._physToWorkMap->dirty )
return false;
return true;
}
#if defined(ASMJIT_BUILD_DEBUG)
ASMJIT_NOINLINE void verify() noexcept {
// Verify WorkToPhysMap.
{
for (uint32_t workId = 0; workId < _layout.workCount; workId++) {
uint32_t physId = _workToPhysMap->physIds[workId];
if (physId != kPhysNone) {
const RAWorkReg* workReg = _layout.workRegs->at(workId);
uint32_t group = workReg->group();
ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
}
}
}
// Verify PhysToWorkMap.
{
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
uint32_t physCount = _layout.physCount[group];
for (uint32_t physId = 0; physId < physCount; physId++) {
uint32_t workId = _physToWorkIds[group][physId];
if (workId != kWorkNone) {
ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId);
}
}
}
}
}
#else
inline void verify() noexcept {}
#endif
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
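
A note on the assignment helpers above: they keep two mirrored tables (work-to-phys and phys-to-work) plus per-group `assigned`/`dirty` bitmasks in sync, and the dirty bit is what later decides whether spilling a register also needs a store. A minimal standalone sketch of that bookkeeping for a single register group follows; `TinyAssignment` and its members are simplified stand-ins for illustration only, not asmjit's API.

#include <cstdint>
#include <cassert>

struct TinyAssignment {
  enum : uint8_t { kNone = 0xFF };
  uint8_t workToPhys[64];   // workId -> physId (kNone if unassigned)
  uint8_t physToWork[32];   // physId -> workId (kNone if free)
  uint32_t assigned = 0;    // one bit per physical register
  uint32_t dirty = 0;       // one bit per physical register

  TinyAssignment() {
    for (uint8_t& w : workToPhys) w = kNone;
    for (uint8_t& p : physToWork) p = kNone;
  }

  void assign(uint8_t workId, uint8_t physId, bool makeDirty) {
    assert(workToPhys[workId] == kNone && physToWork[physId] == kNone);
    workToPhys[workId] = physId;
    physToWork[physId] = workId;
    assigned |= (1u << physId);
    if (makeDirty)
      dirty |= (1u << physId);
  }

  void unassign(uint8_t workId, uint8_t physId) {
    assert(workToPhys[workId] == physId && physToWork[physId] == workId);
    workToPhys[workId] = kNone;
    physToWork[physId] = kNone;
    assigned &= ~(1u << physId);
    dirty    &= ~(1u << physId);
  }
};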

@ -0,0 +1,636 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/formatter.h"
#include "../core/rapass_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RACFGBuilderT]
// ============================================================================
template<typename This>
class RACFGBuilderT {
public:
BaseRAPass* _pass = nullptr;
BaseCompiler* _cc = nullptr;
RABlock* _curBlock = nullptr;
RABlock* _retBlock = nullptr;
FuncNode* _funcNode = nullptr;
RARegsStats _blockRegStats {};
uint32_t _exitLabelId = Globals::kInvalidId;
ZoneVector<uint32_t> _sharedAssignmentsMap {};
// Only used by logging; keeping it here avoids more #ifdefs...

bool _hasCode = false;
RABlock* _lastLoggedBlock = nullptr;
#ifndef ASMJIT_NO_LOGGING
Logger* _logger = nullptr;
uint32_t _logFlags = FormatOptions::kFlagPositions;
StringTmp<512> _sb;
#endif
static constexpr uint32_t kRootIndentation = 2;
static constexpr uint32_t kCodeIndentation = 4;
// NOTE: This is a bit hacky. There are some nodes which are processed twice
// (see `onBeforeInvoke()` and `onBeforeRet()`) as they can insert some nodes
// around them. Since we don't have any flags to mark these, we just use their
// position, which is [at that time] unassigned.
static constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
inline RACFGBuilderT(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING
_logger = _pass->debugLogger();
if (_logger)
_logFlags |= _logger->flags();
#endif
}
inline BaseCompiler* cc() const noexcept { return _cc; }
// --------------------------------------------------------------------------
// [Run]
// --------------------------------------------------------------------------
//! Called per function by an architecture-specific CFG builder.
Error run() noexcept {
log("[RAPass::BuildCFG]\n");
ASMJIT_PROPAGATE(prepare());
logNode(_funcNode, kRootIndentation);
logBlock(_curBlock, kRootIndentation);
RABlock* entryBlock = _curBlock;
BaseNode* node = _funcNode->next();
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
_curBlock->setFirst(_funcNode);
_curBlock->setLast(_funcNode);
RAInstBuilder ib;
ZoneVector<RABlock*> blocksWithUnknownJumps;
for (;;) {
BaseNode* next = node->next();
ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore);
if (node->isInst()) {
// Instruction | Jump | Invoke | Return
// ------------------------------------
// Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them
// share the same interface that provides operands that have read/write
// semantics.
if (ASMJIT_UNLIKELY(!_curBlock)) {
// Unreachable code has to be removed; we cannot allocate registers
// in such code because we cannot do proper liveness analysis for it.
removeNode(node);
node = next;
continue;
}
_hasCode = true;
if (node->isInvoke() || node->isFuncRet()) {
if (node->position() != kNodePositionDidOnBefore) {
// Call and Ret are complicated as they may insert some surrounding
// code around them. The simplest approach is to get the previous
// node, call the `onBefore()` handlers and then check whether
// anything changed and restart if so. By restart we mean that the
// current `node` would go back to the first possible inserted node
// by `onBeforeInvoke()` or `onBeforeRet()`.
BaseNode* prev = node->prev();
if (node->type() == BaseNode::kNodeInvoke)
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeInvoke(node->as<InvokeNode>()));
else
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
if (prev != node->prev()) {
// If this was the first node in the block and something was
// inserted before it then we have to update the first block.
if (_curBlock->first() == node)
_curBlock->setFirst(prev->next());
node->setPosition(kNodePositionDidOnBefore);
node = prev->next();
// `onBeforeInvoke()` and `onBeforeRet()` can only insert instructions.
ASMJIT_ASSERT(node->isInst());
}
// Necessary if something was inserted after `node`, but nothing before.
next = node->next();
}
else {
// Change the position back to its original value.
node->setPosition(0);
}
}
InstNode* inst = node->as<InstNode>();
logNode(inst, kCodeIndentation);
uint32_t controlType = BaseInst::kControlNone;
ib.reset();
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, controlType, ib));
if (node->isInvoke()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInvoke(inst->as<InvokeNode>(), ib));
}
if (node->isFuncRet()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
controlType = BaseInst::kControlReturn;
}
if (controlType == BaseInst::kControlJump) {
uint32_t fixedRegCount = 0;
for (RATiedReg& tiedReg : ib) {
RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
if (workReg->group() == BaseReg::kGroupGp) {
uint32_t useId = tiedReg.useId();
if (useId == BaseReg::kIdBad) {
useId = _pass->_scratchRegIndexes[fixedRegCount++];
tiedReg.setUseId(useId);
}
_curBlock->addExitScratchGpRegs(Support::bitMask<uint32_t>(useId));
}
}
}
ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
_blockRegStats.combineWith(ib._stats);
if (controlType != BaseInst::kControlNone) {
// Support for conditional and unconditional jumps.
if (controlType == BaseInst::kControlJump || controlType == BaseInst::kControlBranch) {
_curBlock->setLast(node);
_curBlock->addFlags(RABlock::kFlagHasTerminator);
_curBlock->makeConstructed(_blockRegStats);
if (!(inst->instOptions() & BaseInst::kOptionUnfollow)) {
// Jmp/Jcc/Call/Loop/etc...
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
// Cannot jump anywhere without operands.
if (ASMJIT_UNLIKELY(!opCount))
return DebugUtils::errored(kErrorInvalidState);
if (opArray[opCount - 1].isLabel()) {
// Labels are easy for constructing the control flow.
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, opArray[opCount - 1].as<Label>()));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock))
return DebugUtils::errored(kErrorOutOfMemory);
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
else {
// Not a label - could be a jump with a reg/mem operand, which
// means that it can go anywhere. Such jumps must be annotated
// so the CFG can be properly constructed; otherwise we assume
// the worst case - it can jump to any basic block.
JumpAnnotation* jumpAnnotation = nullptr;
_curBlock->addFlags(RABlock::kFlagHasJumpTable);
if (inst->type() == BaseNode::kNodeJump)
jumpAnnotation = inst->as<JumpNode>()->annotation();
if (jumpAnnotation) {
uint64_t timestamp = _pass->nextTimestamp();
for (uint32_t id : jumpAnnotation->labelIds()) {
LabelNode* labelNode;
ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, id));
RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
if (ASMJIT_UNLIKELY(!targetBlock))
return DebugUtils::errored(kErrorOutOfMemory);
// Prevents adding basic-block successors multiple times.
if (!targetBlock->hasTimestamp(timestamp)) {
targetBlock->setTimestamp(timestamp);
targetBlock->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
}
ASMJIT_PROPAGATE(shareAssignmentAcrossSuccessors(_curBlock));
}
else {
ASMJIT_PROPAGATE(blocksWithUnknownJumps.append(_pass->allocator(), _curBlock));
}
}
}
if (controlType == BaseInst::kControlJump) {
// An unconditional jump makes the code after the jump unreachable,
// and it will be removed instantly during CFG construction, as we
// cannot allocate registers for instructions that are not part of
// any block. Of course we could leave these instructions as they
// are; however, that would only postpone the problem, as assemblers
// can't encode instructions that use virtual registers.
_curBlock = nullptr;
}
else {
node = next;
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
RABlock* consecutiveBlock;
if (node->type() == BaseNode::kNodeLabel) {
if (node->hasPassData()) {
consecutiveBlock = node->passData<RABlock>();
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock))
return DebugUtils::errored(kErrorOutOfMemory);
node->setPassData<RABlock>(consecutiveBlock);
}
}
else {
consecutiveBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutiveBlock))
return DebugUtils::errored(kErrorOutOfMemory);
}
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
_curBlock = consecutiveBlock;
_hasCode = false;
_blockRegStats.reset();
if (_curBlock->isConstructed())
break;
ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
logBlock(_curBlock, kRootIndentation);
continue;
}
}
if (controlType == BaseInst::kControlReturn) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
_curBlock = nullptr;
}
}
}
else if (node->type() == BaseNode::kNodeLabel) {
// Label - Basic-Block Management
// ------------------------------
if (!_curBlock) {
// If the current code is unreachable the label makes it reachable
// again. We may remove the whole block in the future if it's not
// referenced though.
_curBlock = node->passData<RABlock>();
if (_curBlock) {
// If the label has a block assigned we can either continue with
// it or skip it if the block has been constructed already.
if (_curBlock->isConstructed())
break;
}
else {
// No block assigned - create a new one and assign it.
_curBlock = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!_curBlock))
return DebugUtils::errored(kErrorOutOfMemory);
node->setPassData<RABlock>(_curBlock);
}
_curBlock->makeTargetable();
_hasCode = false;
_blockRegStats.reset();
ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
}
else {
if (node->hasPassData()) {
RABlock* consecutive = node->passData<RABlock>();
consecutive->makeTargetable();
if (_curBlock == consecutive) {
// The label currently processed is part of the current block. This
// is only possible for multiple labels that are right next to each
// other or labels that are separated by non-code nodes like directives
// and comments.
if (ASMJIT_UNLIKELY(_hasCode))
return DebugUtils::errored(kErrorInvalidState);
}
else {
// Label makes the current block constructed. There is a chance that the
// Label is not used, but we don't know that at this point. In the worst
// case there would be two blocks next to each other, which is just fine.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
}
else {
// First time we see this label.
if (_hasCode || _curBlock == entryBlock) {
// Cannot continue the current block if it already contains some
// code or it's a block entry. We need to create a new block and
// make it a successor.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
RABlock* consecutive = _pass->newBlock(node);
if (ASMJIT_UNLIKELY(!consecutive))
return DebugUtils::errored(kErrorOutOfMemory);
consecutive->makeTargetable();
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
_curBlock = consecutive;
_hasCode = false;
_blockRegStats.reset();
}
node->setPassData<RABlock>(_curBlock);
}
}
if (_curBlock && _curBlock != _lastLoggedBlock)
logBlock(_curBlock, kRootIndentation);
logNode(node, kRootIndentation);
// Unlikely: Assume that the exit label is reached only once per function.
if (ASMJIT_UNLIKELY(node->as<LabelNode>()->labelId() == _exitLabelId)) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_pass->addExitBlock(_curBlock));
_curBlock = nullptr;
}
}
else {
// Other Nodes | Function Exit
// ---------------------------
logNode(node, kCodeIndentation);
if (node->type() == BaseNode::kNodeSentinel) {
if (node == _funcNode->endNode()) {
// Make sure we didn't flow here if this is the end of the function sentinel.
if (ASMJIT_UNLIKELY(_curBlock))
return DebugUtils::errored(kErrorInvalidState);
break;
}
}
else if (node->type() == BaseNode::kNodeFunc) {
// RAPass can only compile a single function at a time. If we
// encountered a function it must be the current one, bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode))
return DebugUtils::errored(kErrorInvalidState);
// PASS if this is the first node.
}
else {
// PASS if this is a non-interesting or unknown node.
}
}
// Advance to the next node.
node = next;
// NOTE: We cannot encounter a NULL node, because every function must be
// terminated by a sentinel (`stop`) node. If we encountered a NULL node it
// means that something went wrong and this node list is corrupted; bail in
// such case.
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
}
if (_pass->hasDanglingBlocks())
return DebugUtils::errored(kErrorInvalidState);
for (RABlock* block : blocksWithUnknownJumps)
handleBlockWithUnknownJump(block);
return _pass->initSharedAssignments(_sharedAssignmentsMap);
}
// --------------------------------------------------------------------------
// [Prepare]
// --------------------------------------------------------------------------
//! Prepares the CFG builder of the current function.
Error prepare() noexcept {
FuncNode* func = _pass->func();
BaseNode* node = nullptr;
// Create entry and exit blocks.
_funcNode = func;
_retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node);
if (ASMJIT_UNLIKELY(!_retBlock))
return DebugUtils::errored(kErrorOutOfMemory);
_retBlock->makeTargetable();
ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
if (node != func) {
_curBlock = _pass->newBlock();
if (ASMJIT_UNLIKELY(!_curBlock))
return DebugUtils::errored(kErrorOutOfMemory);
}
else {
// Function that has no code at all.
_curBlock = _retBlock;
}
// Reset everything we may need.
_blockRegStats.reset();
_exitLabelId = func->exitNode()->labelId();
// Initially we assume there is no code in the function body.
_hasCode = false;
return _pass->addBlock(_curBlock);
}
// --------------------------------------------------------------------------
// [Utilities]
// --------------------------------------------------------------------------
//! Called when a `node` is removed, e.g. because of dead code elimination.
void removeNode(BaseNode* node) noexcept {
logNode(node, kRootIndentation, "<Removed>");
cc()->removeNode(node);
}
//! Handles block with unknown jump, which could be a jump to a jump table.
//!
//! If we encounter such a block we basically insert all existing blocks as
//! successors, except the function entry block and the natural successor
//! (if such a block exists).
Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size();
// NOTE: Iterate from `1` as the first block is the entry block; we don't
// allow the entry to be a successor of any block.
RABlock* consecutive = block->consecutive();
for (size_t i = 1; i < blockCount; i++) {
RABlock* candidate = blocks[i];
if (candidate == consecutive || !candidate->isTargetable())
continue;
block->appendSuccessor(candidate);
}
return shareAssignmentAcrossSuccessors(block);
}
Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
if (block->successors().size() <= 1)
return kErrorOk;
RABlock* consecutive = block->consecutive();
uint32_t sharedAssignmentId = Globals::kInvalidId;
for (RABlock* successor : block->successors()) {
if (successor == consecutive)
continue;
if (successor->hasSharedAssignmentId()) {
if (sharedAssignmentId == Globals::kInvalidId)
sharedAssignmentId = successor->sharedAssignmentId();
else
_sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
}
else {
if (sharedAssignmentId == Globals::kInvalidId)
ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
successor->setSharedAssignmentId(sharedAssignmentId);
}
}
return kErrorOk;
}
Error newSharedAssignmentId(uint32_t* out) noexcept {
uint32_t id = _sharedAssignmentsMap.size();
ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
*out = id;
return kErrorOk;
}
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
#ifndef ASMJIT_NO_LOGGING
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
if (_logger)
_logger->logf(fmt, std::forward<Args>(args)...);
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
if (_logger)
_logBlock(block, indentation);
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
if (_logger)
_logNode(node, indentation, action);
}
void _logBlock(RABlock* block, uint32_t indentation) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
_sb.appendFormat("{#%u}\n", block->blockId());
_logger->log(_sb);
_lastLoggedBlock = block;
}
void _logNode(BaseNode* node, uint32_t indentation, const char* action) noexcept {
_sb.clear();
_sb.appendChars(' ', indentation);
if (action) {
_sb.append(action);
_sb.append(' ');
}
Formatter::formatNode(_sb, _logFlags, cc(), node);
_sb.append('\n');
_logger->log(_sb);
}
#else
template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept {
DebugUtils::unused(fmt);
DebugUtils::unused(std::forward<Args>(args)...);
}
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
DebugUtils::unused(block, indentation);
}
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
DebugUtils::unused(node, indentation, action);
}
#endif
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
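
For orientation, the core of `RACFGBuilderT::run()` above is a single pass over the compiler's node list that starts a new basic block at every label and ends the current one after every terminator. The stripped-down sketch below shows just that splitting rule with hypothetical `Kind`/`Node` types and no successor or CFG bookkeeping; it is not asmjit code.

#include <cstddef>
#include <vector>

enum class Kind { Inst, Jump, Ret, Label };
struct Node { Kind kind; };

// Split a linear node list into basic blocks: a Label starts a new block,
// a Jump/Ret terminates the current one.
std::vector<std::vector<size_t>> buildBlocks(const std::vector<Node>& nodes) {
  std::vector<std::vector<size_t>> blocks;
  std::vector<size_t> cur;
  for (size_t i = 0; i < nodes.size(); i++) {
    if (nodes[i].kind == Kind::Label && !cur.empty()) {
      blocks.push_back(cur);   // a label target begins a new block
      cur.clear();
    }
    cur.push_back(i);
    if (nodes[i].kind == Kind::Jump || nodes[i].kind == Kind::Ret) {
      blocks.push_back(cur);   // a terminator ends the block
      cur.clear();
    }
  }
  if (!cur.empty())
    blocks.push_back(cur);
  return blocks;
}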

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,282 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/raassignment_p.h"
#include "../core/radefs_p.h"
#include "../core/rapass_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RALocalAllocator]
// ============================================================================
//! Local register allocator.
class RALocalAllocator {
public:
ASMJIT_NONCOPYABLE(RALocalAllocator)
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! Link to `BaseRAPass`.
BaseRAPass* _pass;
//! Link to `BaseCompiler`.
BaseCompiler* _cc;
//! Architecture traits.
const ArchTraits* _archTraits;
//! Registers available to the allocator.
RARegMask _availableRegs;
//! Registers clobbered by the allocator.
RARegMask _clobberedRegs;
//! Register assignment (current).
RAAssignment _curAssignment;
//! Register assignment used temporarily during assignment switches.
RAAssignment _tmpAssignment;
//! Link to the current `RABlock`.
RABlock* _block;
//! InstNode.
InstNode* _node;
//! RA instruction.
RAInst* _raInst;
//! Count of all TiedReg's.
uint32_t _tiedTotal;
//! Count of TiedReg's per register group.
RARegCount _tiedCount;
//! \name Construction & Destruction
//! \{
inline RALocalAllocator(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()),
_archTraits(pass->_archTraits),
_availableRegs(pass->_availableRegs),
_clobberedRegs(),
_curAssignment(),
_block(nullptr),
_node(nullptr),
_raInst(nullptr),
_tiedTotal(),
_tiedCount() {}
Error init() noexcept;
//! \}
//! \name Accessors
//! \{
inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
inline PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
inline WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
//! Returns the currently processed block.
inline RABlock* block() const noexcept { return _block; }
//! Sets the currently processed block.
inline void setBlock(RABlock* block) noexcept { _block = block; }
//! Returns the currently processed `InstNode`.
inline InstNode* node() const noexcept { return _node; }
//! Returns the currently processed `RAInst`.
inline RAInst* raInst() const noexcept { return _raInst; }
//! Returns all tied regs as `RATiedReg` array.
inline RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers grouped by the given `group`.
inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction.
inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`.
inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount.get(group); }
inline bool isGroupUsed(uint32_t group) const noexcept { return _tiedCount[group] != 0; }
//! \}
//! \name Assignment
//! \{
Error makeInitialAssignment() noexcept;
Error replaceAssignment(
const PhysToWorkMap* physToWorkMap,
const WorkToPhysMap* workToPhysMap) noexcept;
//! Switch to the given assignment by reassigning all registers and emitting
//! code that reassigns them. This is always used to switch to a previously
//! stored assignment.
//!
//! If `tryMode` is true then the final assignment doesn't have to be exactly
//! the same as specified by `dstPhysToWorkMap` and `dstWorkToPhysMap`. This mode
//! is only used before conditional jumps that already have an assignment, to
//! generate a code sequence that is always executed regardless of the flow.
Error switchToAssignment(
PhysToWorkMap* dstPhysToWorkMap,
WorkToPhysMap* dstWorkToPhysMap,
const ZoneBitVector& liveIn,
bool dstReadOnly,
bool tryMode) noexcept;
inline Error spillRegsBeforeEntry(RABlock* block) noexcept {
return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
}
Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
//! \}
//! \name Allocation
//! \{
Error allocInst(InstNode* node) noexcept;
Error spillAfterAllocation(InstNode* node) noexcept;
Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
//! \}
//! \name Decision Making
//! \{
enum CostModel : uint32_t {
kCostOfFrequency = 1048576,
kCostOfDirtyFlag = kCostOfFrequency / 4
};
inline uint32_t costByFrequency(float freq) const noexcept {
return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}
inline uint32_t calculateSpillCost(uint32_t group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq());
if (_curAssignment.isPhysDirty(group, assignedId))
cost += kCostOfDirtyFlag;
return cost;
}
//! Decides on register assignment.
uint32_t decideOnAssignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
//! Decides whether to MOVE or SPILL the given WorkReg, because it's allocated
//! in a physical register that has to be used by another WorkReg.
//!
//! The function must return either `RAAssignment::kPhysNone`, which means that
//! the WorkReg of `workId` should be spilled, or a valid physical register ID,
//! which means that the register should be moved to that physical register instead.
uint32_t decideOnReassignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
//! Decides on the best spill given a register mask `spillableRegs`.
uint32_t decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \}
//! \name Emit
//! \{
//! Emits a move between a destination and source register, and fixes the
//! register assignment.
inline Error onMoveReg(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) return kErrorOk;
_curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
return _pass->emitMove(workId, dstPhysId, srcPhysId);
}
//! Emits a swap between two physical registers and fixes their assignment.
//!
//! \note Target must support this operation otherwise this would ASSERT.
inline Error onSwapReg(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
}
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean.
inline Error onLoadReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId);
}
//! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean.
inline Error onSaveReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
_curAssignment.makeClean(group, workId, physId);
return _pass->emitSave(workId, physId);
}
//! Assigns a register, the content of it is undefined at this point.
inline Error onAssignReg(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty);
return kErrorOk;
}
//! Spills a variable/register, saves the content to the memory-home if modified.
inline Error onSpillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
return onKillReg(group, workId, physId);
}
inline Error onDirtyReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId);
return kErrorOk;
}
inline Error onKillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId);
return kErrorOk;
}
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RALOCAL_P_H_INCLUDED
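
The cost model above is easy to restate in isolation: a spill candidate's cost is its live-range frequency scaled by `kCostOfFrequency`, plus `kCostOfDirtyFlag` when the register is dirty, because spilling a dirty register also requires a store. Below is a plain-function sketch of that arithmetic mirroring `calculateSpillCost()`; `spillCost` is a hypothetical helper for illustration, not asmjit API.

#include <cstdint>

static uint32_t spillCost(float freq, bool isDirty) {
  constexpr uint32_t kCostOfFrequency = 1048576;
  constexpr uint32_t kCostOfDirtyFlag = kCostOfFrequency / 4;
  uint32_t cost = uint32_t(int32_t(freq * float(kCostOfFrequency)));
  if (isDirty)
    cost += kCostOfDirtyFlag;       // dirty register must be stored first
  return cost;
}
// e.g. spillCost(0.5f, true) == 524288 + 262144 == 786432.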

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,206 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/rastack_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::RAStackAllocator - Slots]
// ============================================================================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
return nullptr;
RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
if (ASMJIT_UNLIKELY(!slot))
return nullptr;
slot->_baseRegId = uint8_t(baseRegId);
slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
slot->_flags = uint16_t(flags);
slot->_useCount = 0;
slot->_size = size;
slot->_weight = 0;
slot->_offset = 0;
_alignment = Support::max<uint32_t>(_alignment, alignment);
_slots.appendUnsafe(slot);
return slot;
}
// ============================================================================
// [asmjit::RAStackAllocator - Utilities]
// ============================================================================
struct RAStackGap {
inline RAStackGap() noexcept
: offset(0),
size(0) {}
inline RAStackGap(uint32_t offset, uint32_t size) noexcept
: offset(offset),
size(size) {}
inline RAStackGap(const RAStackGap& other) noexcept
: offset(other.offset),
size(other.size) {}
uint32_t offset;
uint32_t size;
};
Error RAStackAllocator::calculateStackFrame() noexcept {
// Base weight added to all registers regardless of their size and alignment.
uint32_t kBaseRegWeight = 16;
// STEP 1:
//
// Update usage based on the size of the slot. We boost smaller slots so that
// a 32-bit register has higher priority than a 128-bit register; however, if
// one 128-bit register is used 4 times more than some other 32-bit register
// it will outweigh it.
for (RAStackSlot* slot : _slots) {
uint32_t alignment = slot->alignment();
ASMJIT_ASSERT(alignment > 0);
uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
uint64_t weight;
if (slot->isRegHome())
weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
else
weight = power;
// If the weight overflowed (which is less likely than winning the lottery),
// just use the maximum possible weight. In that case it probably doesn't
// matter at all.
if (weight > 0xFFFFFFFFu)
weight = 0xFFFFFFFFu;
slot->setWeight(uint32_t(weight));
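// e.g. a register-home slot with 4-byte alignment (power == 2) and
// useCount == 10 ends up with weight 16 + 10 * (7 - 2) == 66.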
}
// STEP 2:
//
// Sort stack slots based on their newly calculated weight (in descending order).
_slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
return a->weight() > b->weight() ? 1 :
a->weight() == b->weight() ? 0 : -1;
});
// STEP 3:
//
// Calculate the offset of each slot. We start from the slot that has the
// highest weight and advance to slots with lower weight. It may look like
// offsets simply start from the first slot in our list and then increase, but
// that's not always the case, as we also try to fill all gaps introduced by
// the fact that slots are sorted by weight and not by size & alignment; when
// we need to align some slot we distribute the gap caused by the alignment
// into `gaps`.
uint32_t offset = 0;
ZoneVector<RAStackGap> gaps[kSizeCount - 1];
for (RAStackSlot* slot : _slots) {
if (slot->isStackArg())
continue;
uint32_t slotAlignment = slot->alignment();
uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
// Try to find a slot within gaps first, before advancing the `offset`.
bool foundGap = false;
uint32_t gapSize = 0;
uint32_t gapOffset = 0;
{
uint32_t slotSize = slot->size();
if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
// Iterate from the lowest to the highest possible.
uint32_t index = Support::ctz(slotSize);
do {
if (!gaps[index].empty()) {
RAStackGap gap = gaps[index].pop();
ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
slot->setOffset(int32_t(gap.offset));
gapSize = gap.size - slotSize;
gapOffset = gap.offset - slotSize;
foundGap = true;
break;
}
} while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
}
}
// No gap found; we may create new one(s) if the current offset is not aligned.
if (!foundGap && offset != alignedOffset) {
gapSize = alignedOffset - offset;
gapOffset = alignedOffset;
offset = alignedOffset;
}
// True if we found a gap but didn't fill all of it, or if we aligned the current offset.
if (gapSize) {
uint32_t gapEnd = gapSize + gapOffset;
while (gapOffset < gapEnd) {
uint32_t index = Support::ctz(gapOffset);
uint32_t slotSize = 1u << index;
// Weird case, better to bail...
if (gapEnd - gapOffset < slotSize)
break;
ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
gapOffset += slotSize;
}
}
if (!foundGap) {
ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
slot->setOffset(int32_t(offset));
offset += slot->size();
}
}
_stackSize = Support::alignUp(offset, _alignment);
return kErrorOk;
}
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
for (RAStackSlot* slot : _slots)
if (!slot->isStackArg())
slot->_offset += offset;
return kErrorOk;
}
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
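
One detail of `calculateStackFrame()` worth spelling out: when aligning a slot leaves a hole, the hole is chopped into power-of-two chunks keyed by the trailing-zero count of the offset, so later (smaller) slots can reuse them. The sketch below shows just that decomposition as a hypothetical standalone helper; the real code pushes each chunk into `gaps[index]` instead of returning them.

#include <cstdint>
#include <vector>
#include <utility>

// Split an alignment gap [gapOffset, gapEnd) into power-of-two chunks whose
// size matches the alignment of their starting offset.
static std::vector<std::pair<uint32_t, uint32_t>> splitGap(uint32_t gapOffset, uint32_t gapEnd) {
  std::vector<std::pair<uint32_t, uint32_t>> chunks;  // (offset, size) pairs
  while (gapOffset < gapEnd) {
    uint32_t index = 0;                               // trailing-zero count of gapOffset
    while (((gapOffset >> index) & 1u) == 0 && index < 31)
      index++;
    uint32_t chunkSize = 1u << index;
    if (gapEnd - gapOffset < chunkSize)               // chunk would overrun the gap
      break;
    chunks.emplace_back(gapOffset, chunkSize);
    gapOffset += chunkSize;
  }
  return chunks;
}
// e.g. splitGap(4, 16) yields (4,4) then (8,8) - two reusable chunks.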

@ -0,0 +1,187 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_RASTACK_P_H_INCLUDED
#define ASMJIT_CORE_RASTACK_P_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/radefs_p.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RAStackSlot]
// ============================================================================
//! Stack slot.
struct RAStackSlot {
//! Stack slot flags.
//!
//! TODO: kFlagStackArg is not used by the current implementation, do we need to keep it?
enum Flags : uint32_t {
//! Stack slot is register home slot.
kFlagRegHome = 0x0001u,
//! Stack slot position matches argument passed via stack.
kFlagStackArg = 0x0002u
};
enum ArgIndex : uint32_t {
kNoArgIndex = 0xFF
};
//! Base register used to address the stack.
uint8_t _baseRegId;
//! Minimum alignment required by the slot.
uint8_t _alignment;
//! Stack slot flags.
uint16_t _flags;
//! Size of memory required by the slot.
uint32_t _size;
//! Usage counter (one unit equals one memory access).
uint32_t _useCount;
//! Weight of the slot, calculated by \ref RAStackAllocator::calculateStackFrame().
uint32_t _weight;
//! Stack offset, calculated by \ref RAStackAllocator::calculateStackFrame().
int32_t _offset;
//! \name Accessors
//! \{
inline uint32_t baseRegId() const noexcept { return _baseRegId; }
inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); }
inline uint32_t size() const noexcept { return _size; }
inline uint32_t alignment() const noexcept { return _alignment; }
inline uint32_t flags() const noexcept { return _flags; }
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); }
inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); }
inline uint32_t useCount() const noexcept { return _useCount; }
inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; }
inline uint32_t weight() const noexcept { return _weight; }
inline void setWeight(uint32_t weight) noexcept { _weight = weight; }
inline int32_t offset() const noexcept { return _offset; }
inline void setOffset(int32_t offset) noexcept { _offset = offset; }
//! \}
};
typedef ZoneVector<RAStackSlot*> RAStackSlots;
// ============================================================================
// [asmjit::RAStackAllocator]
// ============================================================================
//! Stack allocator.
class RAStackAllocator {
public:
ASMJIT_NONCOPYABLE(RAStackAllocator)
enum Size : uint32_t {
kSize1 = 0,
kSize2 = 1,
kSize4 = 2,
kSize8 = 3,
kSize16 = 4,
kSize32 = 5,
kSize64 = 6,
kSizeCount = 7
};
//! Allocator used to allocate internal data.
ZoneAllocator* _allocator;
//! Count of bytes used by all slots.
uint32_t _bytesUsed;
//! Calculated stack size (can be a bit greater than `_bytesUsed`).
uint32_t _stackSize;
//! Minimum stack alignment.
uint32_t _alignment;
//! Stack slots vector.
RAStackSlots _slots;
//! \name Construction / Destruction
//! \{
inline RAStackAllocator() noexcept
: _allocator(nullptr),
_bytesUsed(0),
_stackSize(0),
_alignment(1),
_slots() {}
inline void reset(ZoneAllocator* allocator) noexcept {
_allocator = allocator;
_bytesUsed = 0;
_stackSize = 0;
_alignment = 1;
_slots.reset();
}
//! \}
//! \name Accessors
//! \{
inline ZoneAllocator* allocator() const noexcept { return _allocator; }
inline uint32_t bytesUsed() const noexcept { return _bytesUsed; }
inline uint32_t stackSize() const noexcept { return _stackSize; }
inline uint32_t alignment() const noexcept { return _alignment; }
inline RAStackSlots& slots() noexcept { return _slots; }
inline const RAStackSlots& slots() const noexcept { return _slots; }
inline uint32_t slotCount() const noexcept { return _slots.size(); }
//! \}
//! \name Utilities
//! \{
RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept;
Error calculateStackFrame() noexcept;
Error adjustSlotOffsets(int32_t offset) noexcept;
//! \}
};
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER
#endif // ASMJIT_CORE_RASTACK_P_H_INCLUDED
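
A small observation on the header above: the `Size` enum is simply log2 of the slot size in bytes (1 maps to kSize1 == 0, 2 to kSize2 == 1, ..., 64 to kSize64 == 6), which is why the allocator can index its gap lists by a trailing-zero count. A tiny restatement of that mapping, as a hypothetical helper for illustration only:

#include <cstdint>

static uint32_t sizeToIndex(uint32_t sizeInBytes) {
  uint32_t index = 0;
  while ((1u << index) < sizeInBytes)
    index++;              // for power-of-two sizes this is exactly log2
  return index;           // 1 -> 0 (kSize1), 2 -> 1 (kSize2), ..., 64 -> 6 (kSize64)
}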

@ -0,0 +1,551 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::String - Globals]
// ============================================================================
static const char String_baseN[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
constexpr size_t kMinAllocSize = 64;
constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;
// ============================================================================
// [asmjit::String]
// ============================================================================
Error String::reset() noexcept {
if (_type == kTypeLarge)
::free(_large.data);
_resetInternal();
return kErrorOk;
}
Error String::clear() noexcept {
if (isLarge()) {
_large.size = 0;
_large.data[0] = '\0';
}
else {
_raw.uptr[0] = 0;
}
return kErrorOk;
}
char* String::prepare(uint32_t op, size_t size) noexcept {
char* curData;
size_t curSize;
size_t curCapacity;
if (isLarge()) {
curData = this->_large.data;
curSize = this->_large.size;
curCapacity = this->_large.capacity;
}
else {
curData = this->_small.data;
curSize = this->_small.type;
curCapacity = kSSOCapacity;
}
if (op == kOpAssign) {
if (size > curCapacity) {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize))
return nullptr;
size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize);
char* newData = static_cast<char*>(::malloc(newCapacity));
if (ASMJIT_UNLIKELY(!newData))
return nullptr;
if (_type == kTypeLarge)
::free(curData);
_large.type = kTypeLarge;
_large.size = size;
_large.capacity = newCapacity - 1;
_large.data = newData;
newData[size] = '\0';
return newData;
}
else {
_setSize(size);
curData[size] = '\0';
return curData;
}
}
else {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize))
return nullptr;
size_t newSize = size + curSize;
size_t newSizePlusOne = newSize + 1;
if (newSizePlusOne > curCapacity) {
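// Grow to the next power of two while below kGrowThreshold; otherwise (or if
// that is still not enough) align the required size up to a multiple of
// kGrowThreshold.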
size_t newCapacity = Support::max<size_t>(curCapacity + 1, kMinAllocSize);
if (newCapacity < newSizePlusOne && newCapacity < Globals::kGrowThreshold)
newCapacity = Support::alignUpPowerOf2(newCapacity);
if (newCapacity < newSizePlusOne)
newCapacity = Support::alignUp(newSizePlusOne, Globals::kGrowThreshold);
if (ASMJIT_UNLIKELY(newCapacity < newSizePlusOne))
return nullptr;
char* newData = static_cast<char*>(::malloc(newCapacity));
if (ASMJIT_UNLIKELY(!newData))
return nullptr;
memcpy(newData, curData, curSize);
if (_type == kTypeLarge)
::free(curData);
_large.type = kTypeLarge;
_large.size = newSize;
_large.capacity = newCapacity - 1;
_large.data = newData;
newData[newSize] = '\0';
return newData + curSize;
}
else {
_setSize(newSize);
curData[newSize] = '\0';
return curData + curSize;
}
}
}
Error String::assign(const char* data, size_t size) noexcept {
char* dst = nullptr;
// Null terminated string without `size` specified.
if (size == SIZE_MAX)
size = data ? strlen(data) : size_t(0);
if (isLarge()) {
if (size <= _large.capacity) {
dst = _large.data;
_large.size = size;
}
else {
size_t capacityPlusOne = Support::alignUp(size + 1, 32);
if (ASMJIT_UNLIKELY(capacityPlusOne < size))
return DebugUtils::errored(kErrorOutOfMemory);
dst = static_cast<char*>(::malloc(capacityPlusOne));
if (ASMJIT_UNLIKELY(!dst))
return DebugUtils::errored(kErrorOutOfMemory);
if (!isExternal())
::free(_large.data);
_large.type = kTypeLarge;
_large.data = dst;
_large.size = size;
_large.capacity = capacityPlusOne - 1;
}
}
else {
if (size <= kSSOCapacity) {
ASMJIT_ASSERT(size < 0xFFu);
dst = _small.data;
_small.type = uint8_t(size);
}
else {
dst = static_cast<char*>(::malloc(size + 1));
if (ASMJIT_UNLIKELY(!dst))
return DebugUtils::errored(kErrorOutOfMemory);
_large.type = kTypeLarge;
_large.data = dst;
_large.size = size;
_large.capacity = size;
}
}
// Optionally copy data from `data` and null-terminate.
if (data && size) {
// NOTE: It's better to use `memmove()`. If, for any reason, somebody uses
// this function to substring the same string it would work as expected.
::memmove(dst, data, size);
}
dst[size] = '\0';
return kErrorOk;
}
// ============================================================================
// [asmjit::String - Operations]
// ============================================================================
Error String::_opString(uint32_t op, const char* str, size_t size) noexcept {
if (size == SIZE_MAX)
size = str ? strlen(str) : size_t(0);
if (!size)
return kErrorOk;
char* p = prepare(op, size);
if (!p)
return DebugUtils::errored(kErrorOutOfMemory);
memcpy(p, str, size);
return kErrorOk;
}
Error String::_opChar(uint32_t op, char c) noexcept {
char* p = prepare(op, 1);
if (!p)
return DebugUtils::errored(kErrorOutOfMemory);
*p = c;
return kErrorOk;
}
Error String::_opChars(uint32_t op, char c, size_t n) noexcept {
if (!n)
return kErrorOk;
char* p = prepare(op, n);
if (!p)
return DebugUtils::errored(kErrorOutOfMemory);
memset(p, c, n);
return kErrorOk;
}
Error String::padEnd(size_t n, char c) noexcept {
size_t size = this->size();
return n > size ? appendChars(c, n - size) : kErrorOk;
}
Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
if (base < 2 || base > 36)
base = 10;
char buf[128];
char* p = buf + ASMJIT_ARRAY_SIZE(buf);
uint64_t orig = i;
char sign = '\0';
// --------------------------------------------------------------------------
// [Sign]
// --------------------------------------------------------------------------
if ((flags & kFormatSigned) != 0 && int64_t(i) < 0) {
i = uint64_t(-int64_t(i));
sign = '-';
}
else if ((flags & kFormatShowSign) != 0) {
sign = '+';
}
else if ((flags & kFormatShowSpace) != 0) {
sign = ' ';
}
// --------------------------------------------------------------------------
// [Number]
// --------------------------------------------------------------------------
do {
uint64_t d = i / base;
uint64_t r = i % base;
*--p = String_baseN[r];
i = d;
} while (i);
size_t numberSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);
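// Example: i == 255, base == 16 emits "FF" here (numberSize == 2); with
// kFormatAlternate the "0x" prefix is prepended below.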
// --------------------------------------------------------------------------
// [Alternate Form]
// --------------------------------------------------------------------------
if ((flags & kFormatAlternate) != 0) {
if (base == 8) {
if (orig != 0)
*--p = '0';
}
if (base == 16) {
*--p = 'x';
*--p = '0';
}
}
// --------------------------------------------------------------------------
// [Width]
// --------------------------------------------------------------------------
if (sign != 0)
*--p = sign;
if (width > 256)
width = 256;
if (width <= numberSize)
width = 0;
else
width -= numberSize;
// --------------------------------------------------------------------------
// [Write]
// --------------------------------------------------------------------------
size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
char* data = prepare(op, prefixSize + width + numberSize);
if (!data)
return DebugUtils::errored(kErrorOutOfMemory);
memcpy(data, p, prefixSize);
data += prefixSize;
memset(data, '0', width);
data += width;
memcpy(data, p + prefixSize, numberSize);
return kErrorOk;
}
Error String::_opHex(uint32_t op, const void* data, size_t size, char separator) noexcept {
char* dst;
const uint8_t* src = static_cast<const uint8_t*>(data);
if (!size)
return kErrorOk;
if (separator) {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3))
return DebugUtils::errored(kErrorOutOfMemory);
dst = prepare(op, size * 3 - 1);
if (ASMJIT_UNLIKELY(!dst))
return DebugUtils::errored(kErrorOutOfMemory);
size_t i = 0;
for (;;) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF];
dst[1] = String_baseN[(src[0] ) & 0xF];
if (++i == size)
break;
// This makes sure that the separator is only put between two hexadecimal bytes.
dst[2] = separator;
dst += 3;
src++;
}
}
else {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2))
return DebugUtils::errored(kErrorOutOfMemory);
dst = prepare(op, size * 2);
if (ASMJIT_UNLIKELY(!dst))
return DebugUtils::errored(kErrorOutOfMemory);
for (size_t i = 0; i < size; i++, dst += 2, src++) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF];
dst[1] = String_baseN[(src[0] ) & 0xF];
}
}
return kErrorOk;
}
Error String::_opFormat(uint32_t op, const char* fmt, ...) noexcept {
Error err;
va_list ap;
va_start(ap, fmt);
err = _opVFormat(op, fmt, ap);
va_end(ap);
return err;
}
Error String::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
size_t startAt = (op == kOpAssign) ? size_t(0) : size();
size_t remainingCapacity = capacity() - startAt;
char buf[1024];
int fmtResult;
size_t outputSize;
va_list apCopy;
va_copy(apCopy, ap);
if (remainingCapacity >= 128) {
fmtResult = vsnprintf(data() + startAt, remainingCapacity, fmt, ap);
outputSize = size_t(fmtResult);
if (ASMJIT_LIKELY(outputSize <= remainingCapacity)) {
_setSize(startAt + outputSize);
return kErrorOk;
}
}
else {
fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
outputSize = size_t(fmtResult);
if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf)))
return _opString(op, buf, outputSize);
}
if (ASMJIT_UNLIKELY(fmtResult < 0))
return DebugUtils::errored(kErrorInvalidState);
char* p = prepare(op, outputSize);
if (ASMJIT_UNLIKELY(!p))
return DebugUtils::errored(kErrorOutOfMemory);
fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy);
ASMJIT_ASSERT(size_t(fmtResult) == outputSize);
return kErrorOk;
}
Error String::truncate(size_t newSize) noexcept {
if (isLarge()) {
if (newSize < _large.size) {
_large.data[newSize] = '\0';
_large.size = newSize;
}
}
else {
if (newSize < _type) {
_small.data[newSize] = '\0';
_small.type = uint8_t(newSize);
}
}
return kErrorOk;
}
bool String::eq(const char* other, size_t size) const noexcept {
const char* aData = data();
const char* bData = other;
size_t aSize = this->size();
size_t bSize = size;
if (bSize == SIZE_MAX) {
size_t i;
for (i = 0; i < aSize; i++)
if (aData[i] != bData[i] || bData[i] == 0)
return false;
return bData[i] == 0;
}
else {
if (aSize != bSize)
return false;
return ::memcmp(aData, bData, aSize) == 0;
}
}
// ============================================================================
// [asmjit::Support - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(core_string) {
String s;
EXPECT(s.isLarge() == false);
EXPECT(s.isExternal() == false);
EXPECT(s.assign('a') == kErrorOk);
EXPECT(s.size() == 1);
EXPECT(s.capacity() == String::kSSOCapacity);
EXPECT(s.data()[0] == 'a');
EXPECT(s.data()[1] == '\0');
EXPECT(s.eq("a") == true);
EXPECT(s.eq("a", 1) == true);
EXPECT(s.assignChars('b', 4) == kErrorOk);
EXPECT(s.size() == 4);
EXPECT(s.capacity() == String::kSSOCapacity);
EXPECT(s.data()[0] == 'b');
EXPECT(s.data()[1] == 'b');
EXPECT(s.data()[2] == 'b');
EXPECT(s.data()[3] == 'b');
EXPECT(s.data()[4] == '\0');
EXPECT(s.eq("bbbb") == true);
EXPECT(s.eq("bbbb", 4) == true);
EXPECT(s.assign("abc") == kErrorOk);
EXPECT(s.size() == 3);
EXPECT(s.capacity() == String::kSSOCapacity);
EXPECT(s.data()[0] == 'a');
EXPECT(s.data()[1] == 'b');
EXPECT(s.data()[2] == 'c');
EXPECT(s.data()[3] == '\0');
EXPECT(s.eq("abc") == true);
EXPECT(s.eq("abc", 3) == true);
const char* large = "Large string that will not fit into SSO buffer";
EXPECT(s.assign(large) == kErrorOk);
EXPECT(s.isLarge() == true);
EXPECT(s.size() == strlen(large));
EXPECT(s.capacity() > String::kSSOCapacity);
EXPECT(s.eq(large) == true);
EXPECT(s.eq(large, strlen(large)) == true);
const char* additional = " (additional content)";
EXPECT(s.isLarge() == true);
EXPECT(s.append(additional) == kErrorOk);
EXPECT(s.size() == strlen(large) + strlen(additional));
EXPECT(s.clear() == kErrorOk);
EXPECT(s.size() == 0);
EXPECT(s.empty() == true);
EXPECT(s.data()[0] == '\0');
EXPECT(s.isLarge() == true); // Clear should never release the memory.
EXPECT(s.appendUInt(1234) == kErrorOk);
EXPECT(s.eq("1234") == true);
StringTmp<64> sTmp;
EXPECT(sTmp.isLarge());
EXPECT(sTmp.isExternal());
EXPECT(sTmp.appendChars(' ', 1000) == kErrorOk);
EXPECT(!sTmp.isExternal());
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,400 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_STRING_H_INCLUDED
#define ASMJIT_CORE_STRING_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::FixedString]
// ============================================================================
//! A fixed string - only useful for strings that would never exceed `N - 1`
//! characters; always null-terminated.
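//!
//! A minimal usage sketch (illustrative only; `memcpy` is from `<cstring>`):
//!
//! ```
//! FixedString<16> label {};
//! memcpy(label.str, "entry", 6);    // the content and terminator must fit into the buffer
//! bool same = label.eq("entry");    // true
//! ```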
template<size_t N>
union FixedString {
enum : uint32_t {
kNumU32 = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
};
char str[kNumU32 * sizeof(uint32_t)];
uint32_t u32[kNumU32];
//! \name Utilities
//! \{
inline bool eq(const char* other) const noexcept {
return strcmp(str, other) == 0;
}
//! \}
};
// ============================================================================
// [asmjit::String]
// ============================================================================
//! A simple non-reference counted string that uses small string optimization (SSO).
//!
//! This string has 3 allocation possibilities:
//!
//! 1. Small - embedded buffer is used for up to `kSSOCapacity` characters.
//! This should handle most small strings and thus avoid dynamic
//! memory allocation for most use-cases.
//!
//! 2. Large - string that doesn't fit into an embedded buffer (or string
//! that was truncated from a larger buffer) and is owned by
//! AsmJit. When you destroy the string AsmJit would automatically
//! release the large buffer.
//!
//! 3. External - like Large (2), however, the large buffer is not owned by
//! AsmJit and won't be released when the string is destroyed
//! or reallocated. This is mostly useful for working with
//! larger temporary strings allocated on stack or with immutable
//! strings.
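//!
//! A short usage sketch (illustrative; uses only the API declared below):
//!
//! ```
//! String s;
//! s.assign("abc");                      // stored in the embedded (SSO) buffer
//! s.appendFormat(" %d", 123);           // "abc 123", still small
//! s.assign("a string long enough to exceed the embedded SSO capacity");
//! bool onHeap = s.isLarge();            // true - now dynamically allocated
//! ```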
class String {
public:
ASMJIT_NONCOPYABLE(String)
//! String operation.
enum Op : uint32_t {
//! Assignment - a new content replaces the current one.
kOpAssign = 0,
//! Append - a new content is appended to the string.
kOpAppend = 1
};
//! String format flags.
enum FormatFlags : uint32_t {
kFormatShowSign = 0x00000001u,
kFormatShowSpace = 0x00000002u,
kFormatAlternate = 0x00000004u,
kFormatSigned = 0x80000000u
};
//! \cond INTERNAL
enum : uint32_t {
kLayoutSize = 32,
kSSOCapacity = kLayoutSize - 2
};
//! String type.
enum Type : uint8_t {
kTypeLarge = 0x1Fu, //!< Large string (owned by String).
kTypeExternal = 0x20u //!< External string (zone allocated or not owned by String).
};
union Raw {
uint8_t u8[kLayoutSize];
uint64_t u64[kLayoutSize / sizeof(uint64_t)];
uintptr_t uptr[kLayoutSize / sizeof(uintptr_t)];
};
struct Small {
uint8_t type;
char data[kSSOCapacity + 1u];
};
struct Large {
uint8_t type;
uint8_t reserved[sizeof(uintptr_t) - 1];
size_t size;
size_t capacity;
char* data;
};
union {
uint8_t _type;
Raw _raw;
Small _small;
Large _large;
};
//! \endcond
//! \name Construction & Destruction
//! \{
//! Creates a default-initialized (empty) string.
inline String() noexcept
: _small {} {}
//! Creates a string that takes ownership of the content of the `other` string.
inline String(String&& other) noexcept {
_raw = other._raw;
other._resetInternal();
}
inline ~String() noexcept {
reset();
}
//! Resets the string into its default-constructed state.
ASMJIT_API Error reset() noexcept;
//! \}
//! \name Overloaded Operators
//! \{
inline String& operator=(String&& other) noexcept {
swap(other);
other.reset();
return *this;
}
inline bool operator==(const char* other) const noexcept { return eq(other); }
inline bool operator!=(const char* other) const noexcept { return !eq(other); }
inline bool operator==(const String& other) const noexcept { return eq(other); }
inline bool operator!=(const String& other) const noexcept { return !eq(other); }
//! \}
//! \name Accessors
//! \{
inline bool isLarge() const noexcept { return _type >= kTypeLarge; }
inline bool isExternal() const noexcept { return _type == kTypeExternal; }
//! Tests whether the string is empty.
inline bool empty() const noexcept { return size() == 0; }
//! Returns the size of the string.
inline size_t size() const noexcept { return isLarge() ? size_t(_large.size) : size_t(_type); }
//! Returns the capacity of the string.
inline size_t capacity() const noexcept { return isLarge() ? _large.capacity : size_t(kSSOCapacity); }
//! Returns the data of the string.
inline char* data() noexcept { return isLarge() ? _large.data : _small.data; }
//! \overload
inline const char* data() const noexcept { return isLarge() ? _large.data : _small.data; }
inline char* start() noexcept { return data(); }
inline const char* start() const noexcept { return data(); }
inline char* end() noexcept { return data() + size(); }
inline const char* end() const noexcept { return data() + size(); }
//! \}
//! \name String Operations
//! \{
//! Swaps the content of this string with `other`.
inline void swap(String& other) noexcept {
std::swap(_raw, other._raw);
}
//! Clears the content of the string.
ASMJIT_API Error clear() noexcept;
ASMJIT_API char* prepare(uint32_t op, size_t size) noexcept;
ASMJIT_API Error _opString(uint32_t op, const char* str, size_t size = SIZE_MAX) noexcept;
ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t size, char separator = '\0') noexcept;
ASMJIT_API Error _opFormat(uint32_t op, const char* fmt, ...) noexcept;
ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
//! Replaces the current content of the string with `data` of the given `size`.
//!
//! Null-terminated strings can pass `SIZE_MAX` as `size`.
ASMJIT_API Error assign(const char* data, size_t size = SIZE_MAX) noexcept;
//! Replaces the current content of the string with the `other` string.
inline Error assign(const String& other) noexcept {
return assign(other.data(), other.size());
}
//! Replaces the current content of the string with a single `c` character.
inline Error assign(char c) noexcept {
return _opChar(kOpAssign, c);
}
//! Replaces the current content of the string with the `c` character repeated `n` times.
inline Error assignChars(char c, size_t n) noexcept {
return _opChars(kOpAssign, c, n);
}
//! Replaces the current content of the string with a formatted integer `i` (signed).
inline Error assignInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAssign, uint64_t(i), base, width, flags | kFormatSigned);
}
//! Replaces the current content of the string with a formatted integer `i` (unsigned).
inline Error assignUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAssign, i, base, width, flags);
}
//! Replaces the current content of the string with the given `data` converted to a HEX string.
inline Error assignHex(const void* data, size_t size, char separator = '\0') noexcept {
return _opHex(kOpAssign, data, size, separator);
}
//! Replaces the current content of the string with a formatted string `fmt`.
template<typename... Args>
inline Error assignFormat(const char* fmt, Args&&... args) noexcept {
return _opFormat(kOpAssign, fmt, std::forward<Args>(args)...);
}
//! Replaces the current content of the string with a formatted string `fmt` (va_list version).
inline Error assignVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kOpAssign, fmt, ap);
}
//! Appends `str` of the given `size` to the string.
//!
//! Null-terminated strings can pass `SIZE_MAX` as `size`.
inline Error append(const char* str, size_t size = SIZE_MAX) noexcept {
return _opString(kOpAppend, str, size);
}
//! Appends `other` string to this string.
inline Error append(const String& other) noexcept {
return append(other.data(), other.size());
}
//! Appends a single `c` character.
inline Error append(char c) noexcept {
return _opChar(kOpAppend, c);
}
//! Appends `c` character repeated `n` times.
inline Error appendChars(char c, size_t n) noexcept {
return _opChars(kOpAppend, c, n);
}
//! Appends a formatted integer `i` (signed).
inline Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAppend, uint64_t(i), base, width, flags | kFormatSigned);
}
//! Appends a formatted integer `i` (unsigned).
inline Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAppend, i, base, width, flags);
}
//! Appends the given `data` converted to a HEX string.
inline Error appendHex(const void* data, size_t size, char separator = '\0') noexcept {
return _opHex(kOpAppend, data, size, separator);
}
//! Appends a formatted string `fmt` with `args`.
template<typename... Args>
inline Error appendFormat(const char* fmt, Args&&... args) noexcept {
return _opFormat(kOpAppend, fmt, std::forward<Args>(args)...);
}
//! Appends a formatted string `fmt` (va_list version).
inline Error appendVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kOpAppend, fmt, ap);
}
ASMJIT_API Error padEnd(size_t n, char c = ' ') noexcept;
//! Truncates the string length to `newSize`.
ASMJIT_API Error truncate(size_t newSize) noexcept;
ASMJIT_API bool eq(const char* other, size_t size = SIZE_MAX) const noexcept;
inline bool eq(const String& other) const noexcept { return eq(other.data(), other.size()); }
//! \}
//! \name Internal Functions
//! \{
//! Resets the string to use embedded storage and makes it empty (zero length, zero first char).
//!
//! \note This is always called internally after an external buffer was released
//! as it zeroes all bytes used by String's embedded storage.
inline void _resetInternal() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
_raw.uptr[i] = 0;
}
inline void _setSize(size_t newSize) noexcept {
if (isLarge())
_large.size = newSize;
else
_small.type = uint8_t(newSize);
}
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use assign() instead of assignString()")
inline Error assignString(const char* data, size_t size = SIZE_MAX) noexcept { return assign(data, size); }
ASMJIT_DEPRECATED("Use assign() instead of assignChar()")
inline Error assignChar(char c) noexcept { return assign(c); }
ASMJIT_DEPRECATED("Use append() instead of appendString()")
inline Error appendString(const char* data, size_t size = SIZE_MAX) noexcept { return append(data, size); }
ASMJIT_DEPRECATED("Use append() instead of appendChar()")
inline Error appendChar(char c) noexcept { return append(c); }
#endif // !ASMJIT_NO_DEPRECATED
};
// ============================================================================
// [asmjit::StringTmp]
// ============================================================================
//! Temporary string builder, has statically allocated `N` bytes.
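//!
//! For example (an illustrative sketch):
//!
//! ```
//! StringTmp<256> s;                     // starts with a 256-byte embedded buffer
//! s.appendFormat("id=%u", 42u);         // no heap allocation while the content fits
//! ```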
template<size_t N>
class StringTmp : public String {
public:
ASMJIT_NONCOPYABLE(StringTmp<N>)
//! Embedded data.
char _embeddedData[Support::alignUp(N + 1, sizeof(size_t))];
//! \name Construction & Destruction
//! \{
inline StringTmp() noexcept {
_resetToTemporary();
}
inline void _resetToTemporary() noexcept {
_large.type = kTypeExternal;
_large.capacity = ASMJIT_ARRAY_SIZE(_embeddedData) - 1;
_large.data = _embeddedData;
_embeddedData[0] = '\0';
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_STRING_H_INCLUDED

@ -0,0 +1,507 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Support - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
template<typename T>
static void testArrays(const T* a, const T* b, size_t size) noexcept {
for (size_t i = 0; i < size; i++)
EXPECT(a[i] == b[i], "Mismatch at %u", unsigned(i));
}
static void testAlignment() noexcept {
INFO("Support::isAligned()");
EXPECT(Support::isAligned<size_t>(0xFFFF, 4) == false);
EXPECT(Support::isAligned<size_t>(0xFFF4, 4) == true);
EXPECT(Support::isAligned<size_t>(0xFFF8, 8) == true);
EXPECT(Support::isAligned<size_t>(0xFFF0, 16) == true);
INFO("Support::alignUp()");
EXPECT(Support::alignUp<size_t>(0xFFFF, 4) == 0x10000);
EXPECT(Support::alignUp<size_t>(0xFFF4, 4) == 0x0FFF4);
EXPECT(Support::alignUp<size_t>(0xFFF8, 8) == 0x0FFF8);
EXPECT(Support::alignUp<size_t>(0xFFF0, 16) == 0x0FFF0);
EXPECT(Support::alignUp<size_t>(0xFFF0, 32) == 0x10000);
INFO("Support::alignUpDiff()");
EXPECT(Support::alignUpDiff<size_t>(0xFFFF, 4) == 1);
EXPECT(Support::alignUpDiff<size_t>(0xFFF4, 4) == 0);
EXPECT(Support::alignUpDiff<size_t>(0xFFF8, 8) == 0);
EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 16) == 0);
EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 32) == 16);
INFO("Support::alignUpPowerOf2()");
EXPECT(Support::alignUpPowerOf2<size_t>(0x0000) == 0x00000);
EXPECT(Support::alignUpPowerOf2<size_t>(0xFFFF) == 0x10000);
EXPECT(Support::alignUpPowerOf2<size_t>(0xF123) == 0x10000);
EXPECT(Support::alignUpPowerOf2<size_t>(0x0F00) == 0x01000);
EXPECT(Support::alignUpPowerOf2<size_t>(0x0100) == 0x00100);
EXPECT(Support::alignUpPowerOf2<size_t>(0x1001) == 0x02000);
}
static void testBitUtils() noexcept {
uint32_t i;
INFO("Support::shl() / shr()");
EXPECT(Support::shl(int32_t(0x00001111), 16) == int32_t(0x11110000u));
EXPECT(Support::shl(uint32_t(0x00001111), 16) == uint32_t(0x11110000u));
EXPECT(Support::shr(int32_t(0x11110000u), 16) == int32_t(0x00001111u));
EXPECT(Support::shr(uint32_t(0x11110000u), 16) == uint32_t(0x00001111u));
EXPECT(Support::sar(int32_t(0xFFFF0000u), 16) == int32_t(0xFFFFFFFFu));
EXPECT(Support::sar(uint32_t(0xFFFF0000u), 16) == uint32_t(0xFFFFFFFFu));
INFO("Support::blsi()");
for (i = 0; i < 32; i++) EXPECT(Support::blsi(uint32_t(1) << i) == uint32_t(1) << i);
for (i = 0; i < 31; i++) EXPECT(Support::blsi(uint32_t(3) << i) == uint32_t(1) << i);
for (i = 0; i < 64; i++) EXPECT(Support::blsi(uint64_t(1) << i) == uint64_t(1) << i);
for (i = 0; i < 63; i++) EXPECT(Support::blsi(uint64_t(3) << i) == uint64_t(1) << i);
INFO("Support::ctz()");
for (i = 0; i < 32; i++) EXPECT(Support::ctz(uint32_t(1) << i) == i);
for (i = 0; i < 64; i++) EXPECT(Support::ctz(uint64_t(1) << i) == i);
for (i = 0; i < 32; i++) EXPECT(Support::constCtz(uint32_t(1) << i) == i);
for (i = 0; i < 64; i++) EXPECT(Support::constCtz(uint64_t(1) << i) == i);
INFO("Support::bitMask()");
EXPECT(Support::bitMask(0, 1, 7) == 0x83u);
for (i = 0; i < 32; i++)
EXPECT(Support::bitMask(i) == (1u << i));
INFO("Support::bitTest()");
for (i = 0; i < 32; i++) {
EXPECT(Support::bitTest((1 << i), i) == true, "Support::bitTest(%X, %u) should return true", (1 << i), i);
}
INFO("Support::lsbMask<uint32_t>()");
for (i = 0; i < 32; i++) {
uint32_t expectedBits = 0;
for (uint32_t b = 0; b < i; b++)
expectedBits |= uint32_t(1) << b;
EXPECT(Support::lsbMask<uint32_t>(i) == expectedBits);
}
INFO("Support::lsbMask<uint64_t>()");
for (i = 0; i < 64; i++) {
uint64_t expectedBits = 0;
for (uint32_t b = 0; b < i; b++)
expectedBits |= uint64_t(1) << b;
EXPECT(Support::lsbMask<uint64_t>(i) == expectedBits);
}
INFO("Support::popcnt()");
for (i = 0; i < 32; i++) EXPECT(Support::popcnt((uint32_t(1) << i)) == 1);
for (i = 0; i < 64; i++) EXPECT(Support::popcnt((uint64_t(1) << i)) == 1);
EXPECT(Support::popcnt(0x000000F0) == 4);
EXPECT(Support::popcnt(0x10101010) == 4);
EXPECT(Support::popcnt(0xFF000000) == 8);
EXPECT(Support::popcnt(0xFFFFFFF7) == 31);
EXPECT(Support::popcnt(0x7FFFFFFF) == 31);
INFO("Support::isPowerOf2()");
for (i = 0; i < 64; i++) {
EXPECT(Support::isPowerOf2(uint64_t(1) << i) == true);
EXPECT(Support::isPowerOf2((uint64_t(1) << i) ^ 0x001101) == false);
}
}
static void testIntUtils() noexcept {
INFO("Support::byteswap()");
EXPECT(Support::byteswap32(int32_t(0x01020304)) == int32_t(0x04030201));
EXPECT(Support::byteswap32(uint32_t(0x01020304)) == uint32_t(0x04030201));
INFO("Support::bytepack()");
union BytePackData {
uint8_t bytes[4];
uint32_t u32;
} bpdata;
bpdata.u32 = Support::bytepack32_4x8(0x00, 0x11, 0x22, 0x33);
EXPECT(bpdata.bytes[0] == 0x00);
EXPECT(bpdata.bytes[1] == 0x11);
EXPECT(bpdata.bytes[2] == 0x22);
EXPECT(bpdata.bytes[3] == 0x33);
INFO("Support::isBetween()");
EXPECT(Support::isBetween<int>(10 , 10, 20) == true);
EXPECT(Support::isBetween<int>(11 , 10, 20) == true);
EXPECT(Support::isBetween<int>(20 , 10, 20) == true);
EXPECT(Support::isBetween<int>(9 , 10, 20) == false);
EXPECT(Support::isBetween<int>(21 , 10, 20) == false);
EXPECT(Support::isBetween<int>(101, 10, 20) == false);
INFO("Support::isInt8()");
EXPECT(Support::isInt8(-128) == true);
EXPECT(Support::isInt8( 127) == true);
EXPECT(Support::isInt8(-129) == false);
EXPECT(Support::isInt8( 128) == false);
INFO("Support::isInt16()");
EXPECT(Support::isInt16(-32768) == true);
EXPECT(Support::isInt16( 32767) == true);
EXPECT(Support::isInt16(-32769) == false);
EXPECT(Support::isInt16( 32768) == false);
INFO("Support::isInt32()");
EXPECT(Support::isInt32( 2147483647 ) == true);
EXPECT(Support::isInt32(-2147483647 - 1) == true);
EXPECT(Support::isInt32(uint64_t(2147483648u)) == false);
EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu)) == false);
EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu) + 1) == false);
INFO("Support::isUInt8()");
EXPECT(Support::isUInt8(0) == true);
EXPECT(Support::isUInt8(255) == true);
EXPECT(Support::isUInt8(256) == false);
EXPECT(Support::isUInt8(-1) == false);
INFO("Support::isUInt12()");
EXPECT(Support::isUInt12(0) == true);
EXPECT(Support::isUInt12(4095) == true);
EXPECT(Support::isUInt12(4096) == false);
EXPECT(Support::isUInt12(-1) == false);
INFO("Support::isUInt16()");
EXPECT(Support::isUInt16(0) == true);
EXPECT(Support::isUInt16(65535) == true);
EXPECT(Support::isUInt16(65536) == false);
EXPECT(Support::isUInt16(-1) == false);
INFO("Support::isUInt32()");
EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF)) == true);
EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF) + 1) == false);
EXPECT(Support::isUInt32(-1) == false);
}
static void testReadWrite() noexcept {
INFO("Support::readX() / writeX()");
uint8_t arr[32] = { 0 };
Support::writeU16uBE(arr + 1, 0x0102u);
Support::writeU16uBE(arr + 3, 0x0304u);
EXPECT(Support::readU32uBE(arr + 1) == 0x01020304u);
EXPECT(Support::readU32uLE(arr + 1) == 0x04030201u);
EXPECT(Support::readU32uBE(arr + 2) == 0x02030400u);
EXPECT(Support::readU32uLE(arr + 2) == 0x00040302u);
Support::writeU32uLE(arr + 5, 0x05060708u);
EXPECT(Support::readU64uBE(arr + 1) == 0x0102030408070605u);
EXPECT(Support::readU64uLE(arr + 1) == 0x0506070804030201u);
Support::writeU64uLE(arr + 7, 0x1122334455667788u);
EXPECT(Support::readU32uBE(arr + 8) == 0x77665544u);
}
static void testBitVector() noexcept {
INFO("Support::bitVectorOp");
{
uint32_t vec[3] = { 0 };
Support::bitVectorFill(vec, 1, 64);
EXPECT(vec[0] == 0xFFFFFFFEu);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x00000001u);
Support::bitVectorClear(vec, 1, 1);
EXPECT(vec[0] == 0xFFFFFFFCu);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x00000001u);
Support::bitVectorFill(vec, 0, 32);
EXPECT(vec[0] == 0xFFFFFFFFu);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x00000001u);
Support::bitVectorClear(vec, 0, 32);
EXPECT(vec[0] == 0x00000000u);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x00000001u);
Support::bitVectorFill(vec, 1, 30);
EXPECT(vec[0] == 0x7FFFFFFEu);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x00000001u);
Support::bitVectorClear(vec, 1, 95);
EXPECT(vec[0] == 0x00000000u);
EXPECT(vec[1] == 0x00000000u);
EXPECT(vec[2] == 0x00000000u);
Support::bitVectorFill(vec, 32, 64);
EXPECT(vec[0] == 0x00000000u);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0xFFFFFFFFu);
Support::bitVectorSetBit(vec, 1, true);
EXPECT(vec[0] == 0x00000002u);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0xFFFFFFFFu);
Support::bitVectorSetBit(vec, 95, false);
EXPECT(vec[0] == 0x00000002u);
EXPECT(vec[1] == 0xFFFFFFFFu);
EXPECT(vec[2] == 0x7FFFFFFFu);
Support::bitVectorClear(vec, 33, 32);
EXPECT(vec[0] == 0x00000002u);
EXPECT(vec[1] == 0x00000001u);
EXPECT(vec[2] == 0x7FFFFFFEu);
}
INFO("Support::bitVectorIndexOf");
{
uint32_t vec1[1] = { 0x80000000 };
EXPECT(Support::bitVectorIndexOf(vec1, 0, true) == 31);
EXPECT(Support::bitVectorIndexOf(vec1, 1, true) == 31);
EXPECT(Support::bitVectorIndexOf(vec1, 31, true) == 31);
uint32_t vec2[2] = { 0x00000000, 0x80000000 };
EXPECT(Support::bitVectorIndexOf(vec2, 0, true) == 63);
EXPECT(Support::bitVectorIndexOf(vec2, 1, true) == 63);
EXPECT(Support::bitVectorIndexOf(vec2, 31, true) == 63);
EXPECT(Support::bitVectorIndexOf(vec2, 32, true) == 63);
EXPECT(Support::bitVectorIndexOf(vec2, 33, true) == 63);
EXPECT(Support::bitVectorIndexOf(vec2, 63, true) == 63);
uint32_t vec3[3] = { 0x00000001, 0x00000000, 0x80000000 };
EXPECT(Support::bitVectorIndexOf(vec3, 0, true) == 0);
EXPECT(Support::bitVectorIndexOf(vec3, 1, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 2, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 31, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 32, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 63, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 64, true) == 95);
EXPECT(Support::bitVectorIndexOf(vec3, 95, true) == 95);
uint32_t vec4[3] = { ~vec3[0], ~vec3[1], ~vec3[2] };
EXPECT(Support::bitVectorIndexOf(vec4, 0, false) == 0);
EXPECT(Support::bitVectorIndexOf(vec4, 1, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 2, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 31, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 32, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 63, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 64, false) == 95);
EXPECT(Support::bitVectorIndexOf(vec4, 95, false) == 95);
}
INFO("Support::BitWordIterator<uint32_t>");
{
Support::BitWordIterator<uint32_t> it(0x80000F01u);
EXPECT(it.hasNext());
EXPECT(it.next() == 0);
EXPECT(it.hasNext());
EXPECT(it.next() == 8);
EXPECT(it.hasNext());
EXPECT(it.next() == 9);
EXPECT(it.hasNext());
EXPECT(it.next() == 10);
EXPECT(it.hasNext());
EXPECT(it.next() == 11);
EXPECT(it.hasNext());
EXPECT(it.next() == 31);
EXPECT(!it.hasNext());
// No bits set.
it.init(0x00000000u);
ASMJIT_ASSERT(!it.hasNext());
// Only first bit set.
it.init(0x00000001u);
EXPECT(it.hasNext());
EXPECT(it.next() == 0);
ASMJIT_ASSERT(!it.hasNext());
// Only last bit set (special case).
it.init(0x80000000u);
ASMJIT_ASSERT(it.hasNext());
ASMJIT_ASSERT(it.next() == 31);
ASMJIT_ASSERT(!it.hasNext());
}
INFO("Support::BitWordIterator<uint64_t>");
{
Support::BitWordIterator<uint64_t> it(uint64_t(1) << 63);
ASMJIT_ASSERT(it.hasNext());
ASMJIT_ASSERT(it.next() == 63);
ASMJIT_ASSERT(!it.hasNext());
}
INFO("Support::BitVectorIterator<uint32_t>");
{
// Border cases.
static const uint32_t bitsNone[] = { 0xFFFFFFFFu };
Support::BitVectorIterator<uint32_t> it(bitsNone, 0);
EXPECT(!it.hasNext());
it.init(bitsNone, 0, 1);
EXPECT(!it.hasNext());
it.init(bitsNone, 0, 128);
EXPECT(!it.hasNext());
static const uint32_t bits1[] = { 0x80000008u, 0x80000001u, 0x00000000u, 0x80000000u, 0x00000000u, 0x00000000u, 0x00003000u };
it.init(bits1, ASMJIT_ARRAY_SIZE(bits1));
EXPECT(it.hasNext());
EXPECT(it.next() == 3);
EXPECT(it.hasNext());
EXPECT(it.next() == 31);
EXPECT(it.hasNext());
EXPECT(it.next() == 32);
EXPECT(it.hasNext());
EXPECT(it.next() == 63);
EXPECT(it.hasNext());
EXPECT(it.next() == 127);
EXPECT(it.hasNext());
EXPECT(it.next() == 204);
EXPECT(it.hasNext());
EXPECT(it.next() == 205);
EXPECT(!it.hasNext());
it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 4);
EXPECT(it.hasNext());
EXPECT(it.next() == 31);
it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 64);
EXPECT(it.hasNext());
EXPECT(it.next() == 127);
it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 127);
EXPECT(it.hasNext());
EXPECT(it.next() == 127);
static const uint32_t bits2[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
EXPECT(it.hasNext());
EXPECT(it.next() == 31);
EXPECT(it.hasNext());
EXPECT(it.next() == 63);
EXPECT(it.hasNext());
EXPECT(it.next() == 127);
EXPECT(!it.hasNext());
static const uint32_t bits3[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x00000000u };
it.init(bits3, ASMJIT_ARRAY_SIZE(bits3));
EXPECT(!it.hasNext());
static const uint32_t bits4[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x80000000u };
it.init(bits4, ASMJIT_ARRAY_SIZE(bits4));
EXPECT(it.hasNext());
EXPECT(it.next() == 127);
EXPECT(!it.hasNext());
}
INFO("Support::BitVectorIterator<uint64_t>");
{
static const uint64_t bits1[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
Support::BitVectorIterator<uint64_t> it(bits1, ASMJIT_ARRAY_SIZE(bits1));
EXPECT(it.hasNext());
EXPECT(it.next() == 31);
EXPECT(it.hasNext());
EXPECT(it.next() == 95);
EXPECT(it.hasNext());
EXPECT(it.next() == 223);
EXPECT(!it.hasNext());
static const uint64_t bits2[] = { 0x8000000000000000u, 0, 0, 0 };
it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
EXPECT(it.hasNext());
EXPECT(it.next() == 63);
EXPECT(!it.hasNext());
}
}
static void testSorting() noexcept {
INFO("Support::qSort() - Testing qsort and isort of predefined arrays");
{
constexpr size_t kArraySize = 11;
int ref_[kArraySize] = { -4, -2, -1, 0, 1, 9, 12, 13, 14, 19, 22 };
int arr1[kArraySize] = { 0, 1, -1, 19, 22, 14, -4, 9, 12, 13, -2 };
int arr2[kArraySize];
memcpy(arr2, arr1, kArraySize * sizeof(int));
Support::iSort(arr1, kArraySize);
Support::qSort(arr2, kArraySize);
testArrays(arr1, ref_, kArraySize);
testArrays(arr2, ref_, kArraySize);
}
INFO("Support::qSort() - Testing qsort and isort of artificial arrays");
{
constexpr size_t kArraySize = 200;
int arr1[kArraySize];
int arr2[kArraySize];
int ref_[kArraySize];
for (size_t size = 2; size < kArraySize; size++) {
for (size_t i = 0; i < size; i++) {
arr1[i] = int(size - 1 - i);
arr2[i] = int(size - 1 - i);
ref_[i] = int(i);
}
Support::iSort(arr1, size);
Support::qSort(arr2, size);
testArrays(arr1, ref_, size);
testArrays(arr2, ref_, size);
}
}
INFO("Support::qSort() - Testing qsort and isort with an unstable compare function");
{
constexpr size_t kArraySize = 5;
float arr1[kArraySize] = { 1.0f, 0.0f, 3.0f, -1.0f, std::numeric_limits<float>::quiet_NaN() };
float arr2[kArraySize] = { };
memcpy(arr2, arr1, kArraySize * sizeof(float));
// We don't test as it's undefined where the NaN would be.
Support::iSort(arr1, kArraySize);
Support::qSort(arr2, kArraySize);
}
}
UNIT(support) {
testAlignment();
testBitUtils();
testIntUtils();
testReadWrite();
testBitVector();
testSorting();
}
#endif
ASMJIT_END_NAMESPACE

File diff suppressed because it is too large.

@ -0,0 +1,37 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/target.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Target - Construction / Destruction]
// ============================================================================
Target::Target() noexcept
: _environment() {}
Target::~Target() noexcept {}
ASMJIT_END_NAMESPACE

@ -0,0 +1,175 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_TARGET_H_INCLUDED
#define ASMJIT_CORE_TARGET_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/func.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CodeInfo]
// ============================================================================
#ifndef ASMJIT_NO_DEPRECATED
//! Basic information about a code (or target). It describes its architecture,
//! code generation mode (or optimization level), and base address.
class ASMJIT_DEPRECATED_STRUCT("Use Environment instead of CodeInfo") CodeInfo {
public:
//! Environment information.
Environment _environment;
//! Base address.
uint64_t _baseAddress;
//! \name Construction & Destruction
//! \{
inline CodeInfo() noexcept
: _environment(),
_baseAddress(Globals::kNoBaseAddress) {}
inline explicit CodeInfo(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
: _environment(arch, subArch),
_baseAddress(baseAddress) {}
inline explicit CodeInfo(const Environment& environment, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
: _environment(environment),
_baseAddress(baseAddress) {}
inline CodeInfo(const CodeInfo& other) noexcept { init(other); }
inline bool isInitialized() const noexcept {
return _environment.arch() != Environment::kArchUnknown;
}
inline void init(const CodeInfo& other) noexcept {
*this = other;
}
inline void init(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
_environment.init(arch, subArch);
_baseAddress = baseAddress;
}
inline void reset() noexcept {
_environment.reset();
_baseAddress = Globals::kNoBaseAddress;
}
//! \}
//! \name Overloaded Operators
//! \{
inline CodeInfo& operator=(const CodeInfo& other) noexcept = default;
inline bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
inline bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
//! \}
//! \name Accessors
//! \{
//! Returns the target environment information, see \ref Environment.
inline const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _environment.subArch(); }
//! Returns the native size of the target's architecture GP register.
inline uint32_t gpSize() const noexcept { return _environment.registerSize(); }
//! Tests whether this CodeInfo has a base address set.
inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
//! Returns the base address or \ref Globals::kNoBaseAddress if it's not set.
inline uint64_t baseAddress() const noexcept { return _baseAddress; }
//! Sets base address to `p`.
inline void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
//! Resets base address (implicitly sets it to \ref Globals::kNoBaseAddress).
inline void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
//! \}
};
#endif // !ASMJIT_NO_DEPRECATED
// ============================================================================
// [asmjit::Target]
// ============================================================================
//! Target is an abstract class that describes a machine code target.
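//!
//! A concrete target, such as \ref JitRuntime, describes the environment it
//! generates code for; an illustrative sketch (assumes JIT support is compiled in):
//!
//! ```
//! JitRuntime rt;                              // JitRuntime is a Target
//! uint32_t arch = rt.environment().arch();    // e.g. Environment::kArchX64 on x64
//! ```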
class ASMJIT_VIRTAPI Target {
public:
ASMJIT_BASE_CLASS(Target)
ASMJIT_NONCOPYABLE(Target)
//! Target environment information.
Environment _environment;
//! \name Construction & Destruction
//! \{
//! Creates a `Target` instance.
ASMJIT_API Target() noexcept;
//! Destroys the `Target` instance.
ASMJIT_API virtual ~Target() noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the target environment information, see \ref Environment.
//!
//! The environment can be used to setup a CodeHolder in case you plan to
//! generate code compatible with, and executable by, this target.
inline const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _environment.subArch(); }
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use environment() instead")
inline CodeInfo codeInfo() const noexcept { return CodeInfo(_environment); }
ASMJIT_DEPRECATED("Use environment().format() instead")
inline uint32_t targetType() const noexcept { return _environment.format(); }
#endif // !ASMJIT_NO_DEPRECATED
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_TARGET_H_INCLUDED

@ -0,0 +1,92 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/misc_p.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Type]
// ============================================================================
namespace Type {
template<uint32_t TYPE_ID>
struct BaseOfTypeId {
static constexpr uint32_t kTypeId =
isBase (TYPE_ID) ? TYPE_ID :
isMask8 (TYPE_ID) ? kIdU8 :
isMask16(TYPE_ID) ? kIdU16 :
isMask32(TYPE_ID) ? kIdU32 :
isMask64(TYPE_ID) ? kIdU64 :
isMmx32 (TYPE_ID) ? kIdI32 :
isMmx64 (TYPE_ID) ? kIdI64 :
isVec32 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec32Start :
isVec64 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec64Start :
isVec128(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec128Start :
isVec256(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec256Start :
isVec512(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec512Start : 0;
};
template<uint32_t TYPE_ID>
struct SizeOfTypeId {
static constexpr uint32_t kTypeSize =
isInt8 (TYPE_ID) ? 1 :
isUInt8 (TYPE_ID) ? 1 :
isInt16 (TYPE_ID) ? 2 :
isUInt16 (TYPE_ID) ? 2 :
isInt32 (TYPE_ID) ? 4 :
isUInt32 (TYPE_ID) ? 4 :
isInt64 (TYPE_ID) ? 8 :
isUInt64 (TYPE_ID) ? 8 :
isFloat32(TYPE_ID) ? 4 :
isFloat64(TYPE_ID) ? 8 :
isFloat80(TYPE_ID) ? 10 :
isMask8 (TYPE_ID) ? 1 :
isMask16 (TYPE_ID) ? 2 :
isMask32 (TYPE_ID) ? 4 :
isMask64 (TYPE_ID) ? 8 :
isMmx32 (TYPE_ID) ? 4 :
isMmx64 (TYPE_ID) ? 8 :
isVec32 (TYPE_ID) ? 4 :
isVec64 (TYPE_ID) ? 8 :
isVec128 (TYPE_ID) ? 16 :
isVec256 (TYPE_ID) ? 32 :
isVec512 (TYPE_ID) ? 64 : 0;
};
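// Build the 256-entry lookup tables at compile time - `baseOf[]` maps each
// TypeId to its base (scalar) TypeId and `sizeOf[]` maps it to its size in bytes.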
const TypeData _typeData = {
#define VALUE(x) BaseOfTypeId<x>::kTypeId
{ ASMJIT_LOOKUP_TABLE_256(VALUE, 0) },
#undef VALUE
#define VALUE(x) SizeOfTypeId<x>::kTypeSize
{ ASMJIT_LOOKUP_TABLE_256(VALUE, 0) }
#undef VALUE
};
} // {Type}
ASMJIT_END_NAMESPACE

@ -0,0 +1,375 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_TYPE_H_INCLUDED
#define ASMJIT_CORE_TYPE_H_INCLUDED
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::Type]
// ============================================================================
//! Provides a minimalist type system used by the AsmJit library.
namespace Type {
//! TypeId.
//!
//! This is additional information that can be used to describe the value type
//! of a physical or virtual register. It's used mostly by BaseCompiler to describe
//! register representation (the group of data stored in the register and the
//! width used), and it's also used by APIs that describe and work with
//! function signatures.
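//!
//! For example (an illustrative sketch):
//!
//! ```
//! uint32_t id = Type::kIdF32x4;        // 128-bit vector of four floats
//! uint32_t sz = Type::sizeOf(id);      // 16 (bytes)
//! bool vec128 = Type::isVec128(id);    // true
//! ```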
enum Id : uint32_t {
kIdVoid = 0, //!< Void type.
_kIdBaseStart = 32,
_kIdBaseEnd = 44,
_kIdIntStart = 32,
_kIdIntEnd = 41,
kIdIntPtr = 32, //!< Abstract signed integer type that has a native size.
kIdUIntPtr = 33, //!< Abstract unsigned integer type that has a native size.
kIdI8 = 34, //!< 8-bit signed integer type.
kIdU8 = 35, //!< 8-bit unsigned integer type.
kIdI16 = 36, //!< 16-bit signed integer type.
kIdU16 = 37, //!< 16-bit unsigned integer type.
kIdI32 = 38, //!< 32-bit signed integer type.
kIdU32 = 39, //!< 32-bit unsigned integer type.
kIdI64 = 40, //!< 64-bit signed integer type.
kIdU64 = 41, //!< 64-bit unsigned integer type.
_kIdFloatStart = 42,
_kIdFloatEnd = 44,
kIdF32 = 42, //!< 32-bit floating point type.
kIdF64 = 43, //!< 64-bit floating point type.
kIdF80 = 44, //!< 80-bit floating point type.
_kIdMaskStart = 45,
_kIdMaskEnd = 48,
kIdMask8 = 45, //!< 8-bit opmask register (K).
kIdMask16 = 46, //!< 16-bit opmask register (K).
kIdMask32 = 47, //!< 32-bit opmask register (K).
kIdMask64 = 48, //!< 64-bit opmask register (K).
_kIdMmxStart = 49,
_kIdMmxEnd = 50,
kIdMmx32 = 49, //!< 64-bit MMX register only used for 32 bits.
kIdMmx64 = 50, //!< 64-bit MMX register.
_kIdVec32Start = 51,
_kIdVec32End = 60,
kIdI8x4 = 51,
kIdU8x4 = 52,
kIdI16x2 = 53,
kIdU16x2 = 54,
kIdI32x1 = 55,
kIdU32x1 = 56,
kIdF32x1 = 59,
_kIdVec64Start = 61,
_kIdVec64End = 70,
kIdI8x8 = 61,
kIdU8x8 = 62,
kIdI16x4 = 63,
kIdU16x4 = 64,
kIdI32x2 = 65,
kIdU32x2 = 66,
kIdI64x1 = 67,
kIdU64x1 = 68,
kIdF32x2 = 69,
kIdF64x1 = 70,
_kIdVec128Start = 71,
_kIdVec128End = 80,
kIdI8x16 = 71,
kIdU8x16 = 72,
kIdI16x8 = 73,
kIdU16x8 = 74,
kIdI32x4 = 75,
kIdU32x4 = 76,
kIdI64x2 = 77,
kIdU64x2 = 78,
kIdF32x4 = 79,
kIdF64x2 = 80,
_kIdVec256Start = 81,
_kIdVec256End = 90,
kIdI8x32 = 81,
kIdU8x32 = 82,
kIdI16x16 = 83,
kIdU16x16 = 84,
kIdI32x8 = 85,
kIdU32x8 = 86,
kIdI64x4 = 87,
kIdU64x4 = 88,
kIdF32x8 = 89,
kIdF64x4 = 90,
_kIdVec512Start = 91,
_kIdVec512End = 100,
kIdI8x64 = 91,
kIdU8x64 = 92,
kIdI16x32 = 93,
kIdU16x32 = 94,
kIdI32x16 = 95,
kIdU32x16 = 96,
kIdI64x8 = 97,
kIdU64x8 = 98,
kIdF32x16 = 99,
kIdF64x8 = 100,
kIdCount = 101,
kIdMax = 255
};
struct TypeData {
uint8_t baseOf[kIdMax + 1];
uint8_t sizeOf[kIdMax + 1];
};
ASMJIT_VARAPI const TypeData _typeData;
static constexpr bool isVoid(uint32_t typeId) noexcept { return typeId == 0; }
static constexpr bool isValid(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdVec512End; }
static constexpr bool isBase(uint32_t typeId) noexcept { return typeId >= _kIdBaseStart && typeId <= _kIdBaseEnd; }
static constexpr bool isAbstract(uint32_t typeId) noexcept { return typeId >= kIdIntPtr && typeId <= kIdUIntPtr; }
static constexpr bool isInt(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdIntEnd; }
static constexpr bool isInt8(uint32_t typeId) noexcept { return typeId == kIdI8; }
static constexpr bool isUInt8(uint32_t typeId) noexcept { return typeId == kIdU8; }
static constexpr bool isInt16(uint32_t typeId) noexcept { return typeId == kIdI16; }
static constexpr bool isUInt16(uint32_t typeId) noexcept { return typeId == kIdU16; }
static constexpr bool isInt32(uint32_t typeId) noexcept { return typeId == kIdI32; }
static constexpr bool isUInt32(uint32_t typeId) noexcept { return typeId == kIdU32; }
static constexpr bool isInt64(uint32_t typeId) noexcept { return typeId == kIdI64; }
static constexpr bool isUInt64(uint32_t typeId) noexcept { return typeId == kIdU64; }
static constexpr bool isGp8(uint32_t typeId) noexcept { return typeId >= kIdI8 && typeId <= kIdU8; }
static constexpr bool isGp16(uint32_t typeId) noexcept { return typeId >= kIdI16 && typeId <= kIdU16; }
static constexpr bool isGp32(uint32_t typeId) noexcept { return typeId >= kIdI32 && typeId <= kIdU32; }
static constexpr bool isGp64(uint32_t typeId) noexcept { return typeId >= kIdI64 && typeId <= kIdU64; }
static constexpr bool isFloat(uint32_t typeId) noexcept { return typeId >= _kIdFloatStart && typeId <= _kIdFloatEnd; }
static constexpr bool isFloat32(uint32_t typeId) noexcept { return typeId == kIdF32; }
static constexpr bool isFloat64(uint32_t typeId) noexcept { return typeId == kIdF64; }
static constexpr bool isFloat80(uint32_t typeId) noexcept { return typeId == kIdF80; }
static constexpr bool isMask(uint32_t typeId) noexcept { return typeId >= _kIdMaskStart && typeId <= _kIdMaskEnd; }
static constexpr bool isMask8(uint32_t typeId) noexcept { return typeId == kIdMask8; }
static constexpr bool isMask16(uint32_t typeId) noexcept { return typeId == kIdMask16; }
static constexpr bool isMask32(uint32_t typeId) noexcept { return typeId == kIdMask32; }
static constexpr bool isMask64(uint32_t typeId) noexcept { return typeId == kIdMask64; }
static constexpr bool isMmx(uint32_t typeId) noexcept { return typeId >= _kIdMmxStart && typeId <= _kIdMmxEnd; }
static constexpr bool isMmx32(uint32_t typeId) noexcept { return typeId == kIdMmx32; }
static constexpr bool isMmx64(uint32_t typeId) noexcept { return typeId == kIdMmx64; }
static constexpr bool isVec(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec512End; }
static constexpr bool isVec32(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec32End; }
static constexpr bool isVec64(uint32_t typeId) noexcept { return typeId >= _kIdVec64Start && typeId <= _kIdVec64End; }
static constexpr bool isVec128(uint32_t typeId) noexcept { return typeId >= _kIdVec128Start && typeId <= _kIdVec128End; }
static constexpr bool isVec256(uint32_t typeId) noexcept { return typeId >= _kIdVec256Start && typeId <= _kIdVec256End; }
static constexpr bool isVec512(uint32_t typeId) noexcept { return typeId >= _kIdVec512Start && typeId <= _kIdVec512End; }
//! \cond
enum TypeCategory : uint32_t {
kTypeCategoryUnknown = 0,
kTypeCategoryEnum = 1,
kTypeCategoryIntegral = 2,
kTypeCategoryFloatingPoint = 3,
kTypeCategoryFunction = 4
};
template<typename T, uint32_t Category>
struct IdOfT_ByCategory {}; // Fails if not specialized.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryIntegral> {
enum : uint32_t {
kTypeId = (sizeof(T) == 1 && std::is_signed<T>::value) ? kIdI8 :
(sizeof(T) == 1 && !std::is_signed<T>::value) ? kIdU8 :
(sizeof(T) == 2 && std::is_signed<T>::value) ? kIdI16 :
(sizeof(T) == 2 && !std::is_signed<T>::value) ? kIdU16 :
(sizeof(T) == 4 && std::is_signed<T>::value) ? kIdI32 :
(sizeof(T) == 4 && !std::is_signed<T>::value) ? kIdU32 :
(sizeof(T) == 8 && std::is_signed<T>::value) ? kIdI64 :
(sizeof(T) == 8 && !std::is_signed<T>::value) ? kIdU64 : kIdVoid
};
};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
enum : uint32_t {
kTypeId = (sizeof(T) == 4 ) ? kIdF32 :
(sizeof(T) == 8 ) ? kIdF64 :
(sizeof(T) >= 10) ? kIdF80 : kIdVoid
};
};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryEnum>
: public IdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFunction> {
enum: uint32_t { kTypeId = kIdUIntPtr };
};
//! \endcond
//! The IdOfT<> template allows getting a TypeId from a C++ type `T`.
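//!
//! An illustrative compile-time check (both ids are resolved at compile time):
//!
//! ```
//! static_assert(Type::IdOfT<int32_t>::kTypeId == Type::kIdI32, "");
//! static_assert(Type::IdOfT<void*>::kTypeId == Type::kIdUIntPtr, "");
//! ```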
template<typename T>
struct IdOfT
#ifdef _DOXYGEN
//! TypeId of C++ type `T`.
static constexpr uint32_t kTypeId = _TypeIdDeducedAtCompileTime_;
#else
: public IdOfT_ByCategory<T,
std::is_enum<T>::value ? kTypeCategoryEnum :
std::is_integral<T>::value ? kTypeCategoryIntegral :
std::is_floating_point<T>::value ? kTypeCategoryFloatingPoint :
std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown>
#endif
{};
//! \cond
template<typename T>
struct IdOfT<T*> { enum : uint32_t { kTypeId = kIdUIntPtr }; };
template<typename T>
struct IdOfT<T&> { enum : uint32_t { kTypeId = kIdUIntPtr }; };
//! \endcond
static inline uint32_t baseOf(uint32_t typeId) noexcept {
ASMJIT_ASSERT(typeId <= kIdMax);
return _typeData.baseOf[typeId];
}
static inline uint32_t sizeOf(uint32_t typeId) noexcept {
ASMJIT_ASSERT(typeId <= kIdMax);
return _typeData.sizeOf[typeId];
}
//! Returns the offset needed to convert a `kIdIntPtr` or `kIdUIntPtr` TypeId
//! into a type that matches `registerSize` (general-purpose register size).
//! Converting such an abstract TypeId is then just a matter of adding this offset to it.
//!
//! For example:
//!
//! ```
//! uint32_t registerSize = '4' or '8';
//! uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
//!
//! uint32_t typeId = 'some type-id';
//!
//! // Normalize some typeId into a non-abstract typeId.
//! if (Type::isAbstract(typeId)) typeId += deabstractDelta;
//!
//! // The same, but by using Type::deabstract() function.
//! typeId = Type::deabstract(typeId, deabstractDelta);
//! ```
static constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
return registerSize >= 8 ? kIdI64 - kIdIntPtr : kIdI32 - kIdIntPtr;
}
static constexpr uint32_t deabstract(uint32_t typeId, uint32_t deabstractDelta) noexcept {
return isAbstract(typeId) ? typeId + deabstractDelta : typeId;
}
//! bool as C++ type-name.
struct Bool {};
//! int8_t as C++ type-name.
struct I8 {};
//! uint8_t as C++ type-name.
struct U8 {};
//! int16_t as C++ type-name.
struct I16 {};
//! uint16_t as C++ type-name.
struct U16 {};
//! int32_t as C++ type-name.
struct I32 {};
//! uint32_t as C++ type-name.
struct U32 {};
//! int64_t as C++ type-name.
struct I64 {};
//! uint64_t as C++ type-name.
struct U64 {};
//! intptr_t as C++ type-name.
struct IPtr {};
//! uintptr_t as C++ type-name.
struct UPtr {};
//! float as C++ type-name.
struct F32 {};
//! double as C++ type-name.
struct F64 {};
} // {Type}
// ============================================================================
// [ASMJIT_DEFINE_TYPE_ID]
// ============================================================================
//! \cond
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace Type { \
template<> \
struct IdOfT<T> { \
enum : uint32_t { kTypeId = TYPE_ID }; \
}; \
}
ASMJIT_DEFINE_TYPE_ID(void, kIdVoid);
ASMJIT_DEFINE_TYPE_ID(Bool, kIdU8);
ASMJIT_DEFINE_TYPE_ID(I8 , kIdI8);
ASMJIT_DEFINE_TYPE_ID(U8 , kIdU8);
ASMJIT_DEFINE_TYPE_ID(I16 , kIdI16);
ASMJIT_DEFINE_TYPE_ID(U16 , kIdU16);
ASMJIT_DEFINE_TYPE_ID(I32 , kIdI32);
ASMJIT_DEFINE_TYPE_ID(U32 , kIdU32);
ASMJIT_DEFINE_TYPE_ID(I64 , kIdI64);
ASMJIT_DEFINE_TYPE_ID(U64 , kIdU64);
ASMJIT_DEFINE_TYPE_ID(IPtr, kIdIntPtr);
ASMJIT_DEFINE_TYPE_ID(UPtr, kIdUIntPtr);
ASMJIT_DEFINE_TYPE_ID(F32 , kIdF32);
ASMJIT_DEFINE_TYPE_ID(F64 , kIdF64);
//! \endcond
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_TYPE_H_INCLUDED

@ -0,0 +1,640 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
#include "../core/osutils.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/virtmem.h"
#if !defined(_WIN32)
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
// Linux has a `memfd_create` syscall that we would like to use, if available.
#if defined(__linux__)
#include <sys/syscall.h>
#endif
// Apple recently introduced MAP_JIT flag, which we want to use.
#if defined(__APPLE__)
#include <TargetConditionals.h>
#if TARGET_OS_OSX
#include <sys/utsname.h>
#endif
// Older SDK doesn't define `MAP_JIT`.
#ifndef MAP_JIT
#define MAP_JIT 0x800
#endif
#endif
// BSD/OSX: `MAP_ANONYMOUS` is not defined, `MAP_ANON` is.
#if !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#endif
#include <atomic>
#if defined(__APPLE__)
#define ASMJIT_VM_SHM_DETECT 0
#else
#define ASMJIT_VM_SHM_DETECT 1
#endif
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::VirtMem - Utilities]
// ============================================================================
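// Access filters used by allocDualMapping() - index 0 strips write access (used
// for the read/execute view) and index 1 strips execute access (used for the
// read/write view).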
static const uint32_t VirtMem_dualMappingFilter[2] = {
VirtMem::kAccessWrite | VirtMem::kMMapMaxAccessWrite,
VirtMem::kAccessExecute | VirtMem::kMMapMaxAccessExecute
};
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Windows]]
// ============================================================================
#if defined(_WIN32)
struct ScopedHandle {
inline ScopedHandle() noexcept
: value(nullptr) {}
inline ~ScopedHandle() noexcept {
if (value != nullptr)
::CloseHandle(value);
}
HANDLE value;
};
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
SYSTEM_INFO systemInfo;
::GetSystemInfo(&systemInfo);
vmInfo.pageSize = Support::alignUpPowerOf2<uint32_t>(systemInfo.dwPageSize);
vmInfo.pageGranularity = systemInfo.dwAllocationGranularity;
}
// Returns Windows-specific protection flags (protectFlags) from \ref VirtMem::Flags.
static DWORD VirtMem_winProtectFlagsFromFlags(uint32_t flags) noexcept {
DWORD protectFlags;
// READ|WRITE|EXECUTE.
if (flags & VirtMem::kAccessExecute)
protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else if (flags & VirtMem::kAccessRW)
protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
else
protectFlags = PAGE_NOACCESS;
// Any other flags to consider?
return protectFlags;
}
static DWORD VirtMem_winDesiredAccessFromFlags(uint32_t flags) noexcept {
DWORD access = (flags & VirtMem::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
if (flags & VirtMem::kAccessExecute)
access |= FILE_MAP_EXECUTE;
return access;
}
Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
*p = nullptr;
if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument);
DWORD protectFlags = VirtMem_winProtectFlagsFromFlags(flags);
void* result = ::VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, protectFlags);
if (!result)
return DebugUtils::errored(kErrorOutOfMemory);
*p = result;
return kErrorOk;
}
Error VirtMem::release(void* p, size_t size) noexcept {
DebugUtils::unused(size);
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, MEM_RELEASE)))
return DebugUtils::errored(kErrorInvalidArgument);
return kErrorOk;
}
Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
DWORD protectFlags = VirtMem_winProtectFlagsFromFlags(flags);
DWORD oldFlags;
if (::VirtualProtect(p, size, protectFlags, &oldFlags))
return kErrorOk;
return DebugUtils::errored(kErrorInvalidArgument);
}
Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
dm->ro = nullptr;
dm->rw = nullptr;
if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument);
ScopedHandle handle;
handle.value = ::CreateFileMappingW(
INVALID_HANDLE_VALUE,
nullptr,
PAGE_EXECUTE_READWRITE,
(DWORD)(uint64_t(size) >> 32),
(DWORD)(size & 0xFFFFFFFFu),
nullptr);
if (ASMJIT_UNLIKELY(!handle.value))
return DebugUtils::errored(kErrorOutOfMemory);
void* ptr[2];
for (uint32_t i = 0; i < 2; i++) {
uint32_t accessFlags = flags & ~VirtMem_dualMappingFilter[i];
DWORD desiredAccess = VirtMem_winDesiredAccessFromFlags(accessFlags);
ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
if (ptr[i] == nullptr) {
      // Unmap the first view if the second mapping fails; when the first
      // mapping fails there is nothing to unmap yet.
      if (i == 1)
        ::UnmapViewOfFile(ptr[0]);
return DebugUtils::errored(kErrorOutOfMemory);
}
}
dm->ro = ptr[0];
dm->rw = ptr[1];
return kErrorOk;
}
Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
DebugUtils::unused(size);
bool failed = false;
if (!::UnmapViewOfFile(dm->ro))
failed = true;
  if (dm->ro != dm->rw && !::UnmapViewOfFile(dm->rw))
failed = true;
if (failed)
return DebugUtils::errored(kErrorInvalidArgument);
dm->ro = nullptr;
dm->rw = nullptr;
return kErrorOk;
}
#endif
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Posix]]
// ============================================================================
#if !defined(_WIN32)
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
uint32_t pageSize = uint32_t(::getpagesize());
vmInfo.pageSize = pageSize;
vmInfo.pageGranularity = Support::max<uint32_t>(pageSize, 65536);
}
#if !defined(SHM_ANON)
static const char* VirtMem_getTmpDir() noexcept {
const char* tmpDir = getenv("TMPDIR");
return tmpDir ? tmpDir : "/tmp";
}
#endif
// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
static Error VirtMem_asmjitErrorFromErrno(int e) noexcept {
switch (e) {
case EACCES:
case EAGAIN:
case ENODEV:
case EPERM:
return kErrorInvalidState;
case EFBIG:
case ENOMEM:
case EOVERFLOW:
return kErrorOutOfMemory;
case EMFILE:
case ENFILE:
return kErrorTooManyHandles;
default:
return kErrorInvalidArgument;
}
}
// Some operating systems don't allow /dev/shm to be executable. On Linux this
// happens when /dev/shm is mounted with 'noexec', which is enforced by systemd.
// Other operating systems like MacOS also restrict executable permissions regarding
// /dev/shm, so we use a runtime detection before attempting to allocate executable
// memory. Sometimes we don't need the detection as we know it would always result
// in `kShmStrategyTmpDir`.
enum ShmStrategy : uint32_t {
kShmStrategyUnknown = 0,
kShmStrategyDevShm = 1,
kShmStrategyTmpDir = 2
};
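// Helper that creates an anonymous backing file for shared memory, preferring
// `memfd_create()` on Linux and `shm_open(SHM_ANON)` on BSDs, and otherwise
// falling back to `shm_open()` or a file in the temporary directory. The file
// is unlinked and closed automatically when the object goes out of scope.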
class AnonymousMemory {
public:
enum FileType : uint32_t {
kFileTypeNone,
kFileTypeShm,
kFileTypeTmp
};
int _fd;
FileType _fileType;
StringTmp<128> _tmpName;
ASMJIT_INLINE AnonymousMemory() noexcept
: _fd(-1),
_fileType(kFileTypeNone),
_tmpName() {}
ASMJIT_INLINE ~AnonymousMemory() noexcept {
unlink();
close();
}
ASMJIT_INLINE int fd() const noexcept { return _fd; }
Error open(bool preferTmpOverDevShm) noexcept {
#if defined(__linux__) && defined(__NR_memfd_create)
// Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means
// it's not available and we will never call it again (would be pointless).
// Zero initialized, if ever changed to '1' that would mean the syscall is not
// available and we must use `shm_open()` and `shm_unlink()`.
static volatile uint32_t memfd_create_not_supported;
if (!memfd_create_not_supported) {
_fd = (int)syscall(__NR_memfd_create, "vmem", 0);
if (ASMJIT_LIKELY(_fd >= 0))
return kErrorOk;
int e = errno;
if (e == ENOSYS)
memfd_create_not_supported = 1;
else
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
}
#endif
#if defined(SHM_ANON)
// Originally FreeBSD extension, apparently works in other BSDs too.
DebugUtils::unused(preferTmpOverDevShm);
_fd = ::shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
if (ASMJIT_LIKELY(_fd >= 0))
return kErrorOk;
else
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(errno));
#else
// POSIX API. We have to generate somehow a unique name. This is nothing
// cryptographic, just using a bit from the stack address to always have
// a different base for different threads (as threads have their own stack)
// and retries for avoiding collisions. We use `shm_open()` with flags that
// require creation of the file so we never open an existing shared memory.
static std::atomic<uint32_t> internalCounter;
const char* kShmFormat = "/shm-id-%016llX";
uint32_t kRetryCount = 100;
uint64_t bits = ((uintptr_t)(void*)this) & 0x55555555u;
for (uint32_t i = 0; i < kRetryCount; i++) {
bits -= uint64_t(OSUtils::getTickCount()) * 773703683;
bits = ((bits >> 14) ^ (bits << 6)) + uint64_t(++internalCounter) * 10619863;
if (!ASMJIT_VM_SHM_DETECT || preferTmpOverDevShm) {
_tmpName.assign(VirtMem_getTmpDir());
_tmpName.appendFormat(kShmFormat, (unsigned long long)bits);
_fd = ::open(_tmpName.data(), O_RDWR | O_CREAT | O_EXCL, 0);
if (ASMJIT_LIKELY(_fd >= 0)) {
_fileType = kFileTypeTmp;
return kErrorOk;
}
}
else {
_tmpName.assignFormat(kShmFormat, (unsigned long long)bits);
_fd = ::shm_open(_tmpName.data(), O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
if (ASMJIT_LIKELY(_fd >= 0)) {
_fileType = kFileTypeShm;
return kErrorOk;
}
}
int e = errno;
if (e != EEXIST)
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
}
return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory);
#endif
}
void unlink() noexcept {
FileType type = _fileType;
_fileType = kFileTypeNone;
if (type == kFileTypeShm)
::shm_unlink(_tmpName.data());
else if (type == kFileTypeTmp)
::unlink(_tmpName.data());
}
void close() noexcept {
if (_fd >= 0) {
::close(_fd);
_fd = -1;
}
}
Error allocate(size_t size) noexcept {
// TODO: Improve this by using `posix_fallocate()` when available.
if (ftruncate(_fd, off_t(size)) != 0)
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(errno));
return kErrorOk;
}
};
#if defined(__APPLE__)
// Detects whether the current process is hardened, which means that pages that
// have WRITE and EXECUTABLE flags cannot be allocated without MAP_JIT flag.
static ASMJIT_INLINE bool VirtMem_isHardened() noexcept {
static volatile uint32_t globalHardenedFlag;
enum HardenedFlag : uint32_t {
kHardenedFlagUnknown = 0,
kHardenedFlagDisabled = 1,
kHardenedFlagEnabled = 2
};
uint32_t flag = globalHardenedFlag;
if (flag == kHardenedFlagUnknown) {
VirtMem::Info memInfo;
VirtMem_getInfo(memInfo);
void* ptr = mmap(nullptr, memInfo.pageSize, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
flag = kHardenedFlagEnabled;
}
else {
flag = kHardenedFlagDisabled;
munmap(ptr, memInfo.pageSize);
}
globalHardenedFlag = flag;
}
return flag == kHardenedFlagEnabled;
}
// The MAP_JIT flag required to run unsigned JIT code is only supported by
// macOS 10.14+ (Mojave), which reports a Darwin kernel version of 18+, and by iOS.
static ASMJIT_INLINE bool VirtMem_hasMapJitSupport() noexcept {
#if TARGET_OS_OSX
static volatile int globalVersion;
int ver = globalVersion;
if (!ver) {
struct utsname osname;
uname(&osname);
ver = atoi(osname.release);
globalVersion = ver;
}
return ver >= 18;
#else
// Assume it's available.
return true;
#endif
}
#endif
// Returns `mmap()` protection flags from \ref VirtMem::Flags.
static int VirtMem_mmProtFromFlags(uint32_t flags) noexcept {
int protection = 0;
if (flags & VirtMem::kAccessRead) protection |= PROT_READ;
if (flags & VirtMem::kAccessWrite) protection |= PROT_READ | PROT_WRITE;
if (flags & VirtMem::kAccessExecute) protection |= PROT_READ | PROT_EXEC;
return protection;
}
// Returns either MAP_JIT or 0 based on `flags` and the host operating system.
static ASMJIT_INLINE int VirtMem_mmMapJitFromFlags(uint32_t flags) noexcept {
#if defined(__APPLE__)
// Always use MAP_JIT flag if user asked for it (could be used for testing
// on non-hardened processes) and detect whether it must be used when the
// process is actually hardened (in that case it doesn't make sense to rely
// on user `flags`).
bool useMapJit = (flags & VirtMem::kMMapEnableMapJit) != 0 || VirtMem_isHardened();
if (useMapJit)
return VirtMem_hasMapJitSupport() ? int(MAP_JIT) : 0;
else
return 0;
#else
DebugUtils::unused(flags);
return 0;
#endif
}
// Returns BSD-specific `PROT_MAX()` flags.
static ASMJIT_INLINE int VirtMem_mmMaxProtFromFlags(uint32_t flags) noexcept {
#if defined(PROT_MAX)
static constexpr uint32_t kMaxProtShift = Support::constCtz(VirtMem::kMMapMaxAccessRead);
if (flags & (VirtMem::kMMapMaxAccessReadWrite | VirtMem::kMMapMaxAccessExecute))
return PROT_MAX(VirtMem_mmProtFromFlags(flags >> kMaxProtShift));
else
return 0;
#else
DebugUtils::unused(flags);
return 0;
#endif
}
#if ASMJIT_VM_SHM_DETECT
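// Detects whether executable mappings backed by shared memory are allowed
// (i.e. '/dev/shm' is not mounted 'noexec') by test-mapping a single page
// with PROT_READ|PROT_EXEC; EINVAL selects the 'tmp' directory strategy.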
static Error VirtMem_detectShmStrategy(uint32_t* strategyOut) noexcept {
AnonymousMemory anonMem;
VirtMem::Info vmInfo = VirtMem::info();
ASMJIT_PROPAGATE(anonMem.open(false));
ASMJIT_PROPAGATE(anonMem.allocate(vmInfo.pageSize));
void* ptr = mmap(nullptr, vmInfo.pageSize, PROT_READ | PROT_EXEC, MAP_SHARED, anonMem.fd(), 0);
if (ptr == MAP_FAILED) {
int e = errno;
if (e == EINVAL) {
*strategyOut = kShmStrategyTmpDir;
return kErrorOk;
}
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
}
else {
munmap(ptr, vmInfo.pageSize);
*strategyOut = kShmStrategyDevShm;
return kErrorOk;
}
}
#endif
static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
#if ASMJIT_VM_SHM_DETECT
// Initially don't assume anything. It has to be tested whether
// '/dev/shm' was mounted with 'noexec' flag or not.
static volatile uint32_t globalShmStrategy = kShmStrategyUnknown;
uint32_t strategy = globalShmStrategy;
if (strategy == kShmStrategyUnknown) {
ASMJIT_PROPAGATE(VirtMem_detectShmStrategy(&strategy));
globalShmStrategy = strategy;
}
*strategyOut = strategy;
return kErrorOk;
#else
*strategyOut = kShmStrategyTmpDir;
return kErrorOk;
#endif
}
Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
*p = nullptr;
if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument);
int protection = VirtMem_mmProtFromFlags(flags) | VirtMem_mmMaxProtFromFlags(flags);
int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | VirtMem_mmMapJitFromFlags(flags);
void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0);
if (ptr == MAP_FAILED)
return DebugUtils::errored(kErrorOutOfMemory);
*p = ptr;
return kErrorOk;
}
Error VirtMem::release(void* p, size_t size) noexcept {
if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
return DebugUtils::errored(kErrorInvalidArgument);
return kErrorOk;
}
Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
int protection = VirtMem_mmProtFromFlags(flags);
if (mprotect(p, size, protection) == 0)
return kErrorOk;
return DebugUtils::errored(kErrorInvalidArgument);
}
Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
dm->ro = nullptr;
dm->rw = nullptr;
if (off_t(size) <= 0)
return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
bool preferTmpOverDevShm = (flags & kMappingPreferTmp) != 0;
if (!preferTmpOverDevShm) {
uint32_t strategy;
ASMJIT_PROPAGATE(VirtMem_getShmStrategy(&strategy));
preferTmpOverDevShm = (strategy == kShmStrategyTmpDir);
}
AnonymousMemory anonMem;
ASMJIT_PROPAGATE(anonMem.open(preferTmpOverDevShm));
ASMJIT_PROPAGATE(anonMem.allocate(size));
void* ptr[2];
for (uint32_t i = 0; i < 2; i++) {
uint32_t accessFlags = flags & ~VirtMem_dualMappingFilter[i];
int protection = VirtMem_mmProtFromFlags(accessFlags) | VirtMem_mmMaxProtFromFlags(accessFlags);
ptr[i] = mmap(nullptr, size, protection, MAP_SHARED, anonMem.fd(), 0);
if (ptr[i] == MAP_FAILED) {
// Get the error now before `munmap()` has a chance to clobber it.
int e = errno;
if (i == 1)
munmap(ptr[0], size);
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
}
}
dm->ro = ptr[0];
dm->rw = ptr[1];
return kErrorOk;
}
Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
Error err = release(dm->ro, size);
if (dm->ro != dm->rw)
err |= release(dm->rw, size);
if (err)
return DebugUtils::errored(kErrorInvalidArgument);
dm->ro = nullptr;
dm->rw = nullptr;
return kErrorOk;
}
#endif
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Memory Info]]
// ============================================================================
VirtMem::Info VirtMem::info() noexcept {
static VirtMem::Info vmInfo;
static std::atomic<uint32_t> vmInfoInitialized;
if (!vmInfoInitialized.load()) {
VirtMem::Info localMemInfo;
VirtMem_getInfo(localMemInfo);
vmInfo = localMemInfo;
vmInfoInitialized.store(1u);
}
return vmInfo;
}
ASMJIT_END_NAMESPACE
#endif

@ -0,0 +1,165 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_VIRTMEM_H_INCLUDED
#define ASMJIT_CORE_VIRTMEM_H_INCLUDED
#include "../core/api-config.h"
#ifndef ASMJIT_NO_JIT
#include "../core/globals.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::VirtMem]
// ============================================================================
//! Virtual memory management.
namespace VirtMem {
//! Virtual memory access and mmap-specific flags.
enum Flags : uint32_t {
//! No access flags.
kAccessNone = 0x00000000u,
//! Memory is readable.
kAccessRead = 0x00000001u,
//! Memory is writable.
kAccessWrite = 0x00000002u,
//! Memory is executable.
kAccessExecute = 0x00000004u,
//! A combination of \ref kAccessRead and \ref kAccessWrite.
kAccessReadWrite = kAccessRead | kAccessWrite,
  //! Shorter alias of \ref kAccessReadWrite.
kAccessRW = kAccessRead | kAccessWrite,
//! A combination of \ref kAccessRead and \ref kAccessExecute.
kAccessRX = kAccessRead | kAccessExecute,
//! A combination of \ref kAccessRead, \ref kAccessWrite, and \ref kAccessExecute.
kAccessRWX = kAccessRead | kAccessWrite | kAccessExecute,
//! Use a `MAP_JIT` flag available on Apple platforms (introduced by Mojave),
//! which allows JIT code to be executed in MAC bundles. This flag is not turned
//! on by default, because when a process uses `fork()` the child process
//! has no access to the pages mapped with `MAP_JIT`, which could break code
//! that doesn't expect this behavior.
kMMapEnableMapJit = 0x00000010u,
//! Pass `PROT_MAX(PROT_READ)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessRead = 0x00000020u,
//! Pass `PROT_MAX(PROT_WRITE)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessWrite = 0x00000040u,
//! Pass `PROT_MAX(PROT_EXEC)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessExecute = 0x00000080u,
//! A combination of \ref kMMapMaxAccessRead and \ref kMMapMaxAccessWrite.
kMMapMaxAccessReadWrite = kMMapMaxAccessRead | kMMapMaxAccessWrite,
  //! Shorter alias of \ref kMMapMaxAccessReadWrite.
kMMapMaxAccessRW = kMMapMaxAccessRead | kMMapMaxAccessWrite,
//! A combination of \ref kMMapMaxAccessRead and \ref kMMapMaxAccessExecute.
kMMapMaxAccessRX = kMMapMaxAccessRead | kMMapMaxAccessExecute,
//! A combination of \ref kMMapMaxAccessRead, \ref kMMapMaxAccessWrite, \ref kMMapMaxAccessExecute.
kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
//! Not an access flag, only used by `allocDualMapping()` to override the
//! default allocation strategy to always use a 'tmp' directory instead of
//! "/dev/shm" (on POSIX platforms). Please note that this flag will be
  //! ignored if the operating system allows executable memory to be allocated
  //! by an API other than `open()` or `shm_open()`. For example on Linux
//! `memfd_create()` is preferred and on BSDs `shm_open(SHM_ANON, ...)` is
//! used if SHM_ANON is defined.
kMappingPreferTmp = 0x80000000u
};
//! Virtual memory information.
struct Info {
//! Virtual memory page size.
uint32_t pageSize;
//! Virtual memory page granularity.
uint32_t pageGranularity;
};
//! Dual memory mapping used to map an anonymous memory into two memory regions
//! where one region is read-only, but executable, and the second region is
//! read+write, but not executable. Please see \ref VirtMem::allocDualMapping()
//! for more details.
struct DualMapping {
//! Pointer to data with 'Read' or 'Read+Execute' access.
void* ro;
//! Pointer to data with 'Read-Write' access, but never 'Write+Execute'.
void* rw;
};
//! Returns virtual memory information, see `VirtMem::Info` for more details.
ASMJIT_API Info info() noexcept;
//! Allocates virtual memory by either using `mmap()` (POSIX) or `VirtualAlloc()`
//! (Windows).
//!
//! \note `size` should be aligned to page size, use \ref VirtMem::info()
//! to obtain it. An invalid size will not be corrected by the implementation
//! and the allocation will not succeed in that case.
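//!
//! A minimal usage sketch (illustrative only, error handling reduced to a
//! single check):
//!
//! ```
//! using namespace asmjit;
//!
//! size_t pageSize = VirtMem::info().pageSize;
//! void* p;
//!
//! if (VirtMem::alloc(&p, pageSize, VirtMem::kAccessRWX) == kErrorOk) {
//!   // ... emit machine code into `p` and execute it ...
//!   VirtMem::release(p, pageSize);
//! }
//! ```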
ASMJIT_API Error alloc(void** p, size_t size, uint32_t flags) noexcept;
//! Releases virtual memory previously allocated by \ref VirtMem::alloc().
//!
//! \note The size must be the same as used by \ref VirtMem::alloc(). If the
//! size is not the same value, the call will fail on any POSIX system, but
//! will pass on Windows, because it's implemented differently there.
ASMJIT_API Error release(void* p, size_t size) noexcept;
//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()`
//! (Windows).
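//!
//! A W^X-friendly sketch (illustrative): allocate read+write memory, emit code
//! into it, then flip the protection to read+execute before running it:
//!
//! ```
//! using namespace asmjit;
//!
//! size_t pageSize = VirtMem::info().pageSize;
//! void* p;
//!
//! if (VirtMem::alloc(&p, pageSize, VirtMem::kAccessReadWrite) == kErrorOk) {
//!   // ... copy the generated code into `p` ...
//!   if (VirtMem::protect(p, pageSize, VirtMem::kAccessRX) == kErrorOk) {
//!     // ... execute the code through `p` ...
//!   }
//!   VirtMem::release(p, pageSize);
//! }
//! ```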
ASMJIT_API Error protect(void* p, size_t size, uint32_t flags) noexcept;
//! Allocates virtual memory and creates two views of it where the first view
//! has no write access. This is an addition to the API that should be used
//! in cases in which the operating system either enforces W^X security policy
//! or the application wants to apply this policy by default to improve security
//! and prevent accidental (or deliberate) self-modifying code.
//!
//! The memory returned in `dm` consists of two independent mappings of the same
//! shared memory region. You must use \ref VirtMem::releaseDualMapping() to
//! release it when it's no longer needed. Never use `VirtMem::release()` to
//! release the memory returned by `allocDualMapping()` as that would fail on
//! Windows.
//!
//! \remarks Both pointers in `dm` are set to `nullptr` if the function fails.
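//!
//! A minimal sketch (illustrative; the code that is written into the mapping
//! is elided):
//!
//! ```
//! using namespace asmjit;
//!
//! VirtMem::DualMapping dm;
//! size_t size = VirtMem::info().pageSize;
//!
//! // Passing RWX yields a Read+Execute view in `dm.ro` and a Read+Write view
//! // in `dm.rw`, both referring to the same underlying pages.
//! if (VirtMem::allocDualMapping(&dm, size, VirtMem::kAccessRWX) == kErrorOk) {
//!   // ... write code through `dm.rw`, execute it through `dm.ro` ...
//!   VirtMem::releaseDualMapping(&dm, size);
//! }
//! ```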
ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept;
//! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping().
//!
//! \remarks Both pointers in `dm` are set to `nullptr` if the function succeeds.
ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
} // VirtMem
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_VIRTMEM_H_INCLUDED

@ -0,0 +1,382 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Zone - Statics]
// ============================================================================
// Zero size block used by `Zone` that doesn't have any memory allocated.
// Should be allocated in read-only memory and should never be modified.
const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
// ============================================================================
// [asmjit::Zone - Init / Reset]
// ============================================================================
void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
ASMJIT_ASSERT(blockSize >= kMinBlockSize);
ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
ASMJIT_ASSERT(blockAlignment <= 64);
// Just to make the compiler happy...
constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
constexpr size_t kBlockAlignmentShiftMask = 0x7u;
_assignZeroBlock();
_blockSize = blockSize & kBlockSizeMask;
_isTemporary = temporary != nullptr;
_blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;
// Setup the first [temporary] block, if necessary.
if (temporary) {
Block* block = temporary->data<Block>();
block->prev = nullptr;
block->next = nullptr;
ASMJIT_ASSERT(temporary->size() >= kBlockSize);
block->size = temporary->size() - kBlockSize;
_assignBlock(block);
}
}
void Zone::reset(uint32_t resetPolicy) noexcept {
Block* cur = _block;
// Can't be altered.
if (cur == &_zeroBlock)
return;
if (resetPolicy == Globals::kResetHard) {
Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
_ptr = initial->data();
_end = initial->data();
_block = initial;
// Since cur can be in the middle of the double-linked list, we have to
// traverse both directions (`prev` and `next`) separately to visit all.
Block* next = cur->next;
do {
Block* prev = cur->prev;
// If this is the first block and this ZoneTmp is temporary then the
// first block is statically allocated. We cannot free it and it makes
// sense to keep it even when this is hard reset.
if (prev == nullptr && _isTemporary) {
cur->prev = nullptr;
cur->next = nullptr;
_assignBlock(cur);
break;
}
::free(cur);
cur = prev;
} while (cur);
cur = next;
while (cur) {
next = cur->next;
::free(cur);
cur = next;
}
}
else {
while (cur->prev)
cur = cur->prev;
_assignBlock(cur);
}
}
// ============================================================================
// [asmjit::Zone - Alloc]
// ============================================================================
void* Zone::_alloc(size_t size, size_t alignment) noexcept {
Block* curBlock = _block;
Block* next = curBlock->next;
size_t rawBlockAlignment = blockAlignment();
size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);
// If the `Zone` has been cleared the current block doesn't have to be the
// last one. Check if there is a block that can be used instead of allocating
// a new one. If there is a `next` block it's completely unused, we don't have
// to check for remaining bytes in that case.
if (next) {
uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
if (size <= (size_t)(end - ptr)) {
_block = next;
_ptr = ptr + size;
_end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
return static_cast<void*>(ptr);
}
}
size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
size_t newSize = Support::max(blockSize(), size);
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(newSize > SIZE_MAX - kBlockSize - blockAlignmentOverhead))
return nullptr;
  // Allocate a new block - we add the alignment overhead to `newSize`, which becomes
  // the new block size, and we also add `kBlockSize` to the allocation as it has to
  // hold the members of the `Zone::Block` structure itself.
newSize += blockAlignmentOverhead;
Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));
if (ASMJIT_UNLIKELY(!newBlock))
return nullptr;
// Align the pointer to `minimumAlignment` and adjust the size of this block
// accordingly. It's the same as using `minimumAlignment - Support::alignUpDiff()`,
// just written differently.
{
newBlock->prev = nullptr;
newBlock->next = nullptr;
newBlock->size = newSize;
if (curBlock != &_zeroBlock) {
newBlock->prev = curBlock;
curBlock->next = newBlock;
      // This only happens if there is a next block, but the requested memory
      // can't fit into it. In this case a new buffer is allocated and inserted
      // between the current block and the next one.
if (next) {
newBlock->next = next;
next->prev = newBlock;
}
}
uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);
_ptr = ptr + size;
_end = end;
_block = newBlock;
ASMJIT_ASSERT(_ptr <= _end);
return static_cast<void*>(ptr);
}
}
void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
void* p = alloc(size, alignment);
if (ASMJIT_UNLIKELY(!p))
return p;
return memset(p, 0, size);
}
void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
if (ASMJIT_UNLIKELY(!data || !size))
return nullptr;
ASMJIT_ASSERT(size != SIZE_MAX);
uint8_t* m = allocT<uint8_t>(size + nullTerminate);
if (ASMJIT_UNLIKELY(!m)) return nullptr;
memcpy(m, data, size);
if (nullTerminate) m[size] = '\0';
return static_cast<void*>(m);
}
char* Zone::sformat(const char* fmt, ...) noexcept {
if (ASMJIT_UNLIKELY(!fmt))
return nullptr;
char buf[512];
size_t size;
va_list ap;
va_start(ap, fmt);
size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
va_end(ap);
buf[size++] = 0;
return static_cast<char*>(dup(buf, size));
}
// ============================================================================
// [asmjit::ZoneAllocator - Helpers]
// ============================================================================
#if defined(ASMJIT_BUILD_DEBUG)
static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
while (cur) {
if (cur == block)
return true;
cur = cur->next;
}
return false;
}
#endif
// ============================================================================
// [asmjit::ZoneAllocator - Init / Reset]
// ============================================================================
void ZoneAllocator::reset(Zone* zone) noexcept {
// Free dynamic blocks.
DynamicBlock* block = _dynamicBlocks;
while (block) {
DynamicBlock* next = block->next;
::free(block);
block = next;
}
// Zero the entire class and initialize to the given `zone`.
memset(this, 0, sizeof(*this));
_zone = zone;
}
// ============================================================================
// [asmjit::ZoneAllocator - Alloc / Release]
// ============================================================================
void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
// Use the memory pool only if the requested block has a reasonable size.
uint32_t slot;
if (_getSlotIndex(size, slot, allocatedSize)) {
// Slot reuse.
uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
size = allocatedSize;
if (p) {
_slots[slot] = reinterpret_cast<Slot*>(p)->next;
return p;
}
_zone->align(kBlockAlignment);
p = _zone->ptr();
size_t remain = (size_t)(_zone->end() - p);
if (ASMJIT_LIKELY(remain >= size)) {
_zone->setPtr(p + size);
return p;
}
else {
// Distribute the remaining memory to suitable slots, if possible.
if (remain >= kLoGranularity) {
do {
size_t distSize = Support::min<size_t>(remain, kLoMaxSize);
uint32_t distSlot = uint32_t((distSize - kLoGranularity) / kLoGranularity);
ASMJIT_ASSERT(distSlot < kLoCount);
reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
_slots[distSlot] = reinterpret_cast<Slot*>(p);
p += distSize;
remain -= distSize;
} while (remain >= kLoGranularity);
_zone->setPtr(p);
}
p = static_cast<uint8_t*>(_zone->_alloc(size, kBlockAlignment));
if (ASMJIT_UNLIKELY(!p)) {
allocatedSize = 0;
return nullptr;
}
return p;
}
}
else {
// Allocate a dynamic block.
size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
// Handle a possible overflow.
if (ASMJIT_UNLIKELY(kBlockOverhead >= SIZE_MAX - size))
return nullptr;
void* p = ::malloc(size + kBlockOverhead);
if (ASMJIT_UNLIKELY(!p)) {
allocatedSize = 0;
return nullptr;
}
// Link as first in `_dynamicBlocks` double-linked list.
DynamicBlock* block = static_cast<DynamicBlock*>(p);
DynamicBlock* next = _dynamicBlocks;
if (next)
next->prev = block;
block->prev = nullptr;
block->next = next;
_dynamicBlocks = block;
// Align the pointer to the guaranteed alignment and store `DynamicBlock`
// at the beginning of the memory block, so `_releaseDynamic()` can find it.
p = Support::alignUp(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
reinterpret_cast<DynamicBlock**>(p)[-1] = block;
allocatedSize = size;
return p;
}
}
void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
void* p = _alloc(size, allocatedSize);
if (ASMJIT_UNLIKELY(!p)) return p;
return memset(p, 0, allocatedSize);
}
void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
DebugUtils::unused(size);
ASMJIT_ASSERT(isInitialized());
// Pointer to `DynamicBlock` is stored at [-1].
DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
ASMJIT_ASSERT(ZoneAllocator_hasDynamicBlock(this, block));
// Unlink and free.
DynamicBlock* prev = block->prev;
DynamicBlock* next = block->next;
if (prev)
prev->next = next;
else
_dynamicBlocks = next;
if (next)
next->prev = prev;
::free(block);
}
ASMJIT_END_NAMESPACE

@ -0,0 +1,649 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONE_H_INCLUDED
#define ASMJIT_CORE_ZONE_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::Zone]
// ============================================================================
//! Zone memory.
//!
//! Zone is an incremental memory allocator that allocates memory by simply
//! incrementing a pointer. It allocates blocks of memory by using C's `malloc()`,
//! but divides these blocks into smaller segments requested by calling
//! `Zone::alloc()` and friends.
//!
//! Zone has no function to release the allocated memory. It has to be released
//! all at once by calling `reset()`. If you need a more friendly allocator that
//! also supports `release()`, consider using `Zone` with `ZoneAllocator`.
class Zone {
public:
ASMJIT_NONCOPYABLE(Zone)
//! \cond INTERNAL
//! A single block of memory managed by `Zone`.
struct Block {
inline uint8_t* data() const noexcept {
return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(this) + sizeof(*this));
}
//! Link to the previous block.
Block* prev;
//! Link to the next block.
Block* next;
//! Size of the block.
size_t size;
};
enum Limits : size_t {
kBlockSize = sizeof(Block),
kBlockOverhead = Globals::kAllocOverhead + kBlockSize,
kMinBlockSize = 64, // The number is ridiculously small, but still possible.
kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 4 - 1),
kMinAlignment = 1,
kMaxAlignment = 64
};
//! Pointer in the current block.
uint8_t* _ptr;
//! End of the current block.
uint8_t* _end;
//! Current block.
Block* _block;
union {
struct {
//! Default block size.
size_t _blockSize : Support::bitSizeOf<size_t>() - 4;
//! First block is temporary (ZoneTmp).
size_t _isTemporary : 1;
//! Block alignment (1 << alignment).
size_t _blockAlignmentShift : 3;
};
size_t _packedData;
};
static ASMJIT_API const Block _zeroBlock;
//! \endcond
//! \name Construction & Destruction
//! \{
//! Creates a new Zone.
//!
//! The `blockSize` parameter describes the default size of the block. If the
//! `size` parameter passed to `alloc()` is greater than the default size
//! `Zone` will allocate and use a larger block, but it will not change the
//! default `blockSize`.
//!
//! It's not required, but it's good practice to set `blockSize` to a
//! reasonable value that depends on the usage of `Zone`. Greater block sizes
//! are generally safer and perform better than unreasonably low block sizes.
ASMJIT_INLINE explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
_init(blockSize, blockAlignment, nullptr);
}
ASMJIT_INLINE Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
_init(blockSize, blockAlignment, &temporary);
}
//! Moves an existing `Zone`.
//!
//! \note You cannot move an existing `ZoneTmp` as it uses embedded storage.
//! Attempting to move `ZoneTmp` would result in assertion failure in debug
//! mode and undefined behavior in release mode.
ASMJIT_INLINE Zone(Zone&& other) noexcept
: _ptr(other._ptr),
_end(other._end),
_block(other._block),
_packedData(other._packedData) {
ASMJIT_ASSERT(!other.isTemporary());
other._block = const_cast<Block*>(&_zeroBlock);
other._ptr = other._block->data();
other._end = other._block->data();
}
//! Destroys the `Zone` instance.
//!
//! This will destroy the `Zone` instance and release all blocks of memory
//! allocated by it. It performs implicit `reset(Globals::kResetHard)`.
ASMJIT_INLINE ~Zone() noexcept { reset(Globals::kResetHard); }
ASMJIT_API void _init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept;
//! Resets the `Zone` invalidating all blocks allocated.
//!
//! See `Globals::ResetPolicy` for more details.
ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
//! \}
//! \name Accessors
//! \{
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
ASMJIT_INLINE bool isTemporary() const noexcept { return _isTemporary != 0; }
//! Returns the default block size.
ASMJIT_INLINE size_t blockSize() const noexcept { return _blockSize; }
//! Returns the default block alignment.
ASMJIT_INLINE size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
//! Returns remaining size of the current block.
ASMJIT_INLINE size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
//! Returns the current zone cursor (dangerous).
//!
//! This is a function that can be used to get exclusive access to the current
//! block's memory buffer.
template<typename T = uint8_t>
ASMJIT_INLINE T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
//! Returns the end of the current zone block, only useful if you use `ptr()`.
template<typename T = uint8_t>
ASMJIT_INLINE T* end() noexcept { return reinterpret_cast<T*>(_end); }
//! Sets the current zone pointer to `ptr` (must be within the current block).
template<typename T>
ASMJIT_INLINE void setPtr(T* ptr) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_ptr = p;
}
//! Sets the end zone pointer to `end` (must be within the current block).
template<typename T>
ASMJIT_INLINE void setEnd(T* end) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(end);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_end = p;
}
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE void swap(Zone& other) noexcept {
// This could lead to a disaster.
ASMJIT_ASSERT(!this->isTemporary());
ASMJIT_ASSERT(!other.isTemporary());
std::swap(_ptr, other._ptr);
std::swap(_end, other._end);
std::swap(_block, other._block);
std::swap(_packedData, other._packedData);
}
//! Aligns the current pointer to `alignment`.
ASMJIT_INLINE void align(size_t alignment) noexcept {
_ptr = Support::min(Support::alignUp(_ptr, alignment), _end);
}
//! Ensures the remaining size is at least equal or greater than `size`.
//!
//! \note This function doesn't respect any alignment. If you need to ensure
//! there is enough room for an aligned allocation you need to call `align()`
//! before calling `ensure()`.
ASMJIT_INLINE Error ensure(size_t size) noexcept {
if (size <= remainingSize())
return kErrorOk;
else
return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
}
ASMJIT_INLINE void _assignBlock(Block* block) noexcept {
size_t alignment = blockAlignment();
_ptr = Support::alignUp(block->data(), alignment);
_end = Support::alignDown(block->data() + block->size, alignment);
_block = block;
}
ASMJIT_INLINE void _assignZeroBlock() noexcept {
Block* block = const_cast<Block*>(&_zeroBlock);
_ptr = block->data();
_end = block->data();
_block = block;
}
//! \}
//! \name Allocation
//! \{
//! Allocates the requested memory specified by `size`.
//!
//! Pointer returned is valid until the `Zone` instance is destroyed or reset
//! by calling `reset()`. If you plan to make an instance of C++ from the
//! given pointer use placement `new` and `delete` operators:
//!
//! ```
//! using namespace asmjit;
//!
//! class Object { ... };
//!
//! // Create Zone with default block size of approximately 65536 bytes.
//! Zone zone(65536 - Zone::kBlockOverhead);
//!
//! // Create your objects using zone object allocating, for example:
//! Object* obj = static_cast<Object*>( zone.alloc(sizeof(Object)) );
//!
//! if (!obj) {
//! // Handle out of memory error.
//! }
//!
//! // Placement `new` and `delete` operators can be used to instantiate it.
//! new(obj) Object();
//!
//! // ... lifetime of your objects ...
//!
//! // To destroy the instance (if required).
//! obj->~Object();
//!
//! // Reset or destroy `Zone`.
//! zone.reset();
//! ```
ASMJIT_INLINE void* alloc(size_t size) noexcept {
if (ASMJIT_UNLIKELY(size > remainingSize()))
return _alloc(size, 1);
uint8_t* ptr = _ptr;
_ptr += size;
return static_cast<void*>(ptr);
}
//! Allocates the requested memory specified by `size` and `alignment`.
ASMJIT_INLINE void* alloc(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment);
if (ptr >= _end || size > (size_t)(_end - ptr))
return _alloc(size, alignment);
_ptr = ptr + size;
return static_cast<void*>(ptr);
}
//! Allocates the requested memory specified by `size` without doing any checks.
//!
//! Can only be called if `remainingSize()` returns size at least equal to `size`.
ASMJIT_INLINE void* allocNoCheck(size_t size) noexcept {
ASMJIT_ASSERT(remainingSize() >= size);
uint8_t* ptr = _ptr;
_ptr += size;
return static_cast<void*>(ptr);
}
//! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
//!
//! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
ASMJIT_INLINE void* allocNoCheck(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment);
ASMJIT_ASSERT(size <= (size_t)(_end - ptr));
_ptr = ptr + size;
return static_cast<void*>(ptr);
}
//! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept;
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(alloc(size, alignment));
}
//! Like `allocNoCheck()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocNoCheck(size, alignment));
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocZeroed(size, alignment));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T>
ASMJIT_INLINE T* newT() noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
return nullptr;
return new(p) T();
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args>
ASMJIT_INLINE T* newT(Args&&... args) noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
return nullptr;
return new(p) T(std::forward<Args>(args)...);
}
//! \cond INTERNAL
//!
//! Internal alloc function used by other inlines.
ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept;
//! \endcond
//! Helper to duplicate data.
ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
//! Helper to duplicate data.
ASMJIT_INLINE void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
align(alignment);
return dup(data, size, nullTerminate);
}
//! Helper to duplicate a formatted string, maximum size is 256 bytes.
ASMJIT_API char* sformat(const char* str, ...) noexcept;
//! \}
};
// ============================================================================
// [asmjit::ZoneTmp]
// ============================================================================
//! \ref Zone with `N` bytes of a static storage, used for the initial block.
//!
//! Temporary zones are used in cases where it's known that some memory will be
//! required, but in many cases it won't exceed N bytes, so the whole operation
//! can be performed without a dynamic memory allocation.
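//!
//! A short sketch (illustrative): small allocations are served from the
//! embedded storage and only later blocks fall back to `malloc()`:
//!
//! ```
//! using namespace asmjit;
//!
//! ZoneTmp<512> zone(4096 - Zone::kBlockOverhead);
//! void* p = zone.alloc(64); // Served from the embedded 512-byte storage.
//! ```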
template<size_t N>
class ZoneTmp : public Zone {
public:
ASMJIT_NONCOPYABLE(ZoneTmp<N>)
//! Temporary storage, embedded after \ref Zone.
struct Storage {
char data[N];
} _storage;
//! Creates a temporary zone. Dynamic block size is specified by `blockSize`.
ASMJIT_INLINE explicit ZoneTmp(size_t blockSize, size_t blockAlignment = 1) noexcept
: Zone(blockSize, blockAlignment, Support::Temporary(_storage.data, N)) {}
};
// ============================================================================
// [asmjit::ZoneAllocator]
// ============================================================================
//! Zone-based memory allocator that uses an existing `Zone` and provides a
//! `release()` functionality on top of it. It uses `Zone` only for chunks
//! that can be pooled, and uses libc `malloc()` for chunks that are large.
//!
//! The advantage of ZoneAllocator is that it can allocate small chunks of memory
//! really fast, and these chunks, when released, will be reused by consecutive
//! calls to `alloc()`. Also, since ZoneAllocator uses `Zone`, you can turn any
//! `Zone` into a `ZoneAllocator`, and use it in your `Pass` when necessary.
//!
//! ZoneAllocator is used by AsmJit containers to make containers that hold only
//! a few elements fast (and lightweight) and to allow them to grow and use
//! dynamic blocks when they require more storage.
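//!
//! A minimal sketch (illustrative):
//!
//! ```
//! using namespace asmjit;
//!
//! Zone zone(4096 - Zone::kBlockOverhead);
//! ZoneAllocator allocator(&zone);
//!
//! void* p = allocator.alloc(40);    // Rounded up to a 64-byte pooled chunk.
//! allocator.release(p, 40);         // Returned to its slot for later reuse.
//! allocator.reset();                // Also performed by the destructor.
//! ```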
class ZoneAllocator {
public:
ASMJIT_NONCOPYABLE(ZoneAllocator)
//! \cond INTERNAL
enum {
// In short, we pool chunks of these sizes:
// [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
//! How many bytes per a low granularity pool (has to be at least 16).
kLoGranularity = 32,
//! Number of slots of a low granularity pool.
kLoCount = 4,
//! Maximum size of a block that can be allocated in a low granularity pool.
kLoMaxSize = kLoGranularity * kLoCount,
//! How many bytes per a high granularity pool.
kHiGranularity = 64,
//! Number of slots of a high granularity pool.
kHiCount = 6,
//! Maximum size of a block that can be allocated in a high granularity pool.
kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount,
//! Alignment of every pointer returned by `alloc()`.
kBlockAlignment = kLoGranularity
};
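  // Worked example of the mapping implemented by _getSlotIndex() below: a
  // 100-byte request hits low-pool slot (100 - 1) / 32 = 3 and is rounded up
  // to 128 bytes; a 200-byte request hits high-pool slot
  // (200 - 128 - 1) / 64 + 4 = 5 and is rounded up to 256 bytes.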
//! Single-linked list used to store unused chunks.
struct Slot {
//! Link to a next slot in a single-linked list.
Slot* next;
};
//! A block of memory that has been allocated dynamically and is not part of
//! block-list used by the allocator. This is used to keep track of all these
//! blocks so they can be freed by `reset()` if not freed explicitly.
struct DynamicBlock {
DynamicBlock* prev;
DynamicBlock* next;
};
//! \endcond
//! Zone used to allocate memory that fits into slots.
Zone* _zone;
//! Indexed slots containing released memory.
Slot* _slots[kLoCount + kHiCount];
//! Dynamic blocks for larger allocations (no slots).
DynamicBlock* _dynamicBlocks;
//! \name Construction & Destruction
//! \{
//! Creates a new `ZoneAllocator`.
//!
//! \note To use it, you must first `init()` it.
inline ZoneAllocator() noexcept {
memset(this, 0, sizeof(*this));
}
//! Creates a new `ZoneAllocator` initialized to use `zone`.
inline explicit ZoneAllocator(Zone* zone) noexcept {
memset(this, 0, sizeof(*this));
_zone = zone;
}
//! Destroys the `ZoneAllocator`.
inline ~ZoneAllocator() noexcept { reset(); }
//! Tests whether the `ZoneAllocator` is initialized (i.e. has `Zone`).
inline bool isInitialized() const noexcept { return _zone != nullptr; }
//! Convenience function to initialize the `ZoneAllocator` with `zone`.
//!
//! It's the same as calling `reset(zone)`.
inline void init(Zone* zone) noexcept { reset(zone); }
  //! Resets this `ZoneAllocator` and also forgets the currently attached `Zone`
  //! (if any). Reset optionally attaches the passed `zone`, or keeps the
  //! `ZoneAllocator` in an uninitialized state if `zone` is null.
ASMJIT_API void reset(Zone* zone = nullptr) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator`
//! is not initialized.
inline Zone* zone() const noexcept { return _zone; }
//! \}
//! \cond
//! \name Internals
//! \{
//! Returns the slot index to be used for `size`. Returns `true` if a valid slot
//! has been written to `slot` and `allocatedSize` has been filled with slot
//! exact size (`allocatedSize` can be equal or slightly greater than `size`).
static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
return false;
if (size <= kLoMaxSize)
slot = uint32_t((size - 1) / kLoGranularity);
else
slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
return true;
}
//! \overload
static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
return false;
if (size <= kLoMaxSize) {
slot = uint32_t((size - 1) / kLoGranularity);
allocatedSize = Support::alignUp(size, kLoGranularity);
}
else {
slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
allocatedSize = Support::alignUp(size, kHiGranularity);
}
return true;
}
//! \}
//! \endcond
//! \name Allocation
//! \{
//! \cond INTERNAL
ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept;
ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept;
ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept;
//! \endcond
//! Allocates `size` bytes of memory, ideally from an available pool.
//!
//! \note `size` can't be zero, it will assert in debug mode in such case.
inline void* alloc(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
size_t allocatedSize;
return _alloc(size, allocatedSize);
}
//! Like `alloc(size)`, but provides a second argument `allocatedSize` that
//! provides a way to know how big the block returned actually is. This is
//! useful for containers to prevent growing too early.
inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
return _alloc(size, allocatedSize);
}
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
inline T* allocT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(alloc(size));
}
//! Like `alloc(size)`, but returns zeroed memory.
inline void* allocZeroed(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
size_t allocatedSize;
return _allocZeroed(size, allocatedSize);
}
//! Like `alloc(size, allocatedSize)`, but returns zeroed memory.
inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
return _allocZeroed(size, allocatedSize);
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
inline T* allocZeroedT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(allocZeroed(size));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T>
inline T* newT() noexcept {
void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p))
return nullptr;
return new(p) T();
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args>
inline T* newT(Args&&... args) noexcept {
void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p))
return nullptr;
return new(p) T(std::forward<Args>(args)...);
}
//! Releases the memory previously allocated by `alloc()`. The `size` argument
//! has to be the same as used to call `alloc()` or `allocatedSize` returned
//! by `alloc()`.
inline void release(void* p, size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(p != nullptr);
ASMJIT_ASSERT(size != 0);
uint32_t slot;
if (_getSlotIndex(size, slot)) {
static_cast<Slot*>(p)->next = static_cast<Slot*>(_slots[slot]);
_slots[slot] = static_cast<Slot*>(p);
}
else {
_releaseDynamic(p, size);
}
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONE_H_INCLUDED

@ -0,0 +1,331 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonehash.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneHashBase - Helpers]
// ============================================================================
#define ASMJIT_POPULATE_PRIMES(ENTRY) \
ENTRY(2 , 0x80000000, 32), /* [N * 0x80000000 >> 32] (rcp=2147483648) */ \
ENTRY(11 , 0xBA2E8BA3, 35), /* [N * 0xBA2E8BA3 >> 35] (rcp=3123612579) */ \
ENTRY(29 , 0x8D3DCB09, 36), /* [N * 0x8D3DCB09 >> 36] (rcp=2369637129) */ \
ENTRY(41 , 0xC7CE0C7D, 37), /* [N * 0xC7CE0C7D >> 37] (rcp=3352169597) */ \
ENTRY(59 , 0x8AD8F2FC, 37), /* [N * 0x8AD8F2FC >> 37] (rcp=2329473788) */ \
ENTRY(83 , 0xC565C87C, 38), /* [N * 0xC565C87C >> 38] (rcp=3311782012) */ \
ENTRY(131 , 0xFA232CF3, 39), /* [N * 0xFA232CF3 >> 39] (rcp=4196609267) */ \
ENTRY(191 , 0xAB8F69E3, 39), /* [N * 0xAB8F69E3 >> 39] (rcp=2878302691) */ \
ENTRY(269 , 0xF3A0D52D, 40), /* [N * 0xF3A0D52D >> 40] (rcp=4087403821) */ \
ENTRY(383 , 0xAB1CBDD4, 40), /* [N * 0xAB1CBDD4 >> 40] (rcp=2870787540) */ \
ENTRY(541 , 0xF246FACC, 41), /* [N * 0xF246FACC >> 41] (rcp=4064737996) */ \
ENTRY(757 , 0xAD2589A4, 41), /* [N * 0xAD2589A4 >> 41] (rcp=2904918436) */ \
ENTRY(1061 , 0xF7129426, 42), /* [N * 0xF7129426 >> 42] (rcp=4145189926) */ \
ENTRY(1499 , 0xAEE116B7, 42), /* [N * 0xAEE116B7 >> 42] (rcp=2933986999) */ \
ENTRY(2099 , 0xF9C7A737, 43), /* [N * 0xF9C7A737 >> 43] (rcp=4190611255) */ \
ENTRY(2939 , 0xB263D25C, 43), /* [N * 0xB263D25C >> 43] (rcp=2992886364) */ \
ENTRY(4111 , 0xFF10E02E, 44), /* [N * 0xFF10E02E >> 44] (rcp=4279296046) */ \
ENTRY(5779 , 0xB5722823, 44), /* [N * 0xB5722823 >> 44] (rcp=3044157475) */ \
ENTRY(8087 , 0x81A97405, 44), /* [N * 0x81A97405 >> 44] (rcp=2175366149) */ \
ENTRY(11321 , 0xB93E91DB, 45), /* [N * 0xB93E91DB >> 45] (rcp=3107885531) */ \
ENTRY(15859 , 0x843CC26B, 45), /* [N * 0x843CC26B >> 45] (rcp=2218574443) */ \
ENTRY(22189 , 0xBD06B9EA, 46), /* [N * 0xBD06B9EA >> 46] (rcp=3171334634) */ \
ENTRY(31051 , 0x8713F186, 46), /* [N * 0x8713F186 >> 46] (rcp=2266231174) */ \
ENTRY(43451 , 0xC10F1CB9, 47), /* [N * 0xC10F1CB9 >> 47] (rcp=3238993081) */ \
ENTRY(60869 , 0x89D06A86, 47), /* [N * 0x89D06A86 >> 47] (rcp=2312137350) */ \
ENTRY(85159 , 0xC502AF3B, 48), /* [N * 0xC502AF3B >> 48] (rcp=3305287483) */ \
ENTRY(102107 , 0xA44F65AE, 48), /* [N * 0xA44F65AE >> 48] (rcp=2756666798) */ \
ENTRY(122449 , 0x89038F77, 48), /* [N * 0x89038F77 >> 48] (rcp=2298711927) */ \
ENTRY(146819 , 0xE48AF7E9, 49), /* [N * 0xE48AF7E9 >> 49] (rcp=3834312681) */ \
ENTRY(176041 , 0xBE9B145B, 49), /* [N * 0xBE9B145B >> 49] (rcp=3197834331) */ \
ENTRY(211073 , 0x9EF882BA, 49), /* [N * 0x9EF882BA >> 49] (rcp=2667086522) */ \
ENTRY(253081 , 0x849571AB, 49), /* [N * 0x849571AB >> 49] (rcp=2224386475) */ \
ENTRY(303469 , 0xDD239C97, 50), /* [N * 0xDD239C97 >> 50] (rcp=3710098583) */ \
ENTRY(363887 , 0xB86C196D, 50), /* [N * 0xB86C196D >> 50] (rcp=3094092141) */ \
ENTRY(436307 , 0x99CFA4E9, 50), /* [N * 0x99CFA4E9 >> 50] (rcp=2580522217) */ \
ENTRY(523177 , 0x804595C0, 50), /* [N * 0x804595C0 >> 50] (rcp=2152043968) */ \
ENTRY(627293 , 0xD5F69FCF, 51), /* [N * 0xD5F69FCF >> 51] (rcp=3589709775) */ \
ENTRY(752177 , 0xB27063BA, 51), /* [N * 0xB27063BA >> 51] (rcp=2993710010) */ \
ENTRY(901891 , 0x94D170AC, 51), /* [N * 0x94D170AC >> 51] (rcp=2496753836) */ \
ENTRY(1081369 , 0xF83C9767, 52), /* [N * 0xF83C9767 >> 52] (rcp=4164720487) */ \
ENTRY(1296563 , 0xCF09435D, 52), /* [N * 0xCF09435D >> 52] (rcp=3473490781) */ \
ENTRY(1554583 , 0xACAC7198, 52), /* [N * 0xACAC7198 >> 52] (rcp=2896982424) */ \
ENTRY(1863971 , 0x90033EE3, 52), /* [N * 0x90033EE3 >> 52] (rcp=2416131811) */ \
ENTRY(2234923 , 0xF0380EBD, 53), /* [N * 0xF0380EBD >> 53] (rcp=4030205629) */ \
ENTRY(2679673 , 0xC859731E, 53), /* [N * 0xC859731E >> 53] (rcp=3361305374) */ \
ENTRY(3212927 , 0xA718DE27, 53), /* [N * 0xA718DE27 >> 53] (rcp=2803424807) */ \
ENTRY(3852301 , 0x8B5D1B4B, 53), /* [N * 0x8B5D1B4B >> 53] (rcp=2338134859) */ \
ENTRY(4618921 , 0xE8774804, 54), /* [N * 0xE8774804 >> 54] (rcp=3900131332) */ \
ENTRY(5076199 , 0xD386574E, 54), /* [N * 0xD386574E >> 54] (rcp=3548796750) */ \
ENTRY(5578757 , 0xC0783FE1, 54), /* [N * 0xC0783FE1 >> 54] (rcp=3229106145) */ \
ENTRY(6131057 , 0xAF21B08F, 54), /* [N * 0xAF21B08F >> 54] (rcp=2938220687) */ \
ENTRY(6738031 , 0x9F5AFD6E, 54), /* [N * 0x9F5AFD6E >> 54] (rcp=2673540462) */ \
ENTRY(7405163 , 0x90FFC3B9, 54), /* [N * 0x90FFC3B9 >> 54] (rcp=2432680889) */ \
ENTRY(8138279 , 0x83EFECFC, 54), /* [N * 0x83EFECFC >> 54] (rcp=2213539068) */ \
ENTRY(8943971 , 0xF01AA2EF, 55), /* [N * 0xF01AA2EF >> 55] (rcp=4028277487) */ \
ENTRY(9829447 , 0xDA7979B2, 55), /* [N * 0xDA7979B2 >> 55] (rcp=3665394098) */ \
ENTRY(10802581 , 0xC6CB2771, 55), /* [N * 0xC6CB2771 >> 55] (rcp=3335202673) */ \
ENTRY(11872037 , 0xB4E2C7DD, 55), /* [N * 0xB4E2C7DD >> 55] (rcp=3034761181) */ \
ENTRY(13047407 , 0xA4974124, 55), /* [N * 0xA4974124 >> 55] (rcp=2761376036) */ \
ENTRY(14339107 , 0x95C39CF1, 55), /* [N * 0x95C39CF1 >> 55] (rcp=2512624881) */ \
ENTRY(15758737 , 0x8845C763, 55), /* [N * 0x8845C763 >> 55] (rcp=2286274403) */ \
ENTRY(17318867 , 0xF7FE593F, 56), /* [N * 0xF7FE593F >> 56] (rcp=4160641343) */ \
ENTRY(19033439 , 0xE1A75D93, 56), /* [N * 0xE1A75D93 >> 56] (rcp=3785842067) */ \
ENTRY(20917763 , 0xCD5389B3, 56), /* [N * 0xCD5389B3 >> 56] (rcp=3444804019) */ \
ENTRY(22988621 , 0xBAD4841A, 56), /* [N * 0xBAD4841A >> 56] (rcp=3134489626) */ \
ENTRY(25264543 , 0xA9FFF2FF, 56), /* [N * 0xA9FFF2FF >> 56] (rcp=2852123391) */ \
ENTRY(27765763 , 0x9AAF8BF3, 56), /* [N * 0x9AAF8BF3 >> 56] (rcp=2595195891) */ \
ENTRY(30514607 , 0x8CC04E18, 56), /* [N * 0x8CC04E18 >> 56] (rcp=2361413144) */ \
ENTRY(33535561 , 0x80127068, 56), /* [N * 0x80127068 >> 56] (rcp=2148692072) */ \
ENTRY(36855587 , 0xE911F0BB, 57), /* [N * 0xE911F0BB >> 57] (rcp=3910267067) */ \
ENTRY(38661533 , 0xDE2ED7BE, 57), /* [N * 0xDE2ED7BE >> 57] (rcp=3727611838) */ \
ENTRY(40555961 , 0xD3CDF2FD, 57), /* [N * 0xD3CDF2FD >> 57] (rcp=3553489661) */ \
ENTRY(42543269 , 0xC9E9196C, 57), /* [N * 0xC9E9196C >> 57] (rcp=3387496812) */ \
ENTRY(44627909 , 0xC07A9EB6, 57), /* [N * 0xC07A9EB6 >> 57] (rcp=3229261494) */ \
ENTRY(46814687 , 0xB77CEF65, 57), /* [N * 0xB77CEF65 >> 57] (rcp=3078418277) */ \
ENTRY(49108607 , 0xAEEAC65C, 57), /* [N * 0xAEEAC65C >> 57] (rcp=2934621788) */ \
ENTRY(51514987 , 0xA6BF0EF0, 57), /* [N * 0xA6BF0EF0 >> 57] (rcp=2797539056) */ \
ENTRY(54039263 , 0x9EF510B5, 57), /* [N * 0x9EF510B5 >> 57] (rcp=2666860725) */ \
ENTRY(56687207 , 0x97883B42, 57), /* [N * 0x97883B42 >> 57] (rcp=2542287682) */ \
ENTRY(59464897 , 0x907430ED, 57), /* [N * 0x907430ED >> 57] (rcp=2423533805) */ \
ENTRY(62378699 , 0x89B4CA91, 57), /* [N * 0x89B4CA91 >> 57] (rcp=2310326929) */ \
ENTRY(65435273 , 0x83461568, 57), /* [N * 0x83461568 >> 57] (rcp=2202408296) */ \
ENTRY(68641607 , 0xFA489AA8, 58), /* [N * 0xFA489AA8 >> 58] (rcp=4199062184) */ \
ENTRY(72005051 , 0xEE97B1C5, 58), /* [N * 0xEE97B1C5 >> 58] (rcp=4002918853) */ \
ENTRY(75533323 , 0xE3729293, 58), /* [N * 0xE3729293 >> 58] (rcp=3815936659) */ \
ENTRY(79234469 , 0xD8D2BBA3, 58), /* [N * 0xD8D2BBA3 >> 58] (rcp=3637689251) */ \
ENTRY(83116967 , 0xCEB1F196, 58), /* [N * 0xCEB1F196 >> 58] (rcp=3467768214) */ \
ENTRY(87189709 , 0xC50A4426, 58), /* [N * 0xC50A4426 >> 58] (rcp=3305784358) */ \
ENTRY(91462061 , 0xBBD6052B, 58), /* [N * 0xBBD6052B >> 58] (rcp=3151365419) */ \
ENTRY(95943737 , 0xB30FD999, 58), /* [N * 0xB30FD999 >> 58] (rcp=3004160409) */ \
ENTRY(100644991 , 0xAAB29CED, 58), /* [N * 0xAAB29CED >> 58] (rcp=2863832301) */ \
ENTRY(105576619 , 0xA2B96421, 58), /* [N * 0xA2B96421 >> 58] (rcp=2730058785) */ \
ENTRY(110749901 , 0x9B1F8434, 58), /* [N * 0x9B1F8434 >> 58] (rcp=2602533940) */ \
ENTRY(116176651 , 0x93E08B4A, 58), /* [N * 0x93E08B4A >> 58] (rcp=2480966474) */ \
ENTRY(121869317 , 0x8CF837E0, 58), /* [N * 0x8CF837E0 >> 58] (rcp=2365077472) */ \
ENTRY(127840913 , 0x86627F01, 58), /* [N * 0x86627F01 >> 58] (rcp=2254601985) */ \
ENTRY(134105159 , 0x801B8178, 58), /* [N * 0x801B8178 >> 58] (rcp=2149286264) */ \
ENTRY(140676353 , 0xF43F294F, 59), /* [N * 0xF43F294F >> 59] (rcp=4097780047) */ \
ENTRY(147569509 , 0xE8D67089, 59), /* [N * 0xE8D67089 >> 59] (rcp=3906367625) */ \
ENTRY(154800449 , 0xDDF6243C, 59), /* [N * 0xDDF6243C >> 59] (rcp=3723895868) */ \
ENTRY(162385709 , 0xD397E6AE, 59), /* [N * 0xD397E6AE >> 59] (rcp=3549947566) */ \
ENTRY(170342629 , 0xC9B5A65A, 59), /* [N * 0xC9B5A65A >> 59] (rcp=3384125018) */ \
ENTRY(178689419 , 0xC0499865, 59), /* [N * 0xC0499865 >> 59] (rcp=3226048613) */ \
ENTRY(187445201 , 0xB74E35FA, 59), /* [N * 0xB74E35FA >> 59] (rcp=3075356154) */ \
ENTRY(196630033 , 0xAEBE3AC1, 59), /* [N * 0xAEBE3AC1 >> 59] (rcp=2931702465) */ \
ENTRY(206264921 , 0xA694A37F, 59), /* [N * 0xA694A37F >> 59] (rcp=2794759039) */ \
ENTRY(216371963 , 0x9ECCA59F, 59), /* [N * 0x9ECCA59F >> 59] (rcp=2664211871) */ \
ENTRY(226974197 , 0x9761B6AE, 59), /* [N * 0x9761B6AE >> 59] (rcp=2539763374) */ \
ENTRY(238095983 , 0x904F79A1, 59), /* [N * 0x904F79A1 >> 59] (rcp=2421127585) */ \
ENTRY(249762697 , 0x8991CD1F, 59), /* [N * 0x8991CD1F >> 59] (rcp=2308033823) */ \
ENTRY(262001071 , 0x8324BCA5, 59), /* [N * 0x8324BCA5 >> 59] (rcp=2200222885) */ \
ENTRY(274839137 , 0xFA090732, 60), /* [N * 0xFA090732 >> 60] (rcp=4194895666) */ \
ENTRY(288306269 , 0xEE5B16ED, 60), /* [N * 0xEE5B16ED >> 60] (rcp=3998947053) */ \
ENTRY(302433337 , 0xE338CE49, 60), /* [N * 0xE338CE49 >> 60] (rcp=3812150857) */ \
ENTRY(317252587 , 0xD89BABC0, 60), /* [N * 0xD89BABC0 >> 60] (rcp=3634080704) */ \
ENTRY(374358107 , 0xB790EF43, 60), /* [N * 0xB790EF43 >> 60] (rcp=3079728963) */ \
ENTRY(441742621 , 0x9B908414, 60), /* [N * 0x9B908414 >> 60] (rcp=2609939476) */ \
ENTRY(521256293 , 0x83D596FA, 60), /* [N * 0x83D596FA >> 60] (rcp=2211813114) */ \
ENTRY(615082441 , 0xDF72B16E, 61), /* [N * 0xDF72B16E >> 61] (rcp=3748835694) */ \
ENTRY(725797313 , 0xBD5CDB3B, 61), /* [N * 0xBD5CDB3B >> 61] (rcp=3176979259) */ \
ENTRY(856440829 , 0xA07A14E9, 61), /* [N * 0xA07A14E9 >> 61] (rcp=2692355305) */ \
ENTRY(1010600209, 0x87FF5289, 61), /* [N * 0x87FF5289 >> 61] (rcp=2281656969) */ \
ENTRY(1192508257, 0xE6810540, 62), /* [N * 0xE6810540 >> 62] (rcp=3867215168) */ \
ENTRY(1407159797, 0xC357A480, 62), /* [N * 0xC357A480 >> 62] (rcp=3277300864) */ \
ENTRY(1660448617, 0xA58B5B4F, 62), /* [N * 0xA58B5B4F >> 62] (rcp=2777373519) */ \
ENTRY(1959329399, 0x8C4AB55F, 62), /* [N * 0x8C4AB55F >> 62] (rcp=2353706335) */ \
ENTRY(2312008693, 0xEDC86320, 63), /* [N * 0xEDC86320 >> 63] (rcp=3989332768) */ \
ENTRY(2728170257, 0xC982C4D2, 63), /* [N * 0xC982C4D2 >> 63] (rcp=3380790482) */ \
ENTRY(3219240923, 0xAAC599B6, 63) /* [N * 0xAAC599B6 >> 63] (rcp=2865076662) */
struct HashPrime {
//! Prime number
uint32_t prime;
//! Reciprocal to turn division into multiplication.
uint32_t rcp;
};
static const HashPrime ZoneHash_primeArray[] = {
#define E(PRIME, RCP, SHIFT) { PRIME, RCP }
ASMJIT_POPULATE_PRIMES(E)
#undef E
};
static const uint8_t ZoneHash_primeShift[] = {
#define E(PRIME, RCP, SHIFT) uint8_t(SHIFT)
ASMJIT_POPULATE_PRIMES(E)
#undef E
};
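// [Editor's sketch, not part of asmjit] Illustrates how the (prime, rcp, shift)
// triplets above replace `hash % prime` with a multiply and a shift: the
// reciprocal multiplication recovers the quotient, and subtracting
// quotient * prime yields the remainder. This mirrors `ZoneHashBase::_calcMod()`
// below; the function name is illustrative only.
static inline uint32_t ZoneHash_exampleMod(uint32_t hash, uint32_t primeIndex) noexcept {
  uint32_t prime = ZoneHash_primeArray[primeIndex].prime;
  uint32_t rcp   = ZoneHash_primeArray[primeIndex].rcp;
  uint32_t shift = ZoneHash_primeShift[primeIndex];
  uint32_t q = uint32_t((uint64_t(hash) * rcp) >> shift); // Quotient `hash / prime` via reciprocal.
  return hash - q * prime;                                // Remainder, i.e. what `_calcMod()` computes.
}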
// ============================================================================
// [asmjit::ZoneHashBase - Rehash]
// ============================================================================
void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept {
ASMJIT_ASSERT(primeIndex < ASMJIT_ARRAY_SIZE(ZoneHash_primeArray));
uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
ZoneHashNode** oldData = _data;
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
// If the allocation fails we keep using the old table; nodes can still be stored into it, but performance will degrade.
if (ASMJIT_UNLIKELY(newData == nullptr))
return;
uint32_t i;
uint32_t oldCount = _bucketsCount;
_data = newData;
_bucketsCount = newCount;
_bucketsGrow = uint32_t(newCount * 0.9);
_rcpValue = ZoneHash_primeArray[primeIndex].rcp;
_rcpShift = ZoneHash_primeShift[primeIndex];
_primeIndex = uint8_t(primeIndex);
for (i = 0; i < oldCount; i++) {
ZoneHashNode* node = oldData[i];
while (node) {
ZoneHashNode* next = node->_hashNext;
uint32_t hashMod = _calcMod(node->_hashCode);
node->_hashNext = newData[hashMod];
newData[hashMod] = node;
node = next;
}
}
if (oldData != _embedded)
allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
}
// ============================================================================
// [asmjit::ZoneHashBase - Ops]
// ============================================================================
ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
uint32_t hashMod = _calcMod(node->_hashCode);
ZoneHashNode* next = _data[hashMod];
node->_hashNext = next;
_data[hashMod] = node;
if (++_size > _bucketsGrow) {
uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
if (primeIndex > _primeIndex)
_rehash(allocator, primeIndex);
}
return node;
}
ZoneHashNode* ZoneHashBase::_remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
DebugUtils::unused(allocator);
uint32_t hashMod = _calcMod(node->_hashCode);
ZoneHashNode** pPrev = &_data[hashMod];
ZoneHashNode* p = *pPrev;
while (p) {
if (p == node) {
*pPrev = p->_hashNext;
_size--;
return node;
}
pPrev = &p->_hashNext;
p = *pPrev;
}
return nullptr;
}
// ============================================================================
// [asmjit::ZoneHash - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
struct MyHashNode : public ZoneHashNode {
inline MyHashNode(uint32_t key) noexcept
: ZoneHashNode(key),
_key(key) {}
uint32_t _key;
};
struct MyKeyMatcher {
inline MyKeyMatcher(uint32_t key) noexcept
: _key(key) {}
inline uint32_t hashCode() const noexcept { return _key; }
inline bool matches(const MyHashNode* node) const noexcept { return node->_key == _key; }
uint32_t _key;
};
UNIT(zone_hash) {
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
Zone zone(4096);
ZoneAllocator allocator(&zone);
ZoneHash<MyHashNode> hashTable;
uint32_t key;
INFO("Inserting %u elements to HashTable", unsigned(kCount));
for (key = 0; key < kCount; key++) {
hashTable.insert(&allocator, zone.newT<MyHashNode>(key));
}
uint32_t count = kCount;
INFO("Removing %u elements from HashTable and validating each operation", unsigned(kCount));
do {
MyHashNode* node;
for (key = 0; key < count; key++) {
node = hashTable.get(MyKeyMatcher(key));
EXPECT(node != nullptr);
EXPECT(node->_key == key);
}
{
count--;
node = hashTable.get(MyKeyMatcher(count));
hashTable.remove(&allocator, node);
node = hashTable.get(MyKeyMatcher(count));
EXPECT(node == nullptr);
}
} while (count);
EXPECT(hashTable.empty());
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,218 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONEHASH_H_INCLUDED
#define ASMJIT_CORE_ZONEHASH_H_INCLUDED
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneHashNode]
// ============================================================================
//! Node used by \ref ZoneHash template.
//!
//! To make `ZoneHash::get()` work, the key passed to it must provide
//! `uint32_t hashCode()` and `bool matches(const NodeT*)` member functions.
class ZoneHashNode {
public:
ASMJIT_NONCOPYABLE(ZoneHashNode)
inline ZoneHashNode(uint32_t hashCode = 0) noexcept
: _hashNext(nullptr),
_hashCode(hashCode),
_customData(0) {}
//! Next node in the chain, null if it terminates the chain.
ZoneHashNode* _hashNext;
//! Precalculated hash-code of key.
uint32_t _hashCode;
//! Padding, can be reused by any Node that inherits `ZoneHashNode`.
uint32_t _customData;
};
// ============================================================================
// [asmjit::ZoneHashBase]
// ============================================================================
//! Base class used by the \ref ZoneHash template.
class ZoneHashBase {
public:
ASMJIT_NONCOPYABLE(ZoneHashBase)
//! Buckets data.
ZoneHashNode** _data;
//! Count of records inserted into the hash table.
size_t _size;
//! Count of hash buckets.
uint32_t _bucketsCount;
//! When buckets array should grow (only checked after insertion).
uint32_t _bucketsGrow;
//! Reciprocal value of `_bucketsCount`.
uint32_t _rcpValue;
//! How many bits to shift right when hash is multiplied with `_rcpValue`.
uint8_t _rcpShift;
//! Prime value index in internal prime array.
uint8_t _primeIndex;
//! Embedded data, used by empty hash tables.
ZoneHashNode* _embedded[1];
//! \name Construction & Destruction
//! \{
inline ZoneHashBase() noexcept {
reset();
}
inline ZoneHashBase(ZoneHashBase&& other) noexcept {
_data = other._data;
_size = other._size;
_bucketsCount = other._bucketsCount;
_bucketsGrow = other._bucketsGrow;
_rcpValue = other._rcpValue;
_rcpShift = other._rcpShift;
_primeIndex = other._primeIndex;
_embedded[0] = other._embedded[0];
if (_data == other._embedded) _data = _embedded;
}
inline void reset() noexcept {
_data = _embedded;
_size = 0;
_bucketsCount = 1;
_bucketsGrow = 1;
_rcpValue = 1;
_rcpShift = 0;
_primeIndex = 0;
_embedded[0] = nullptr;
}
inline void release(ZoneAllocator* allocator) noexcept {
ZoneHashNode** oldData = _data;
if (oldData != _embedded)
allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
reset();
}
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept { return _size == 0; }
inline size_t size() const noexcept { return _size; }
//! \}
//! \name Utilities
//! \{
inline void _swap(ZoneHashBase& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_bucketsCount, other._bucketsCount);
std::swap(_bucketsGrow, other._bucketsGrow);
std::swap(_rcpValue, other._rcpValue);
std::swap(_rcpShift, other._rcpShift);
std::swap(_primeIndex, other._primeIndex);
std::swap(_embedded[0], other._embedded[0]);
if (_data == other._embedded) _data = _embedded;
if (other._data == _embedded) other._data = other._embedded;
}
//! \cond INTERNAL
inline uint32_t _calcMod(uint32_t hash) const noexcept {
uint32_t x = uint32_t((uint64_t(hash) * _rcpValue) >> _rcpShift);
return hash - x * _bucketsCount;
}
ASMJIT_API void _rehash(ZoneAllocator* allocator, uint32_t newCount) noexcept;
ASMJIT_API ZoneHashNode* _insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
ASMJIT_API ZoneHashNode* _remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
//! \endcond
//! \}
};
// ============================================================================
// [asmjit::ZoneHash]
// ============================================================================
//! Low-level hash table specialized for storing string keys and POD values.
//!
//! This hash table allows duplicates to be inserted; the API is low-level
//! enough that preventing them is up to the caller. The usual pattern is to
//! `get()` the node first and then either modify it or `insert()` a new one,
//! depending on the intention (see the sketch after this class).
template<typename NodeT>
class ZoneHash : public ZoneHashBase {
public:
ASMJIT_NONCOPYABLE(ZoneHash<NodeT>)
typedef NodeT Node;
//! \name Construction & Destruction
//! \{
inline ZoneHash() noexcept
: ZoneHashBase() {}
inline ZoneHash(ZoneHash&& other) noexcept
: ZoneHash(other) {}
//! \}
//! \name Utilities
//! \{
inline void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); }
template<typename KeyT>
inline NodeT* get(const KeyT& key) const noexcept {
uint32_t hashMod = _calcMod(key.hashCode());
NodeT* node = static_cast<NodeT*>(_data[hashMod]);
while (node && !key.matches(node))
node = static_cast<NodeT*>(node->_hashNext);
return node;
}
inline NodeT* insert(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_insert(allocator, node)); }
inline NodeT* remove(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_remove(allocator, node)); }
//! \}
};
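// [Editor's sketch, not part of asmjit] The get-then-insert pattern referenced
// above, using hypothetical `ExampleIdNode` / `ExampleIdKey` types. The key
// supplies `hashCode()` and `matches()`, which is what `get()` calls, and
// duplicates are avoided only because the lookup happens before the insert.
class ExampleIdNode : public ZoneHashNode {
public:
  inline explicit ExampleIdNode(uint32_t id) noexcept
    : ZoneHashNode(id),
      _id(id) {}
  uint32_t _id;
};
struct ExampleIdKey {
  inline explicit ExampleIdKey(uint32_t id) noexcept : _id(id) {}
  inline uint32_t hashCode() const noexcept { return _id; }
  inline bool matches(const ExampleIdNode* node) const noexcept { return node->_id == _id; }
  uint32_t _id;
};
static inline ExampleIdNode* ZoneHash_exampleGetOrInsert(Zone& zone, ZoneAllocator& allocator, ZoneHash<ExampleIdNode>& table, uint32_t id) noexcept {
  ExampleIdNode* node = table.get(ExampleIdKey(id));
  if (!node) {
    ExampleIdNode* newNode = zone.newT<ExampleIdNode>(id);
    if (newNode)
      node = table.insert(&allocator, newNode);
  }
  return node;
}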
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONEHASH_H_INCLUDED

@ -0,0 +1,182 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/zone.h"
#include "../core/zonelist.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneList - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
class MyListNode : public ZoneListNode<MyListNode> {};
UNIT(zone_list) {
Zone zone(4096);
ZoneList<MyListNode> list;
MyListNode* a = zone.newT<MyListNode>();
MyListNode* b = zone.newT<MyListNode>();
MyListNode* c = zone.newT<MyListNode>();
MyListNode* d = zone.newT<MyListNode>();
INFO("Append / Unlink");
// []
EXPECT(list.empty() == true);
// [A]
list.append(a);
EXPECT(list.empty() == false);
EXPECT(list.first() == a);
EXPECT(list.last() == a);
EXPECT(a->prev() == nullptr);
EXPECT(a->next() == nullptr);
// [A, B]
list.append(b);
EXPECT(list.first() == a);
EXPECT(list.last() == b);
EXPECT(a->prev() == nullptr);
EXPECT(a->next() == b);
EXPECT(b->prev() == a);
EXPECT(b->next() == nullptr);
// [A, B, C]
list.append(c);
EXPECT(list.first() == a);
EXPECT(list.last() == c);
EXPECT(a->prev() == nullptr);
EXPECT(a->next() == b);
EXPECT(b->prev() == a);
EXPECT(b->next() == c);
EXPECT(c->prev() == b);
EXPECT(c->next() == nullptr);
// [B, C]
list.unlink(a);
EXPECT(list.first() == b);
EXPECT(list.last() == c);
EXPECT(a->prev() == nullptr);
EXPECT(a->next() == nullptr);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == c);
EXPECT(c->prev() == b);
EXPECT(c->next() == nullptr);
// [B]
list.unlink(c);
EXPECT(list.first() == b);
EXPECT(list.last() == b);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == nullptr);
EXPECT(c->prev() == nullptr);
EXPECT(c->next() == nullptr);
// []
list.unlink(b);
EXPECT(list.empty() == true);
EXPECT(list.first() == nullptr);
EXPECT(list.last() == nullptr);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == nullptr);
INFO("Prepend / Unlink");
// [A]
list.prepend(a);
EXPECT(list.empty() == false);
EXPECT(list.first() == a);
EXPECT(list.last() == a);
EXPECT(a->prev() == nullptr);
EXPECT(a->next() == nullptr);
// [B, A]
list.prepend(b);
EXPECT(list.first() == b);
EXPECT(list.last() == a);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == a);
EXPECT(a->prev() == b);
EXPECT(a->next() == nullptr);
INFO("InsertAfter / InsertBefore");
// [B, A, C]
list.insertAfter(a, c);
EXPECT(list.first() == b);
EXPECT(list.last() == c);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == a);
EXPECT(a->prev() == b);
EXPECT(a->next() == c);
EXPECT(c->prev() == a);
EXPECT(c->next() == nullptr);
// [B, D, A, C]
list.insertBefore(a, d);
EXPECT(list.first() == b);
EXPECT(list.last() == c);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == d);
EXPECT(d->prev() == b);
EXPECT(d->next() == a);
EXPECT(a->prev() == d);
EXPECT(a->next() == c);
EXPECT(c->prev() == a);
EXPECT(c->next() == nullptr);
INFO("PopFirst / Pop");
// [D, A, C]
EXPECT(list.popFirst() == b);
EXPECT(b->prev() == nullptr);
EXPECT(b->next() == nullptr);
EXPECT(list.first() == d);
EXPECT(list.last() == c);
EXPECT(d->prev() == nullptr);
EXPECT(d->next() == a);
EXPECT(a->prev() == d);
EXPECT(a->next() == c);
EXPECT(c->prev() == a);
EXPECT(c->next() == nullptr);
// [D, A]
EXPECT(list.pop() == c);
EXPECT(c->prev() == nullptr);
EXPECT(c->next() == nullptr);
EXPECT(list.first() == d);
EXPECT(list.last() == a);
EXPECT(d->prev() == nullptr);
EXPECT(d->next() == a);
EXPECT(a->prev() == d);
EXPECT(a->next() == nullptr);
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,205 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONELIST_H_INCLUDED
#define ASMJIT_CORE_ZONELIST_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneListNode]
// ============================================================================
//! Node used by \ref ZoneList template.
template<typename NodeT>
class ZoneListNode {
public:
ASMJIT_NONCOPYABLE(ZoneListNode)
NodeT* _listNodes[Globals::kLinkCount];
//! \name Construction & Destruction
//! \{
inline ZoneListNode() noexcept
: _listNodes { nullptr, nullptr } {}
inline ZoneListNode(ZoneListNode&& other) noexcept
: _listNodes { other._listNodes[0], other._listNodes[1] } {}
//! \}
//! \name Accessors
//! \{
inline bool hasPrev() const noexcept { return _listNodes[Globals::kLinkPrev] != nullptr; }
inline bool hasNext() const noexcept { return _listNodes[Globals::kLinkNext] != nullptr; }
inline NodeT* prev() const noexcept { return _listNodes[Globals::kLinkPrev]; }
inline NodeT* next() const noexcept { return _listNodes[Globals::kLinkNext]; }
//! \}
};
// ============================================================================
// [asmjit::ZoneList<T>]
// ============================================================================
//! Zone allocated list container that uses nodes of `NodeT` type.
template <typename NodeT>
class ZoneList {
public:
ASMJIT_NONCOPYABLE(ZoneList)
NodeT* _nodes[Globals::kLinkCount];
//! \name Construction & Destruction
//! \{
inline ZoneList() noexcept
: _nodes { nullptr, nullptr } {}
inline ZoneList(ZoneList&& other) noexcept
: _nodes { other._nodes[0], other._nodes[1] } {}
inline void reset() noexcept {
_nodes[0] = nullptr;
_nodes[1] = nullptr;
}
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept { return _nodes[0] == nullptr; }
inline NodeT* first() const noexcept { return _nodes[Globals::kLinkFirst]; }
inline NodeT* last() const noexcept { return _nodes[Globals::kLinkLast]; }
//! \}
//! \name Utilities
//! \{
inline void swap(ZoneList& other) noexcept {
std::swap(_nodes[0], other._nodes[0]);
std::swap(_nodes[1], other._nodes[1]);
}
// Can be used to both append and prepend.
inline void _addNode(NodeT* node, size_t dir) noexcept {
NodeT* prev = _nodes[dir];
node->_listNodes[!dir] = prev;
_nodes[dir] = node;
if (prev)
prev->_listNodes[dir] = node;
else
_nodes[!dir] = node;
}
// Can be used to both insert after and insert before.
inline void _insertNode(NodeT* ref, NodeT* node, size_t dir) noexcept {
ASMJIT_ASSERT(ref != nullptr);
NodeT* prev = ref;
NodeT* next = ref->_listNodes[dir];
prev->_listNodes[dir] = node;
if (next)
next->_listNodes[!dir] = node;
else
_nodes[dir] = node;
node->_listNodes[!dir] = prev;
node->_listNodes[ dir] = next;
}
inline void append(NodeT* node) noexcept { _addNode(node, Globals::kLinkLast); }
inline void prepend(NodeT* node) noexcept { _addNode(node, Globals::kLinkFirst); }
inline void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkNext); }
inline void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkPrev); }
inline NodeT* unlink(NodeT* node) noexcept {
NodeT* prev = node->prev();
NodeT* next = node->next();
if (prev) { prev->_listNodes[1] = next; node->_listNodes[0] = nullptr; } else { _nodes[0] = next; }
if (next) { next->_listNodes[0] = prev; node->_listNodes[1] = nullptr; } else { _nodes[1] = prev; }
node->_listNodes[0] = nullptr;
node->_listNodes[1] = nullptr;
return node;
}
inline NodeT* popFirst() noexcept {
NodeT* node = _nodes[0];
ASMJIT_ASSERT(node != nullptr);
NodeT* next = node->next();
_nodes[0] = next;
if (next) {
next->_listNodes[0] = nullptr;
node->_listNodes[1] = nullptr;
}
else {
_nodes[1] = nullptr;
}
return node;
}
inline NodeT* pop() noexcept {
NodeT* node = _nodes[1];
ASMJIT_ASSERT(node != nullptr);
NodeT* prev = node->prev();
_nodes[1] = prev;
if (prev) {
prev->_listNodes[1] = nullptr;
node->_listNodes[0] = nullptr;
}
else {
_nodes[0] = nullptr;
}
return node;
}
//! \}
};
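// [Editor's sketch, not part of asmjit] Minimal ZoneList usage with a
// hypothetical node type. The list is intrusive: it only rewires the
// `_listNodes` links of nodes the caller has already allocated (typically
// from a Zone), so none of these calls allocate memory.
class ExampleListNode : public ZoneListNode<ExampleListNode> {};
static inline void ZoneList_exampleUsage(ExampleListNode* a, ExampleListNode* b, ExampleListNode* c) noexcept {
  ZoneList<ExampleListNode> list;
  list.append(a);          // [a]
  list.prepend(b);         // [b, a]
  list.insertAfter(b, c);  // [b, c, a]
  list.unlink(c);          // [b, a]  (c's links are cleared)
  ExampleListNode* first = list.popFirst(); // Returns b, list is now [a].
  DebugUtils::unused(first);
}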
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONELIST_H_INCLUDED

@ -0,0 +1,197 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/zone.h"
#include "../core/zonestack.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneStackBase - Init / Reset]
// ============================================================================
Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcept {
ZoneAllocator* oldAllocator = _allocator;
if (oldAllocator) {
Block* block = _block[Globals::kLinkFirst];
while (block) {
Block* next = block->next();
oldAllocator->release(block, kBlockSize);
block = next;
}
_allocator = nullptr;
_block[Globals::kLinkLeft] = nullptr;
_block[Globals::kLinkRight] = nullptr;
}
if (allocator) {
Block* block = static_cast<Block*>(allocator->alloc(kBlockSize));
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorOutOfMemory);
block->_link[Globals::kLinkLeft] = nullptr;
block->_link[Globals::kLinkRight] = nullptr;
block->_start = (uint8_t*)block + middleIndex;
block->_end = (uint8_t*)block + middleIndex;
_allocator = allocator;
_block[Globals::kLinkLeft] = block;
_block[Globals::kLinkRight] = block;
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Ops]
// ============================================================================
Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
ASMJIT_ASSERT(isInitialized());
Block* prev = _block[side];
ASMJIT_ASSERT(!prev->empty());
Block* block = _allocator->allocT<Block>(kBlockSize);
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorOutOfMemory);
block->_link[ side] = nullptr;
block->_link[!side] = prev;
block->_start = (uint8_t*)block + initialIndex;
block->_end = (uint8_t*)block + initialIndex;
prev->_link[side] = block;
_block[side] = block;
return kErrorOk;
}
void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
Block* block = _block[side];
ASMJIT_ASSERT(block->empty());
Block* prev = block->_link[!side];
if (prev) {
ASMJIT_ASSERT(prev->_link[side] == block);
_allocator->release(block, kBlockSize);
prev->_link[side] = nullptr;
_block[side] = prev;
}
else if (_block[!side] == block) {
// If the container becomes empty, center both pointers in the remaining block.
block->_start = (uint8_t*)block + middleIndex;
block->_end = (uint8_t*)block + middleIndex;
}
}
// ============================================================================
// [asmjit::ZoneStack - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
template<typename T>
static void test_zone_stack(ZoneAllocator* allocator, const char* typeName) {
ZoneStack<T> stack;
INFO("Testing ZoneStack<%s>", typeName);
INFO(" (%d items per one Block)", ZoneStack<T>::kNumBlockItems);
EXPECT(stack.init(allocator) == kErrorOk);
EXPECT(stack.empty(), "Stack must be empty after `init()`");
EXPECT(stack.append(42) == kErrorOk);
EXPECT(!stack.empty() , "Stack must not be empty after an item has been appended");
EXPECT(stack.pop() == 42 , "Stack.pop() must return the item that has been appended last");
EXPECT(stack.empty() , "Stack must be empty after the last item has been removed");
EXPECT(stack.prepend(43) == kErrorOk);
EXPECT(!stack.empty() , "Stack must not be empty after an item has been prepended");
EXPECT(stack.popFirst() == 43, "Stack.popFirst() must return the item that has been prepended last");
EXPECT(stack.empty() , "Stack must be empty after the last item has been removed");
int i;
int iMin = -100000;
int iMax = 100000;
INFO("Validating prepend() & popFirst()");
for (i = iMax; i >= 0; i--) stack.prepend(T(i));
for (i = 0; i <= iMax; i++) {
T item = stack.popFirst();
EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
if (!stack.empty()) {
item = stack.popFirst();
EXPECT(i + 1 == item, "Item '%d' didn't match the item '%lld' popped", i + 1, (long long)item);
stack.prepend(item);
}
}
EXPECT(stack.empty());
INFO("Validating append() & pop()");
for (i = 0; i <= iMax; i++) stack.append(T(i));
for (i = iMax; i >= 0; i--) {
T item = stack.pop();
EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
if (!stack.empty()) {
item = stack.pop();
EXPECT(i - 1 == item, "Item '%d' didn't match the item '%lld' popped", i - 1, (long long)item);
stack.append(item);
}
}
EXPECT(stack.empty());
INFO("Validating append()/prepend() & popFirst()");
for (i = 1; i <= iMax; i++) stack.append(T(i));
for (i = 0; i >= iMin; i--) stack.prepend(T(i));
for (i = iMin; i <= iMax; i++) {
T item = stack.popFirst();
EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
}
EXPECT(stack.empty());
INFO("Validating append()/prepend() & pop()");
for (i = 0; i >= iMin; i--) stack.prepend(T(i));
for (i = 1; i <= iMax; i++) stack.append(T(i));
for (i = iMax; i >= iMin; i--) {
T item = stack.pop();
EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
}
EXPECT(stack.empty());
}
UNIT(zone_stack) {
Zone zone(8096 - Zone::kBlockOverhead);
ZoneAllocator allocator(&zone);
test_zone_stack<int>(&allocator, "int");
test_zone_stack<int64_t>(&allocator, "int64_t");
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,234 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONESTACK_H_INCLUDED
#define ASMJIT_CORE_ZONESTACK_H_INCLUDED
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneStackBase]
// ============================================================================
//! Base class used by \ref ZoneStack.
class ZoneStackBase {
public:
ASMJIT_NONCOPYABLE(ZoneStackBase)
static constexpr uint32_t kBlockSize = ZoneAllocator::kHiMaxSize;
struct Block {
inline bool empty() const noexcept { return _start == _end; }
inline Block* prev() const noexcept { return _link[Globals::kLinkLeft]; }
inline Block* next() const noexcept { return _link[Globals::kLinkRight]; }
inline void setPrev(Block* block) noexcept { _link[Globals::kLinkLeft] = block; }
inline void setNext(Block* block) noexcept { _link[Globals::kLinkRight] = block; }
template<typename T>
inline T* start() const noexcept { return static_cast<T*>(_start); }
template<typename T>
inline void setStart(T* start) noexcept { _start = static_cast<void*>(start); }
template<typename T>
inline T* end() const noexcept { return (T*)_end; }
template<typename T>
inline void setEnd(T* end) noexcept { _end = (void*)end; }
template<typename T>
inline T* data() const noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
template<typename T>
inline bool canPrepend() const noexcept { return _start > data<void>(); }
template<typename T>
inline bool canAppend() const noexcept {
size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T);
size_t kStartBlockIndex = sizeof(Block);
size_t kEndBlockIndex = kStartBlockIndex + kNumBlockItems * sizeof(T);
return (uintptr_t)_end <= ((uintptr_t)this + kEndBlockIndex - sizeof(T));
}
Block* _link[Globals::kLinkCount]; //!< Next and previous blocks.
void* _start; //!< Pointer to the start of the array.
void* _end; //!< Pointer to the end of the array.
};
//! Allocator used to allocate data.
ZoneAllocator* _allocator;
//! First and last blocks.
Block* _block[Globals::kLinkCount];
//! \name Construction / Destruction
//! \{
inline ZoneStackBase() noexcept {
_allocator = nullptr;
_block[0] = nullptr;
_block[1] = nullptr;
}
inline ~ZoneStackBase() noexcept { reset(); }
inline bool isInitialized() const noexcept { return _allocator != nullptr; }
ASMJIT_API Error _init(ZoneAllocator* allocator, size_t middleIndex) noexcept;
inline Error reset() noexcept { return _init(nullptr, 0); }
//! \}
//! \name Accessors
//! \{
//! Returns `ZoneAllocator` attached to this container.
inline ZoneAllocator* allocator() const noexcept { return _allocator; }
inline bool empty() const noexcept {
ASMJIT_ASSERT(isInitialized());
return _block[0]->start<void>() == _block[1]->end<void>();
}
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept;
ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept;
//! \}
//! \endcond
};
// ============================================================================
// [asmjit::ZoneStack<T>]
// ============================================================================
//! Zone allocated stack container.
template<typename T>
class ZoneStack : public ZoneStackBase {
public:
ASMJIT_NONCOPYABLE(ZoneStack<T>)
enum : uint32_t {
kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)),
kStartBlockIndex = uint32_t(sizeof(Block)),
kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)),
kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T))
};
//! \name Construction / Destruction
//! \{
inline ZoneStack() noexcept {}
inline ~ZoneStack() noexcept {}
inline Error init(ZoneAllocator* allocator) noexcept { return _init(allocator, kMidBlockIndex); }
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE Error prepend(T item) noexcept {
ASMJIT_ASSERT(isInitialized());
Block* block = _block[Globals::kLinkFirst];
if (!block->canPrepend<T>()) {
ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkFirst, kEndBlockIndex));
block = _block[Globals::kLinkFirst];
}
T* ptr = block->start<T>() - 1;
ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
*ptr = item;
block->setStart<T>(ptr);
return kErrorOk;
}
ASMJIT_INLINE Error append(T item) noexcept {
ASMJIT_ASSERT(isInitialized());
Block* block = _block[Globals::kLinkLast];
if (!block->canAppend<T>()) {
ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkLast, kStartBlockIndex));
block = _block[Globals::kLinkLast];
}
T* ptr = block->end<T>();
ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
*ptr++ = item;
block->setEnd(ptr);
return kErrorOk;
}
ASMJIT_INLINE T popFirst() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
Block* block = _block[Globals::kLinkFirst];
ASMJIT_ASSERT(!block->empty());
T* ptr = block->start<T>();
T item = *ptr++;
block->setStart(ptr);
if (block->empty())
_cleanupBlock(Globals::kLinkFirst, kMidBlockIndex);
return item;
}
ASMJIT_INLINE T pop() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
Block* block = _block[Globals::kLinkLast];
ASMJIT_ASSERT(!block->empty());
T* ptr = block->end<T>();
T item = *--ptr;
ASMJIT_ASSERT(ptr >= block->data<T>());
ASMJIT_ASSERT(ptr >= block->start<T>());
block->setEnd(ptr);
if (block->empty())
_cleanupBlock(Globals::kLinkLast, kMidBlockIndex);
return item;
}
//! \}
};
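// [Editor's sketch, not part of asmjit] ZoneStack behaves like a deque backed
// by fixed-size blocks: items can be pushed and popped at both ends, and the
// first block keeps its data centered (kMidBlockIndex) so early prepends do
// not immediately force a new block. Name and values are illustrative only.
static inline Error ZoneStack_exampleUsage(ZoneAllocator* allocator) noexcept {
  ZoneStack<int> stack;
  ASMJIT_PROPAGATE(stack.init(allocator));
  ASMJIT_PROPAGATE(stack.append(2));   // [2]
  ASMJIT_PROPAGATE(stack.append(3));   // [2, 3]
  ASMJIT_PROPAGATE(stack.prepend(1));  // [1, 2, 3]
  int first = stack.popFirst();        // Returns 1, stack is [2, 3].
  int last  = stack.pop();             // Returns 3, stack is [2].
  DebugUtils::unused(first, last);
  return kErrorOk;
}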
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONESTACK_H_INCLUDED

@ -0,0 +1,137 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_SMALLSTRING_H_INCLUDED
#define ASMJIT_CORE_SMALLSTRING_H_INCLUDED
#include "../core/globals.h"
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneStringBase]
// ============================================================================
//! A helper class used by \ref ZoneString implementation.
struct ZoneStringBase {
union {
struct {
uint32_t _size;
char _embedded[sizeof(void*) * 2 - 4];
};
struct {
void* _dummy;
char* _external;
};
};
inline void reset() noexcept {
_dummy = nullptr;
_external = nullptr;
}
Error setData(Zone* zone, uint32_t maxEmbeddedSize, const char* str, size_t size) noexcept {
if (size == SIZE_MAX)
size = strlen(str);
if (size <= maxEmbeddedSize) {
memcpy(_embedded, str, size);
_embedded[size] = '\0';
}
else {
char* external = static_cast<char*>(zone->dup(str, size, true));
if (ASMJIT_UNLIKELY(!external))
return DebugUtils::errored(kErrorOutOfMemory);
_external = external;
}
_size = uint32_t(size);
return kErrorOk;
}
};
// ============================================================================
// [asmjit::ZoneString<N>]
// ============================================================================
//! A string template that can be zone allocated.
//!
//! Helps with creating strings that can be either statically allocated if they
//! are small, or externally allocated in case their size exceeds the limit.
//! The `N` represents the size of the whole `ZoneString` structure; the
//! maximum size of the internal buffer is derived from that size.
template<size_t N>
class ZoneString {
public:
static constexpr uint32_t kWholeSize =
(N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
static constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
union {
ZoneStringBase _base;
char _wholeData[kWholeSize];
};
//! \name Construction & Destruction
//! \{
inline ZoneString() noexcept { reset(); }
inline void reset() noexcept { _base.reset(); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the string is empty.
inline bool empty() const noexcept { return _base._size == 0; }
//! Returns the string data.
inline const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; }
//! Returns the string size.
inline uint32_t size() const noexcept { return _base._size; }
//! Tests whether the string is embedded (i.e. not dynamically allocated).
inline bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
//! Copies a new `data` of the given `size` to the string.
//!
//! If the `size` exceeds the internal buffer the given `zone` will be
//! used to duplicate the data, otherwise the internal buffer will be
//! used as a storage.
inline Error setData(Zone* zone, const char* data, size_t size) noexcept {
return _base.setData(zone, kMaxEmbeddedSize, data, size);
}
//! \}
};
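// [Editor's sketch, not part of asmjit] Short strings stay in the embedded
// buffer, longer ones are duplicated into the Zone, as described above. With
// N == 16 on a 64-bit target, up to 11 characters (plus the terminator) fit
// in the embedded storage. Names are illustrative only.
static inline Error ZoneString_exampleUsage(Zone* zone) noexcept {
  ZoneString<16> name;
  ASMJIT_PROPAGATE(name.setData(zone, "rax", SIZE_MAX));                  // 3 chars -> embedded, no allocation.
  bool wasEmbedded = name.isEmbedded();                                   // true
  ASMJIT_PROPAGATE(name.setData(zone, "a_rather_long_label", SIZE_MAX));  // 19 chars -> duplicated into `zone`.
  DebugUtils::unused(wasEmbedded);
  return kErrorOk;
}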
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_SMALLSTRING_H_INCLUDED

@ -0,0 +1,118 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneTree - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
template<typename NodeT>
struct ZoneRBUnit {
typedef ZoneTree<NodeT> Tree;
static void verifyTree(Tree& tree) noexcept {
EXPECT(checkHeight(static_cast<NodeT*>(tree._root)) > 0);
}
// Check whether the Red-Black tree is valid.
static int checkHeight(NodeT* node) noexcept {
if (!node) return 1;
NodeT* ln = node->left();
NodeT* rn = node->right();
// Invalid tree.
EXPECT(ln == nullptr || *ln < *node);
EXPECT(rn == nullptr || *rn > *node);
// Red violation.
EXPECT(!node->isRed() ||
(!ZoneTreeNode::_isValidRed(ln) && !ZoneTreeNode::_isValidRed(rn)));
// Black violation.
int lh = checkHeight(ln);
int rh = checkHeight(rn);
EXPECT(!lh || !rh || lh == rh);
// Only count black links.
return (lh && rh) ? lh + !node->isRed() : 0;
}
};
class MyRBNode : public ZoneTreeNodeT<MyRBNode> {
public:
ASMJIT_NONCOPYABLE(MyRBNode)
inline explicit MyRBNode(uint32_t key) noexcept
: _key(key) {}
inline bool operator<(const MyRBNode& other) const noexcept { return _key < other._key; }
inline bool operator>(const MyRBNode& other) const noexcept { return _key > other._key; }
inline bool operator<(uint32_t queryKey) const noexcept { return _key < queryKey; }
inline bool operator>(uint32_t queryKey) const noexcept { return _key > queryKey; }
uint32_t _key;
};
UNIT(zone_rbtree) {
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
Zone zone(4096);
ZoneTree<MyRBNode> rbTree;
uint32_t key;
INFO("Inserting %u elements to RBTree and validating each operation", unsigned(kCount));
for (key = 0; key < kCount; key++) {
rbTree.insert(zone.newT<MyRBNode>(key));
ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
}
uint32_t count = kCount;
INFO("Removing %u elements from RBTree and validating each operation", unsigned(kCount));
do {
MyRBNode* node;
for (key = 0; key < count; key++) {
node = rbTree.get(key);
EXPECT(node != nullptr);
EXPECT(node->_key == key);
}
node = rbTree.get(--count);
rbTree.remove(node);
ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
} while (count);
EXPECT(rbTree.empty());
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,385 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONETREE_H_INCLUDED
#define ASMJIT_CORE_ZONETREE_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneTreeNode]
// ============================================================================
//! RB-Tree node.
//!
//! The color is stored in the least significant bit of the `left` node pointer.
//!
//! WARNING: Always use accessors to access left and right children.
class ZoneTreeNode {
public:
ASMJIT_NONCOPYABLE(ZoneTreeNode)
enum : uintptr_t {
kRedMask = 0x1,
kPtrMask = ~kRedMask
};
uintptr_t _rbNodeData[Globals::kLinkCount];
//! \name Construction & Destruction
//! \{
inline ZoneTreeNode() noexcept
: _rbNodeData { 0, 0 } {}
//! \}
//! \name Accessors
//! \{
inline bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); }
inline bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; }
inline bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; }
inline bool hasRight() const noexcept { return _rbNodeData[1] != 0; }
template<typename T = ZoneTreeNode>
inline T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); }
template<typename T = ZoneTreeNode>
inline T* left() const noexcept { return static_cast<T*>(_getLeft()); }
template<typename T = ZoneTreeNode>
inline T* right() const noexcept { return static_cast<T*>(_getRight()); }
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
inline ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); }
inline ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); }
inline ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); }
inline void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; }
inline void _setLeft(ZoneTreeNode* node) noexcept { _rbNodeData[0] = (_rbNodeData[0] & kRedMask) | (uintptr_t)node; }
inline void _setRight(ZoneTreeNode* node) noexcept { _rbNodeData[1] = (uintptr_t)node; }
inline void _makeRed() noexcept { _rbNodeData[0] |= kRedMask; }
inline void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; }
//! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); }
//! \}
//! \endcond
};
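// [Editor's sketch, not part of asmjit] The pointer/color packing used above:
// nodes are at least pointer-aligned, so bit 0 of the `left` link is always
// free and stores the RED flag, while the pointer itself is recovered by
// masking that bit off. Function name is illustrative only.
static inline void ZoneTreeNode_examplePacking(ZoneTreeNode* node, ZoneTreeNode* leftChild) noexcept {
  node->_setLeft(leftChild);             // Stores the pointer bits, preserves the color bit.
  node->_makeRed();                      // Sets kRedMask (bit 0) in _rbNodeData[0].
  ZoneTreeNode* left = node->_getLeft(); // Equals `leftChild`; the color bit is masked away.
  bool red = node->isRed();              // true
  DebugUtils::unused(left, red);
}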
//! RB-Tree node casted to `NodeT`.
template<typename NodeT>
class ZoneTreeNodeT : public ZoneTreeNode {
public:
ASMJIT_NONCOPYABLE(ZoneTreeNodeT)
//! \name Construction & Destruction
//! \{
inline ZoneTreeNodeT() noexcept
: ZoneTreeNode() {}
//! \}
//! \name Accessors
//! \{
inline NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); }
inline NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); }
inline NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); }
//! \}
};
// ============================================================================
// [asmjit::ZoneTree]
// ============================================================================
//! RB-Tree.
template<typename NodeT>
class ZoneTree {
public:
ASMJIT_NONCOPYABLE(ZoneTree)
typedef NodeT Node;
NodeT* _root;
//! \name Construction & Destruction
//! \{
inline ZoneTree() noexcept
: _root(nullptr) {}
inline ZoneTree(ZoneTree&& other) noexcept
: _root(other._root) {}
inline void reset() noexcept { _root = nullptr; }
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept { return _root == nullptr; }
inline NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
//! \}
//! \name Utilities
//! \{
inline void swap(ZoneTree& other) noexcept {
std::swap(_root, other._root);
}
template<typename CompareT = Support::Compare<Support::kSortAscending>>
void insert(NodeT* node, const CompareT& cmp = CompareT()) noexcept {
// Node to insert must not contain garbage.
ASMJIT_ASSERT(!node->hasLeft());
ASMJIT_ASSERT(!node->hasRight());
ASMJIT_ASSERT(!node->isRed());
if (!_root) {
_root = node;
return;
}
ZoneTreeNode head; // False root node,
head._setRight(_root); // having root on the right.
ZoneTreeNode* g = nullptr; // Grandparent.
ZoneTreeNode* p = nullptr; // Parent.
ZoneTreeNode* t = &head; // Iterator.
ZoneTreeNode* q = _root; // Query.
size_t dir = 0; // Direction for accessing child nodes.
size_t last = 0; // Not needed to initialize, but makes some tools happy.
node->_makeRed(); // New nodes are always red and violations fixed appropriately.
// Search down the tree.
for (;;) {
if (!q) {
// Insert new node at the bottom.
q = node;
p->_setChild(dir, node);
}
else if (_isValidRed(q->_getLeft()) && _isValidRed(q->_getRight())) {
// Color flip.
q->_makeRed();
q->_getLeft()->_makeBlack();
q->_getRight()->_makeBlack();
}
// Fix red violation.
if (_isValidRed(q) && _isValidRed(p))
t->_setChild(t->_getRight() == g,
q == p->_getChild(last) ? _singleRotate(g, !last) : _doubleRotate(g, !last));
// Stop if found.
if (q == node)
break;
last = dir;
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Update helpers.
if (g) t = g;
g = p;
p = q;
q = q->_getChild(dir);
}
// Update root and make it black.
_root = static_cast<NodeT*>(head._getRight());
_root->_makeBlack();
}
//! Remove node from RBTree.
template<typename CompareT = Support::Compare<Support::kSortAscending>>
void remove(ZoneTreeNode* node, const CompareT& cmp = CompareT()) noexcept {
ZoneTreeNode head; // False root node,
head._setRight(_root); // having root on the right.
ZoneTreeNode* g = nullptr; // Grandparent.
ZoneTreeNode* p = nullptr; // Parent.
ZoneTreeNode* q = &head; // Query.
ZoneTreeNode* f = nullptr; // Found item.
ZoneTreeNode* gf = nullptr; // Found grandparent.
size_t dir = 1; // Direction (0 or 1).
// Search and push a red down.
while (q->hasChild(dir)) {
size_t last = dir;
// Update helpers.
g = p;
p = q;
q = q->_getChild(dir);
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Save found node.
if (q == node) {
f = q;
gf = g;
}
// Push the red node down.
if (!_isValidRed(q) && !_isValidRed(q->_getChild(dir))) {
if (_isValidRed(q->_getChild(!dir))) {
ZoneTreeNode* child = _singleRotate(q, dir);
p->_setChild(last, child);
p = child;
}
else if (!_isValidRed(q->_getChild(!dir)) && p->_getChild(!last)) {
ZoneTreeNode* s = p->_getChild(!last);
if (!_isValidRed(s->_getChild(!last)) && !_isValidRed(s->_getChild(last))) {
// Color flip.
p->_makeBlack();
s->_makeRed();
q->_makeRed();
}
else {
size_t dir2 = g->_getRight() == p;
ZoneTreeNode* child = g->_getChild(dir2);
if (_isValidRed(s->_getChild(last))) {
child = _doubleRotate(p, last);
g->_setChild(dir2, child);
}
else if (_isValidRed(s->_getChild(!last))) {
child = _singleRotate(p, last);
g->_setChild(dir2, child);
}
// Ensure correct coloring.
q->_makeRed();
child->_makeRed();
child->_getLeft()->_makeBlack();
child->_getRight()->_makeBlack();
}
}
}
}
// Replace and remove.
ASMJIT_ASSERT(f != nullptr);
ASMJIT_ASSERT(f != &head);
ASMJIT_ASSERT(q != &head);
p->_setChild(p->_getRight() == q,
q->_getChild(q->_getLeft() == nullptr));
// NOTE: The original algorithm used a trick to just copy 'key/value' to
// `f` and mark `q` for deletion. But this is unacceptable here as we
// really want to destroy the passed `node`. So, we have to make sure that
// we have really removed `f` and not `q`.
if (f != q) {
ASMJIT_ASSERT(f != &head);
ASMJIT_ASSERT(f != gf);
ZoneTreeNode* n = gf ? gf : &head;
dir = (n == &head) ? 1 : cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
for (;;) {
if (n->_getChild(dir) == f) {
n->_setChild(dir, q);
// RAW copy, including the color.
q->_rbNodeData[0] = f->_rbNodeData[0];
q->_rbNodeData[1] = f->_rbNodeData[1];
break;
}
n = n->_getChild(dir);
// `n` cannot be null here, as the loop must reach `f` within a few iterations.
ASMJIT_ASSERT(n != nullptr);
dir = cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
}
}
// Update root and make it black.
_root = static_cast<NodeT*>(head._getRight());
if (_root) _root->_makeBlack();
}
template<typename KeyT, typename CompareT = Support::Compare<Support::kSortAscending>>
ASMJIT_INLINE NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
ZoneTreeNode* node = _root;
while (node) {
auto result = cmp(*static_cast<const NodeT*>(node), key);
if (result == 0) break;
// Go left or right depending on the `result`.
node = node->_getChild(result < 0);
}
return static_cast<NodeT*>(node);
}
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return ZoneTreeNode::_isValidRed(node); }
//! Single rotation.
static ASMJIT_INLINE ZoneTreeNode* _singleRotate(ZoneTreeNode* root, size_t dir) noexcept {
ZoneTreeNode* save = root->_getChild(!dir);
root->_setChild(!dir, save->_getChild(dir));
save->_setChild( dir, root);
root->_makeRed();
save->_makeBlack();
return save;
}
//! Double rotation.
static ASMJIT_INLINE ZoneTreeNode* _doubleRotate(ZoneTreeNode* root, size_t dir) noexcept {
root->_setChild(!dir, _singleRotate(root->_getChild(!dir), !dir));
return _singleRotate(root, dir);
}
//! \}
//! \endcond
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONETREE_H_INCLUDED

@ -0,0 +1,377 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneVectorBase - Helpers]
// ============================================================================
Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t threshold = Globals::kGrowThreshold / sizeOfT;
uint32_t capacity = _capacity;
uint32_t after = _size;
if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after))
return DebugUtils::errored(kErrorOutOfMemory);
after += n;
if (capacity >= after)
return kErrorOk;
// ZoneVector is used as an array to hold short-lived data structures used
// during code generation. The growing strategy is simple - use small capacity
// at the beginning (very good for ZoneAllocator) and then grow quicker to
// prevent successive reallocations.
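// For example: appending one element at a time to an empty vector grows the
// capacity 4 -> 8 -> 16 -> 64 -> 256, then keeps doubling it until it crosses
// `threshold`, after which it grows linearly by `threshold` per step.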
if (capacity < 4)
capacity = 4;
else if (capacity < 8)
capacity = 8;
else if (capacity < 16)
capacity = 16;
else if (capacity < 64)
capacity = 64;
else if (capacity < 256)
capacity = 256;
while (capacity < after) {
if (capacity < threshold)
capacity *= 2;
else
capacity += threshold;
}
return _reserve(allocator, sizeOfT, capacity);
}
Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t oldCapacity = _capacity;
if (oldCapacity >= n) return kErrorOk;
uint32_t nBytes = n * sizeOfT;
if (ASMJIT_UNLIKELY(nBytes < n))
return DebugUtils::errored(kErrorOutOfMemory);
size_t allocatedBytes;
uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes));
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorOutOfMemory);
void* oldData = _data;
if (_size)
memcpy(newData, oldData, size_t(_size) * sizeOfT);
if (oldData)
allocator->release(oldData, size_t(oldCapacity) * sizeOfT);
_capacity = uint32_t(allocatedBytes / sizeOfT);
ASMJIT_ASSERT(_capacity >= n);
_data = newData;
return kErrorOk;
}
Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t size = _size;
if (_capacity < n) {
ASMJIT_PROPAGATE(_grow(allocator, sizeOfT, n - size));
ASMJIT_ASSERT(_capacity >= n);
}
if (size < n)
memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT);
_size = n;
return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneBitVector - Ops]
// ============================================================================
Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept {
BitWord* data = _data;
uint32_t newSize = other.size();
if (!newSize) {
_size = 0;
return kErrorOk;
}
if (newSize > _capacity) {
// Realloc needed... Calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
return DebugUtils::errored(kErrorOutOfMemory);
// Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorOutOfMemory);
// `allocatedCapacity` is now in bytes; we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8;
// Arithmetic overflow should normally not happen. If it happens we just
// change `allocatedCapacityInBits` to `minimumCapacityInBits` as that value
// is still safe to pass to `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
allocatedCapacityInBits = minimumCapacityInBits;
if (data)
allocator->release(data, _capacity / 8);
data = newData;
_data = data;
_capacity = uint32_t(allocatedCapacityInBits);
}
_size = newSize;
_copyBits(data, other.data(), _wordsPerBits(newSize));
return kErrorOk;
}
Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept {
ASMJIT_ASSERT(idealCapacity >= newSize);
if (newSize <= _size) {
// The size after the resize is less than or equal to the current size.
uint32_t idx = newSize / kBitWordSizeInBits;
uint32_t bit = newSize % kBitWordSizeInBits;
// Just set all bits outside of the new size in the last word to zero.
// There may be no bits to clear if `bit` is zero, which happens when
// `newSize` is a multiple of `kBitWordSizeInBits` (64, 128, and so on). In
// that case don't change anything, as that would mean touching bits outside
// of `_size`.
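// For example, with 64-bit BitWords and newSize = 70: idx = 1, bit = 6, and
// the mask (BitWord(1) << 6) - 1 keeps only the six surviving bits of the
// last word.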
if (bit)
_data[idx] &= (BitWord(1) << bit) - 1u;
_size = newSize;
return kErrorOk;
}
uint32_t oldSize = _size;
BitWord* data = _data;
if (newSize > _capacity) {
// Realloc needed, calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
return DebugUtils::errored(kErrorOutOfMemory);
// Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData))
return DebugUtils::errored(kErrorOutOfMemory);
// `allocatedCapacity` is now in bytes; we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8;
// Arithmetic overflow should normally not happen. If it happens we just
// change `allocatedCapacityInBits` to `minimumCapacityInBits` as that value
// is still safe to pass to `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
allocatedCapacityInBits = minimumCapacityInBits;
_copyBits(newData, data, _wordsPerBits(oldSize));
if (data)
allocator->release(data, _capacity / 8);
data = newData;
_data = data;
_capacity = uint32_t(allocatedCapacityInBits);
}
// Start (of the old size) and end (of the new size) bits
uint32_t idx = oldSize / kBitWordSizeInBits;
uint32_t startBit = oldSize % kBitWordSizeInBits;
uint32_t endBit = newSize % kBitWordSizeInBits;
// Set new bits to either 0 or 1. The `pattern` is used to set multiple
// bits per bit-word and contains either all zeros or all ones.
BitWord pattern = Support::bitMaskFromBool<BitWord>(newBitsValue);
// First initialize the last bit-word of the old size.
if (startBit) {
uint32_t nBits = 0;
if (idx == (newSize / kBitWordSizeInBits)) {
// The number of bit-words is the same after the resize. In that case
// we only need to set the necessary bits in the current last bit-word.
ASMJIT_ASSERT(startBit < endBit);
nBits = endBit - startBit;
}
else {
// There will be more bit-words after the resize. In that case we don't
// have to be extra careful about the last bit-word of the old size.
nBits = kBitWordSizeInBits - startBit;
}
data[idx++] |= pattern << nBits;
}
// Initialize all bit-words after the last bit-word of the old size.
uint32_t endIdx = _wordsPerBits(newSize);
while (idx < endIdx) data[idx++] = pattern;
// Clear unused bits of the last bit-word.
if (endBit)
data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1);
_size = newSize;
return kErrorOk;
}
Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t kThreshold = Globals::kGrowThreshold * 8;
uint32_t newSize = _size + 1;
uint32_t idealCapacity = _capacity;
if (idealCapacity < 128)
idealCapacity = 128;
else if (idealCapacity <= kThreshold)
idealCapacity *= 2;
else
idealCapacity += kThreshold;
if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max()))
return DebugUtils::errored(kErrorOutOfMemory);
idealCapacity = newSize;
}
return _resize(allocator, newSize, idealCapacity, value);
}
// ============================================================================
// [asmjit::ZoneVector / ZoneBitVector - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
template<typename T>
static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
int i;
int kMax = 100000;
ZoneVector<T> vec;
INFO("ZoneVector<%s> basic tests", typeName);
EXPECT(vec.append(allocator, 0) == kErrorOk);
EXPECT(vec.empty() == false);
EXPECT(vec.size() == 1);
EXPECT(vec.capacity() >= 1);
EXPECT(vec.indexOf(0) == 0);
EXPECT(vec.indexOf(-11) == Globals::kNotFound);
vec.clear();
EXPECT(vec.empty());
EXPECT(vec.size() == 0);
EXPECT(vec.indexOf(0) == Globals::kNotFound);
for (i = 0; i < kMax; i++) {
EXPECT(vec.append(allocator, T(i)) == kErrorOk);
}
EXPECT(vec.empty() == false);
EXPECT(vec.size() == uint32_t(kMax));
EXPECT(vec.indexOf(T(kMax - 1)) == uint32_t(kMax - 1));
EXPECT(vec.rbegin()[0] == kMax - 1);
vec.release(allocator);
}
static void test_zone_bitvector(ZoneAllocator* allocator) {
Zone zone(8096 - Zone::kBlockOverhead);
uint32_t i, count;
uint32_t kMaxCount = 100;
ZoneBitVector vec;
EXPECT(vec.empty());
EXPECT(vec.size() == 0);
INFO("ZoneBitVector::resize()");
for (count = 1; count < kMaxCount; count++) {
vec.clear();
EXPECT(vec.resize(allocator, count, false) == kErrorOk);
EXPECT(vec.size() == count);
for (i = 0; i < count; i++)
EXPECT(vec.bitAt(i) == false);
vec.clear();
EXPECT(vec.resize(allocator, count, true) == kErrorOk);
EXPECT(vec.size() == count);
for (i = 0; i < count; i++)
EXPECT(vec.bitAt(i) == true);
}
INFO("ZoneBitVector::fillBits() / clearBits()");
for (count = 1; count < kMaxCount; count += 2) {
vec.clear();
EXPECT(vec.resize(allocator, count) == kErrorOk);
EXPECT(vec.size() == count);
for (i = 0; i < (count + 1) / 2; i++) {
bool value = bool(i & 1);
if (value)
vec.fillBits(i, count - i * 2);
else
vec.clearBits(i, count - i * 2);
}
for (i = 0; i < count; i++) {
EXPECT(vec.bitAt(i) == bool(i & 1));
}
}
}
UNIT(zone_vector) {
Zone zone(8096 - Zone::kBlockOverhead);
ZoneAllocator allocator(&zone);
test_zone_vector<int>(&allocator, "int");
test_zone_vector<int64_t>(&allocator, "int64_t");
test_zone_bitvector(&allocator);
}
#endif
ASMJIT_END_NAMESPACE

@ -0,0 +1,714 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
#define ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
#include "../core/support.h"
#include "../core/zone.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneVectorBase]
// ============================================================================
//! Base class used by \ref ZoneVector template.
class ZoneVectorBase {
public:
ASMJIT_NONCOPYABLE(ZoneVectorBase)
// STL compatibility;
typedef uint32_t size_type;
typedef ptrdiff_t difference_type;
//! Vector data (untyped).
void* _data = nullptr;
//! Size of the vector.
size_type _size = 0;
//! Capacity of the vector.
size_type _capacity = 0;
protected:
//! \name Construction & Destruction
//! \{
//! Creates a new instance of `ZoneVectorBase`.
inline ZoneVectorBase() noexcept {}
inline ZoneVectorBase(ZoneVectorBase&& other) noexcept
: _data(other._data),
_size(other._size),
_capacity(other._capacity) {}
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
inline void _release(ZoneAllocator* allocator, uint32_t sizeOfT) noexcept {
if (_data != nullptr) {
allocator->release(_data, _capacity * sizeOfT);
reset();
}
}
ASMJIT_API Error _grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
ASMJIT_API Error _reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
inline void _swap(ZoneVectorBase& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_capacity, other._capacity);
}
//! \}
//! \endcond
public:
//! \name Accessors
//! \{
//! Tests whether the vector is empty.
inline bool empty() const noexcept { return _size == 0; }
//! Returns the vector size.
inline size_type size() const noexcept { return _size; }
//! Returns the vector capacity.
inline size_type capacity() const noexcept { return _capacity; }
//! \}
//! \name Utilities
//! \{
//! Makes the vector empty (won't change the capacity or data pointer).
inline void clear() noexcept { _size = 0; }
//! Resets the vector data and sets its `size` to zero.
inline void reset() noexcept {
_data = nullptr;
_size = 0;
_capacity = 0;
}
//! Truncates the vector to at most `n` items.
inline void truncate(size_type n) noexcept {
_size = Support::min(_size, n);
}
//! Sets the size of the vector to `n`. Used internally by some algorithms.
inline void _setSize(size_type n) noexcept {
ASMJIT_ASSERT(n <= _capacity);
_size = n;
}
//! \}
};
// ============================================================================
// [asmjit::ZoneVector<T>]
// ============================================================================
//! Template used to store and manage array of Zone allocated data.
//!
//! This template has these advantages over std::vector<>:
//! - Always non-copyable (this is intentional).
//! - Optimized for working only with POD types.
//! - Uses ZoneAllocator, thus small vectors are almost for free.
//! - Explicit allocation, ZoneAllocator is not part of the data.
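//!
//! A minimal usage sketch (assumes a live `Zone` and `ZoneAllocator`; error
//! handling omitted):
//!
//! ```
//! Zone zone(4096);
//! ZoneAllocator allocator(&zone);
//!
//! ZoneVector<int> vec;
//! for (int i = 0; i < 10; i++)
//!   vec.append(&allocator, i);   // Each append may return an error on OOM.
//!
//! uint32_t n = vec.size();       // n == 10.
//! vec.release(&allocator);       // Returns the memory to the allocator.
//! ```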
template <typename T>
class ZoneVector : public ZoneVectorBase {
public:
ASMJIT_NONCOPYABLE(ZoneVector<T>)
// STL compatibility;
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T* iterator;
typedef const T* const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
//! \name Construction & Destruction
//! \{
inline ZoneVector() noexcept : ZoneVectorBase() {}
inline ZoneVector(ZoneVector&& other) noexcept : ZoneVector(other) {}
//! \}
//! \name Accessors
//! \{
//! Returns vector data.
inline T* data() noexcept { return static_cast<T*>(_data); }
//! Returns vector data (const)
inline const T* data() const noexcept { return static_cast<const T*>(_data); }
//! Returns item at the given index `i` (const).
inline const T& at(size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
inline void _setEndPtr(T* p) noexcept {
ASMJIT_ASSERT(p >= data() && p <= data() + _capacity);
_setSize(uint32_t((uintptr_t)(p - data())));
}
//! \}
//! \name STL Compatibility (Iterators)
//! \{
inline iterator begin() noexcept { return iterator(data()); };
inline const_iterator begin() const noexcept { return const_iterator(data()); };
inline iterator end() noexcept { return iterator(data() + _size); };
inline const_iterator end() const noexcept { return const_iterator(data() + _size); };
inline reverse_iterator rbegin() noexcept { return reverse_iterator(end()); };
inline const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); };
inline reverse_iterator rend() noexcept { return reverse_iterator(begin()); };
inline const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); };
inline const_iterator cbegin() const noexcept { return const_iterator(data()); };
inline const_iterator cend() const noexcept { return const_iterator(data() + _size); };
inline const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); };
inline const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); };
//! \}
//! \name Utilities
//! \{
//! Swaps this vector with `other`.
inline void swap(ZoneVector<T>& other) noexcept { _swap(other); }
//! Prepends `item` to the vector.
inline Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_PROPAGATE(grow(allocator, 1));
::memmove(static_cast<T*>(_data) + 1, _data, size_t(_size) * sizeof(T));
memcpy(_data, &item, sizeof(T));
_size++;
return kErrorOk;
}
//! Inserts an `item` at the specified `index`.
inline Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_ASSERT(index <= _size);
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_PROPAGATE(grow(allocator, 1));
T* dst = static_cast<T*>(_data) + index;
::memmove(dst + 1, dst, size_t(_size - index) * sizeof(T));
memcpy(dst, &item, sizeof(T));
_size++;
return kErrorOk;
}
//! Appends `item` to the vector.
inline Error append(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_PROPAGATE(grow(allocator, 1));
memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
_size++;
return kErrorOk;
}
//! Appends `other` vector at the end of this vector.
inline Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
if (_capacity - _size < size)
ASMJIT_PROPAGATE(grow(allocator, size));
if (size) {
memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
_size += size;
}
return kErrorOk;
}
//! Prepends `item` to the vector (unsafe case).
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns
//! `kErrorOk` then N elements can be added to the vector without checking
//! if there is a place for them. Used mostly internally.
inline void prependUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
T* data = static_cast<T*>(_data);
if (_size)
::memmove(data + 1, data, size_t(_size) * sizeof(T));
memcpy(data, &item, sizeof(T));
_size++;
}
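// A minimal sketch of the `willGrow()` + `*Unsafe()` pattern described above
// (assumes `allocator` points to a valid ZoneAllocator):
//
//   ZoneVector<int> vec;
//   if (vec.willGrow(allocator, 3) == kErrorOk) {
//     vec.appendUnsafe(1);  // Capacity for three more items is guaranteed,
//     vec.appendUnsafe(2);  // so no per-call capacity checks are needed.
//     vec.appendUnsafe(3);
//   }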
//! Appends `item` to the vector (unsafe case).
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns
//! `kErrorOk` then N elements can be added to the vector without checking
//! if there is a place for them. Used mostly internally.
inline void appendUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
_size++;
}
//! Inserts an `item` at the specified `index` (unsafe case).
inline void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
ASMJIT_ASSERT(index <= _size);
T* dst = static_cast<T*>(_data) + index;
::memmove(dst + 1, dst, size_t(_size - index) * sizeof(T));
memcpy(dst, &item, sizeof(T));
_size++;
}
//! Concatenates all items of `other` at the end of the vector.
inline void concatUnsafe(const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
ASMJIT_ASSERT(_capacity - _size >= size);
if (size) {
memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
_size += size;
}
}
//! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
inline uint32_t indexOf(const T& val) const noexcept {
const T* data = static_cast<const T*>(_data);
uint32_t size = _size;
for (uint32_t i = 0; i < size; i++)
if (data[i] == val)
return i;
return Globals::kNotFound;
}
//! Tests whether the vector contains `val`.
inline bool contains(const T& val) const noexcept {
return indexOf(val) != Globals::kNotFound;
}
//! Removes item at index `i`.
inline void removeAt(size_t i) noexcept {
ASMJIT_ASSERT(i < _size);
T* data = static_cast<T*>(_data) + i;
size_t size = --_size - i;
if (size)
::memmove(data, data + 1, size_t(size) * sizeof(T));
}
//! Pops the last element from the vector and returns it.
inline T pop() noexcept {
ASMJIT_ASSERT(_size > 0);
uint32_t index = --_size;
return data()[index];
}
template<typename CompareT = Support::Compare<Support::kSortAscending>>
inline void sort(const CompareT& cmp = CompareT()) noexcept {
Support::qSort<T, CompareT>(data(), size(), cmp);
}
//! Returns item at index `i`.
inline T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
//! Returns item at index `i`.
inline const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
//! Returns a reference to the first element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use
//! `first()` on an empty vector will trigger an assertion failure in debug
//! builds.
inline T& first() noexcept { return operator[](0); }
//! \overload
inline const T& first() const noexcept { return operator[](0); }
//! Returns a reference to the last element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use
//! `last()` on an empty vector will trigger an assertion failure in debug
//! builds.
inline T& last() noexcept { return operator[](_size - 1); }
//! \overload
inline const T& last() const noexcept { return operator[](_size - 1); }
//! \}
//! \name Memory Management
//! \{
//! Releases the memory held by `ZoneVector<T>` back to the `allocator`.
inline void release(ZoneAllocator* allocator) noexcept {
_release(allocator, sizeof(T));
}
//! Called to grow the buffer to fit at least `n` more elements.
inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_grow(allocator, sizeof(T), n);
}
//! Resizes the vector to hold `n` elements.
//!
//! If `n` is greater than the current size then the additional elements'
//! content will be initialized to zero. If `n` is less than the current
//! size then the vector will be truncated to exactly `n` elements.
inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_resize(allocator, sizeof(T), n);
}
//! Reallocates the internal array to fit at least `n` items.
inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
return n > _capacity ? ZoneVectorBase::_reserve(allocator, sizeof(T), n) : Error(kErrorOk);
}
inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
return _capacity - _size < n ? grow(allocator, n) : Error(kErrorOk);
}
//! \}
};
// ============================================================================
// [asmjit::ZoneBitVector]
// ============================================================================
//! Zone-allocated bit vector.
class ZoneBitVector {
public:
typedef Support::BitWord BitWord;
static constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
//! Bits.
BitWord* _data = nullptr;
//! Size of the bit-vector (in bits).
uint32_t _size = 0;
//! Capacity of the bit-vector (in bits).
uint32_t _capacity = 0;
ASMJIT_NONCOPYABLE(ZoneBitVector)
//! \cond INTERNAL
//! \name Internal
//! \{
static inline uint32_t _wordsPerBits(uint32_t nBits) noexcept {
return ((nBits + kBitWordSizeInBits - 1) / kBitWordSizeInBits);
}
static inline void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
dst[i] = 0;
}
static inline void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
dst[i] = ~BitWord(0);
}
static inline void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++)
dst[i] = src[i];
}
//! \}
//! \endcond
//! \name Construction & Destruction
//! \{
inline ZoneBitVector() noexcept {}
inline ZoneBitVector(ZoneBitVector&& other) noexcept
: _data(other._data),
_size(other._size),
_capacity(other._capacity) {}
//! \}
//! \name Overloaded Operators
//! \{
inline bool operator==(const ZoneBitVector& other) const noexcept { return eq(other); }
inline bool operator!=(const ZoneBitVector& other) const noexcept { return !eq(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the bit-vector is empty (has no bits).
inline bool empty() const noexcept { return _size == 0; }
//! Returns the size of this bit-vector (in bits).
inline uint32_t size() const noexcept { return _size; }
//! Returns the capacity of this bit-vector (in bits).
inline uint32_t capacity() const noexcept { return _capacity; }
//! Returns the size of the `BitWord[]` array in `BitWord` units.
inline uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); }
//! Returns the capacity of the `BitWord[]` array in `BitWord` units.
inline uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); }
//! Returns bit-vector data as `BitWord[]`.
inline BitWord* data() noexcept { return _data; }
//! \overload
inline const BitWord* data() const noexcept { return _data; }
//! \}
//! \name Utilities
//! \{
inline void swap(ZoneBitVector& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_capacity, other._capacity);
}
inline void clear() noexcept {
_size = 0;
}
inline void reset() noexcept {
_data = nullptr;
_size = 0;
_capacity = 0;
}
inline void truncate(uint32_t newSize) noexcept {
_size = Support::min(_size, newSize);
_clearUnusedBits();
}
inline bool bitAt(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _size);
return Support::bitVectorGetBit(_data, index);
}
inline void setBit(uint32_t index, bool value) noexcept {
ASMJIT_ASSERT(index < _size);
Support::bitVectorSetBit(_data, index, value);
}
inline void flipBit(uint32_t index) noexcept {
ASMJIT_ASSERT(index < _size);
Support::bitVectorFlipBit(_data, index);
}
ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t index = _size;
if (ASMJIT_UNLIKELY(index >= _capacity))
return _append(allocator, value);
uint32_t idx = index / kBitWordSizeInBits;
uint32_t bit = index % kBitWordSizeInBits;
if (bit == 0)
_data[idx] = BitWord(value) << bit;
else
_data[idx] |= BitWord(value) << bit;
_size++;
return kErrorOk;
}
ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
inline void clearAll() noexcept {
_zeroBits(_data, _wordsPerBits(_size));
}
inline void fillAll() noexcept {
_fillBits(_data, _wordsPerBits(_size));
_clearUnusedBits();
}
inline void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorClear(_data, start, count);
}
inline void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorFill(_data, start, count);
}
//! Performs a logical bitwise AND between bits specified in this array and bits
//! in `other`. If `other` has fewer bits than `this` then all remaining bits are
//! set to zero.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void and_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
uint32_t thisBitWordCount = sizeInBitWords();
uint32_t otherBitWordCount = other.sizeInBitWords();
uint32_t commonBitWordCount = Support::min(thisBitWordCount, otherBitWordCount);
uint32_t i = 0;
while (i < commonBitWordCount) {
dst[i] = dst[i] & src[i];
i++;
}
while (i < thisBitWordCount) {
dst[i] = 0;
i++;
}
}
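// For example (sketch): this = {1,0,1,1,0} (size 5) AND other = {0,1,1} (size 3)
// yields {0,0,1,0,0}; the size of this vector stays 5.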
//! Performs a logical bitwise AND between bits specified in this array and
//! negated bits in `other`. If `other` has fewer bits than `this` then all
//! remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void andNot(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++)
dst[i] = dst[i] & ~src[i];
}
//! Performs a logical bitwise OR between bits specified in this array and bits
//! in `other`. If `other` has fewer bits than `this` then all remaining bits
//! are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void or_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++)
dst[i] = dst[i] | src[i];
_clearUnusedBits();
}
inline void _clearUnusedBits() noexcept {
uint32_t idx = _size / kBitWordSizeInBits;
uint32_t bit = _size % kBitWordSizeInBits;
if (!bit) return;
_data[idx] &= (BitWord(1) << bit) - 1u;
}
inline bool eq(const ZoneBitVector& other) const noexcept {
if (_size != other._size)
return false;
const BitWord* aData = _data;
const BitWord* bData = other._data;
uint32_t numBitWords = _wordsPerBits(_size);
for (uint32_t i = 0; i < numBitWords; i++)
if (aData[i] != bData[i])
return false;
return true;
}
//! \}
//! \name Memory Management
//! \{
inline void release(ZoneAllocator* allocator) noexcept {
if (!_data) return;
allocator->release(_data, _capacity / 8);
reset();
}
inline Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept {
return _resize(allocator, newSize, newSize, newBitsValue);
}
ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept;
ASMJIT_API Error _append(ZoneAllocator* allocator, bool value) noexcept;
//! \}
//! \name Iterators
//! \{
class ForEachBitSet : public Support::BitVectorIterator<BitWord> {
public:
ASMJIT_INLINE explicit ForEachBitSet(const ZoneBitVector& bitVector) noexcept
: Support::BitVectorIterator<BitWord>(bitVector.data(), bitVector.sizeInBitWords()) {}
};
template<class Operator>
class ForEachBitOp : public Support::BitVectorOpIterator<BitWord, Operator> {
public:
ASMJIT_INLINE ForEachBitOp(const ZoneBitVector& a, const ZoneBitVector& b) noexcept
: Support::BitVectorOpIterator<BitWord, Operator>(a.data(), b.data(), a.sizeInBitWords()) {
ASMJIT_ASSERT(a.size() == b.size());
}
};
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONEVECTOR_H_INCLUDED

@ -0,0 +1,119 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_X86_H_INCLUDED
#define ASMJIT_X86_H_INCLUDED
//! \addtogroup asmjit_x86
//!
//! ### Namespace
//!
//! - \ref x86 - x86 namespace provides support for X86/X64 code generation.
//!
//! ### Emitters
//!
//! - \ref x86::Assembler - X86/X64 assembler (must read, provides examples).
//! - \ref x86::Builder - X86/X64 builder.
//! - \ref x86::Compiler - X86/X64 compiler.
//! - \ref x86::Emitter - X86/X64 emitter (abstract).
//!
//! ### Supported Instructions
//!
//! - Emitters:
//! - \ref x86::EmitterExplicitT - Provides all instructions that use
//! explicit operands, provides also utility functions. The member
//! functions provided are part of all X86 emitters.
//! - \ref x86::EmitterImplicitT - Provides all instructions that use
//! implicit operands, these cannot be used with \ref x86::Compiler.
//!
//! - Instruction representation:
//! - \ref x86::Inst::Id - instruction identifiers.
//! - \ref x86::Inst::Options - instruction options.
//!
//! ### Register Operands
//!
//! - \ref x86::Reg - Base class for any X86 register.
//! - \ref x86::Gp - General purpose register:
//! - \ref x86::GpbLo - 8-bit low register.
//! - \ref x86::GpbHi - 8-bit high register.
//! - \ref x86::Gpw - 16-bit register.
//! - \ref x86::Gpd - 32-bit register.
//! - \ref x86::Gpq - 64-bit register (X64 only).
//! - \ref x86::Vec - Vector (SIMD) register:
//! - \ref x86::Xmm - 128-bit SIMD register (SSE+).
//! - \ref x86::Ymm - 256-bit SIMD register (AVX+).
//! - \ref x86::Zmm - 512-bit SIMD register (AVX512+).
//! - \ref x86::Mm - 64-bit MMX register.
//! - \ref x86::St - 80-bit FPU register.
//! - \ref x86::KReg - opmask registers (AVX512+).
//! - \ref x86::SReg - segment register.
//! - \ref x86::CReg - control register.
//! - \ref x86::DReg - debug register.
//! - \ref x86::Bnd - bound register (discontinued).
//! - \ref x86::Rip - relative instruction pointer.
//!
//! ### Memory Operands
//!
//! - \ref x86::Mem - X86/X64 memory operand that provides support for all
//! X86 and X64 addressing features including absolute addresses, index
//! scales, and segment override prefixes.
//!
//! ### Other
//!
//! - \ref x86::Features - X86/X64 CPU features on top of \ref BaseFeatures.
//!
//! ### Status and Control Words
//!
//! - \ref asmjit::x86::FpuWord::Status - FPU status word.
//! - \ref asmjit::x86::FpuWord::Control - FPU control word.
//!
//! ### Predicates
//!
//! - \ref x86::Predicate - namespace that provides X86/X64 predicates.
//! - \ref x86::Predicate::Cmp - `CMP[PD|PS|SD|SS]` predicate (SSE+).
//! - \ref x86::Predicate::PCmpStr - `[V]PCMP[I|E]STR[I|M]` predicate (SSE4.1+).
//! - \ref x86::Predicate::Round - `ROUND[PD|PS|SD|SS]` predicate (SSE+).
//! - \ref x86::Predicate::VCmp - `VCMP[PD|PS|SD|SS]` predicate (AVX+).
//! - \ref x86::Predicate::VFixupImm - `VFIXUPIMM[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VFPClass - `VFPCLASS[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VGetMant - `VGETMANT[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VPCmp - `VPCMP[U][B|W|D|Q]` predicate (AVX512+).
//! - \ref x86::Predicate::VPCom - `VPCOM[U][B|W|D|Q]` predicate (XOP).
//! - \ref x86::Predicate::VRange - `VRANGE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VReduce - `REDUCE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::TLog - namespace that provides `VPTERNLOG[D|Q]` predicate / operations.
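//!
//! ### Minimal Example
//!
//! A minimal JIT sketch (assumes an X86/X64 host and that \ref JitRuntime is
//! part of the build; error handling omitted):
//!
//! ```
//! #include <asmjit/x86.h>
//! using namespace asmjit;
//!
//! typedef int (*Func)(void);
//!
//! int main() {
//!   JitRuntime rt;                  // Runtime designed for JIT execution.
//!   CodeHolder code;                // Holds the generated machine code.
//!   code.init(rt.environment());    // Match the runtime's environment.
//!
//!   x86::Assembler a(&code);        // Attach an assembler to `code`.
//!   a.mov(x86::eax, 1);             // mov eax, 1
//!   a.ret();                        // ret
//!
//!   Func fn;
//!   rt.add(&fn, &code);             // Relocate and add the code to the runtime.
//!   int result = fn();              // result == 1
//!   rt.release(fn);
//!   return result == 1 ? 0 : 1;
//! }
//! ```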
#include "core.h"
#include "asmjit-scope-begin.h"
#include "x86/x86assembler.h"
#include "x86/x86builder.h"
#include "x86/x86compiler.h"
#include "x86/x86emitter.h"
#include "x86/x86features.h"
#include "x86/x86globals.h"
#include "x86/x86instdb.h"
#include "x86/x86operand.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_X86_H_INCLUDED
