diff --git a/DemoDrv/DemoDrv.vcxproj b/DemoDrv/DemoDrv.vcxproj
index 1d0edc8..65c5abc 100644
--- a/DemoDrv/DemoDrv.vcxproj
+++ b/DemoDrv/DemoDrv.vcxproj
@@ -96,7 +96,6 @@
-
diff --git a/DemoDrv/DemoDrv.vcxproj.filters b/DemoDrv/DemoDrv.vcxproj.filters
index 5218e70..ef70a15 100644
--- a/DemoDrv/DemoDrv.vcxproj.filters
+++ b/DemoDrv/DemoDrv.vcxproj.filters
@@ -13,9 +13,6 @@
Source Files
-
- Source Files
-
diff --git a/DemoDrv/DriverEntry.cpp b/DemoDrv/DriverEntry.cpp
index 40ae50d..c6048e1 100644
--- a/DemoDrv/DriverEntry.cpp
+++ b/DemoDrv/DriverEntry.cpp
@@ -1,7 +1,6 @@
#include "Theodosius.h"
-extern "C" int __cdecl drv_entry()
+ObfuscateRoutine extern "C" void drv_entry()
{
- DbgPrint("> hello world! this is a demo!\n");
- DbgPrint("> current pml4 = 0x%p\n", cppdemo::get_dirbase());
+ DbgPrint("> Hello World!\n");
}
\ No newline at end of file
diff --git a/DemoDrv/ObfuscateDemo.cpp b/DemoDrv/ObfuscateDemo.cpp
deleted file mode 100644
index b607ed3..0000000
--- a/DemoDrv/ObfuscateDemo.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#include "Theodosius.h"
-
-namespace cppdemo
-{
- ObfuscateRoutine
- unsigned long long get_dirbase()
- {
- cr3 result;
- result.flags =
- *(unsigned long long*)(IoGetCurrentProcess() + 0x28);
-
- result.flags = NULL;
- if (!result.address_of_page_directory)
- return -1;
-
- return result.address_of_page_directory << 12;
- }
-}
\ No newline at end of file
diff --git a/DemoDrv/Theodosius.h b/DemoDrv/Theodosius.h
index accc39a..970f7ab 100644
--- a/DemoDrv/Theodosius.h
+++ b/DemoDrv/Theodosius.h
@@ -1,27 +1,7 @@
#pragma once
#include
#define ObfuscateRoutine __declspec(code_seg(".theo"), noinline)
-#define MutatedRoutine __declspec(code_seg(".theo1"), noinline)
-#define EncryptedRoutine __declspec(code_seg(".theo2"), noinline)
+#define MutateRoutine __declspec(code_seg(".theo1"), noinline)
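+// Illustrative usage: these macros place a routine in a dedicated code section
+// (".theo"/".theo1") and prevent inlining, so it can later be located by its
+// section name, e.g.:
+//   MutateRoutine void demo() { DbgPrint("> emitted into .theo1\n"); }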
extern "C" unsigned long DbgPrint(const char* format, ...);
-extern "C" unsigned long long IoGetCurrentProcess();
-
-namespace cppdemo
-{
- unsigned long long get_dirbase();
-}
-
-typedef union
-{
- struct
- {
- unsigned long long reserved1 : 3;
- unsigned long long page_level_write_through : 1;
- unsigned long long page_level_cache_disable : 1;
- unsigned long long reserved2 : 7;
- unsigned long long address_of_page_directory : 36;
- unsigned long long reserved3 : 16;
- };
- unsigned long long flags;
-} cr3;
\ No newline at end of file
+extern "C" unsigned long long IoGetCurrentProcess();
\ No newline at end of file
diff --git a/Theodosius/Theodosius.vcxproj b/Theodosius/Theodosius.vcxproj
index 8cea400..271ba6d 100644
--- a/Theodosius/Theodosius.vcxproj
+++ b/Theodosius/Theodosius.vcxproj
@@ -85,6 +85,54 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -92,6 +140,77 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Theodosius/Theodosius.vcxproj.filters b/Theodosius/Theodosius.vcxproj.filters
index fcffd97..eb1d99c 100644
--- a/Theodosius/Theodosius.vcxproj.filters
+++ b/Theodosius/Theodosius.vcxproj.filters
@@ -24,6 +24,24 @@
{c51e3b93-1496-49d7-838f-825d75b29ee6}
+
+ {d28d9202-4139-42a0-9f49-71beb5e01670}
+
+
+ {a847dc8c-08a3-4ea7-a20d-157963dd41a8}
+
+
+ {706001e9-56f5-41d2-b209-9f5543d0bd11}
+
+
+ {a8e52093-e1b2-4ef3-b427-ebea8772bbbf}
+
+
+ {da6ded33-7d62-4f83-b8e7-4d343fe49cd7}
+
+
+ {244a52bf-80cb-43ac-ac0d-a6aad89b9eb0}
+
@@ -41,6 +59,150 @@
Source Files
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\core
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
+
+ Source Files\asmjit\x86
+
@@ -205,6 +367,219 @@
Header Files
+
+ Header Files\asmjit
+
+
+ Header Files\asmjit
+
+
+ Header Files\asmjit
+
+
+ Header Files\asmjit
+
+
+ Header Files\asmjit
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\core
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
+
+ Header Files\asmjit\x86
+
diff --git a/Theodosius/asmjit.natvis b/Theodosius/asmjit.natvis
new file mode 100644
index 0000000..b73d848
--- /dev/null
+++ b/Theodosius/asmjit.natvis
@@ -0,0 +1,201 @@
+
+
+
+
+
+
+
+ {_small.data, s8}
+ {_large.data, s8}
+
+
+ Small
+ Large
+ External
+
+ - (int)_small.type, d
+ - _large.size, d
+ - asmjit::String::kSSOCapacity, d
+ - _large.capacity, d
+ - _small.data, s8
+ - _large.data, s8
+
+
+
+
+ {{ [size={_size, d} capacity={_capacity, d}] }}
+
+ - _size, d
+ - _capacity, d
+
+ _size
+ (($T1*)_data)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ [None]
+ [Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}
+ [Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}
+ [Imm] {{ val={immValue(), d} hex={immValue(), X} }}
+ [Label] {{ id={_baseId} }}
+ [Unknown]
+
+ - _signature, X
+ - (asmjit::Operand_::OpType)opType()
+ - opSize(), d
+ - (asmjit::BaseReg::RegType)regType()
+ - (asmjit::BaseReg::RegGroup)regGroup()
+ - (asmjit::BaseReg::RegType)memBaseType()
+ - (asmjit::BaseReg::RegType)memIndexType()
+ - (asmjit::BaseMem::AddrType)memAddrType()
+ - (bool)memRegHome()
+ - _baseId
+ - _data[0]
+ - _data[1]
+ - _data[0]
+ - _data[1]
+ - _data[0]
+ - _data[1]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ [RegValue {{ regType={regType()} indirect={isIndirect()} done={isDone()} }}]
+ [StackValue {{ indirect={isIndirect()} done={isDone()} }}]
+ [Unknown]
+
+
+ - _data
+ - (asmjit::Type::Id)(typeId())
+ - (asmjit::BaseReg::RegType)regType()
+ - regId()
+ - stackOffset()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ [InstNode]
+ [SectionNode]
+ [LabelNode]
+ [AlignNode]
+ [EmbedDataNode]
+ [EmbedLabelNode]
+ [EmbedLabelDeltaNode]
+ [ConstPoolNode]
+ [CommentNode]
+ [SentinelNode]
+ [JumpNode]
+ [FuncNode]
+ [FuncRetNode]
+ [InvokeNode]
+ [UnknownNode {nodeType(), d}]
+
+
+ - _prev
+ - _next
+
+ - (asmjit::BaseNode::NodeType)_any._nodeType
+ - (asmjit::BaseNode::Flags)_any._nodeFlags
+
+ - _position
+ - _userDataU64
+ - _userDataPtr
+ - _passData
+ - _inlineComment, s8
+
+ - ((asmjit::InstNode*)this)->_baseInst
+ - _inst._opCount
+ - _inst._opCapacity
+ - ((asmjit::InstNode*)this)->_opArray, [_inst._opCount]
+
+ - ((asmjit::SectionNode*)this)->_id
+ - ((asmjit::SectionNode*)this)->_nextSection
+
+ - ((asmjit::LabelNode*)this)->_id
+
+ - ((asmjit::AlignNode*)this)->_alignMode
+ - ((asmjit::AlignNode*)this)->_alignment
+
+ - _embed._typeId, d
+ - _embed._typeSize, d
+ - ((asmjit::EmbedDataNode*)this)->_itemCount
+ - ((asmjit::EmbedDataNode*)this)->_repeatCount
+ - ((asmjit::EmbedDataNode*)this)->_inlineData
+ - ((asmjit::EmbedDataNode*)this)->_externalData
+
+ - ((asmjit::EmbedLabelNode*)this)->_id
+
+ - ((asmjit::EmbedLabelDeltaNode*)this)->_id
+ - ((asmjit::EmbedLabelDeltaNode*)this)->_baseId
+ - ((asmjit::EmbedLabelDeltaNode*)this)->_dataSize
+
+ - ((asmjit::ConstPoolNode*)this)->_constPool
+
+ - (asmjit::SentinelNode::SentinelType)_sentinel._sentinelType
+
+ - ((asmjit::JumpNode*)this)->_annotation
+
+ - ((asmjit::FuncNode*)this)->_funcDetail
+ - ((asmjit::FuncNode*)this)->_frame
+ - ((asmjit::FuncNode*)this)->_exitNode
+ - ((asmjit::FuncNode*)this)->_end
+ - ((asmjit::FuncNode*)this)->_args, [((asmjit::FuncNode*)this)->_funcDetail._argCount]
+
+ - ((asmjit::InvokeNode*)this)->_funcDetail
+ - ((asmjit::InvokeNode*)this)->_rets, [((asmjit::InvokeNode*)this)->_funcDetail._retCount]
+ - ((asmjit::InvokeNode*)this)->_args, [((asmjit::InvokeNode*)this)->_funcDetail._argCount]
+
+
+
diff --git a/Theodosius/asmjit/asmjit-scope-begin.h b/Theodosius/asmjit/asmjit-scope-begin.h
new file mode 100644
index 0000000..6ee5050
--- /dev/null
+++ b/Theodosius/asmjit/asmjit-scope-begin.h
@@ -0,0 +1,35 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifdef _WIN32
+ #pragma push_macro("min")
+ #pragma push_macro("max")
+
+ #ifdef min
+ #undef min
+ #endif
+
+ #ifdef max
+ #undef max
+ #endif
+#endif
diff --git a/Theodosius/asmjit/asmjit-scope-end.h b/Theodosius/asmjit/asmjit-scope-end.h
new file mode 100644
index 0000000..447105a
--- /dev/null
+++ b/Theodosius/asmjit/asmjit-scope-end.h
@@ -0,0 +1,27 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifdef _WIN32
+ #pragma pop_macro("min")
+ #pragma pop_macro("max")
+#endif
diff --git a/Theodosius/asmjit/asmjit.h b/Theodosius/asmjit/asmjit.h
new file mode 100644
index 0000000..400426c
--- /dev/null
+++ b/Theodosius/asmjit/asmjit.h
@@ -0,0 +1,37 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_ASMJIT_H_INCLUDED
+#define ASMJIT_ASMJIT_H_INCLUDED
+
+#include "./core.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "./x86.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "./arm.h"
+#endif
+
+#endif // ASMJIT_ASMJIT_H_INCLUDED
diff --git a/Theodosius/asmjit/core.h b/Theodosius/asmjit/core.h
new file mode 100644
index 0000000..52540ab
--- /dev/null
+++ b/Theodosius/asmjit/core.h
@@ -0,0 +1,2063 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_H_INCLUDED
+#define ASMJIT_CORE_H_INCLUDED
+
+//! Root namespace used by AsmJit.
+namespace asmjit {
+
+// ============================================================================
+// [Documentation - mainpage]
+// ============================================================================
+
+//! \mainpage API Reference
+//!
+//! AsmJit C++ API reference documentation generated by Doxygen.
+//!
+//! AsmJit library uses one global namespace called \ref asmjit, which provides
+//! the whole functionality. Core functionality is within \ref asmjit namespace
+//! and architecture specific functionality is always in its own namespace. For
+//! example \ref asmjit::x86 provides both 32-bit and 64-bit X86 code generation.
+//!
+//! \section main_groups Documentation Groups
+//!
+//! AsmJit documentation is structured into groups. Groups can be followed in
+//! order to learn AsmJit, but knowledge from multiple groups is required to
+//! use AsmJit properly:
+//!
+//! $$DOCS_GROUP_OVERVIEW$$
+//!
+//! \note It's important to understand that in order to learn AsmJit all groups
+//! are important. Some groups can be omitted if a particular tool is out of
+//! interest - for example \ref asmjit_assembler users don't need to know about
+//! \ref asmjit_builder, but it's not the opposite. \ref asmjit_builder users
+//! must know about \ref asmjit_assembler as it also uses operands, labels, and
+//! other concepts. Similarly \ref asmjit_compiler users must know how both \ref
+//! asmjit_assembler and \ref asmjit_builder tools work.
+//!
+//! \section where_to_start Where To Start
+//!
+//! AsmJit \ref asmjit_core provides the following two classes that are essential
+//! from the code generation perspective:
+//!
+//! - \ref CodeHolder provides functionality
+//! to temporarily hold the generated code. It stores all the necessary
+//! information about the code - code buffers, sections, labels, symbols,
+//! and information about relocations.
+//!
+//! - \ref BaseEmitter provides interface used
+//! by emitter implementations. The interface provides basic building blocks
+//! that are then implemented by \ref BaseAssembler, \ref BaseBuilder, and
+//! \ref BaseCompiler.
+//!
+//! Code emitters:
+//!
+//! - \ref asmjit_assembler - provides direct machine code generation.
+//!
+//! - \ref asmjit_builder - provides intermediate code generation that can
+//! be processed before it's serialized to \ref BaseAssembler.
+//!
+//! - \ref asmjit_compiler - provides high-level code generation with built-in
+//! register allocation.
+//!
+//! - \ref FuncNode - provides insight into how a function looks from the Compiler
+//! perspective and how it's stored in a node-list.
+//!
+//! \section main_recommendations Recommendations
+//!
+//! The following steps are recommended for all AsmJit users:
+//!
+//! - Make sure that you use \ref Logger, see \ref asmjit_logging.
+//!
+//! - Make sure that you use \ref ErrorHandler, see \ref asmjit_error_handling.
+//!
+//! - Instruction validation in your debug builds can reveal problems too.
+//! AsmJit provides validation at the instruction level, which can be enabled
+//! by \ref BaseEmitter::addValidationOptions().
+//!
+//! See \ref BaseEmitter::ValidationOptions for more details.
+//!
+//! - Make sure you put a breakpoint into \ref DebugUtils::errored() function
+//! if you have a problem with AsmJit returning errors during instruction
+//! encoding or register allocation. Having an active breakpoint there can
+//! help to reveal the origin of the error, to inspect variables and other
+//! conditions that caused it.
+//!
+//! The reason for using \ref Logger and \ref ErrorHandler is that they provide
+//! very useful information about what's happening inside emitters. In many
+//! cases the information provided by these two is crucial to quickly fix issues
+//! that happen during development (for example wrong instruction, address, or
+//! register used). In addition, output from \ref Logger is always necessary
+//! when filling bug reports. In other words, using logging and proper error
+//! handling can save a lot of time during development.
+//!
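+//! As an illustrative sketch, attaching a logger (here AsmJit's FILE-based
+//! \ref FileLogger) and a custom \ref ErrorHandler to a \ref CodeHolder could
+//! look like this:
+//!
+//! ```
+//! #include <asmjit/core.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Minimal error handler that just prints the formatted message.
+//! class SimpleErrorHandler : public ErrorHandler {
+//! public:
+//!   void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//!     printf("AsmJit error 0x%08X: %s\n", err, message);
+//!   }
+//! };
+//!
+//! void attachDiagnostics(CodeHolder& code) {
+//!   static FileLogger logger(stdout);       // Logs every emitted instruction to stdout.
+//!   static SimpleErrorHandler errorHandler; // Reports errors instead of failing silently.
+//!
+//!   code.setLogger(&logger);
+//!   code.setErrorHandler(&errorHandler);
+//! }
+//! ```
+//!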
+//! \section main_other Other Pages
+//!
+//! - Class List - List of classes sorted alphabetically
+//! - AsmJit Namespace - List of symbols provided by `asmjit` namespace
+
+// ============================================================================
+// [Documentation - asmjit_build]
+// ============================================================================
+
+//! \defgroup asmjit_build Build Instructions
+//! \brief Build instructions, supported environments, and feature selection.
+//!
+//! ### Overview
+//!
+//! AsmJit is designed to be easy to embed in any project. However, it depends
+//! on some compile-time definitions that can be used to enable or disable
+//! features to decrease the resulting binary size. A typical way of building
+//! AsmJit is to use [cmake](https://www.cmake.org), but it's also possible to
+//! include AsmJit's source code directly in your project and build it yourself.
+//! The easiest way to do that is to add AsmJit's **src** directory to your
+//! project and to define \ref ASMJIT_STATIC. AsmJit can then be updated from
+//! time to time without any changes to this integration process. Do not embed
+//! AsmJit's `test` files in such case as these are used exclusively for testing.
+//!
+//! ### Supported C++ Compilers
+//!
+//! - Requirements:
+//!
+//! - AsmJit won't build without C++11 enabled. If you use older GCC or Clang
+//! you would have to enable at least C++11 standard through compiler flags.
+//!
+//! - Tested:
+//!
+//! - **Clang** - Tested by Travis-CI - Clang 3.9+ (with C++11 enabled) is
+//! officially supported (older Clang versions having C++11 support are
+//! probably fine, but are not regularly tested).
+//!
+//! - **GNU** - Tested by Travis-CI - GCC 4.8+ (with C++11 enabled) is
+//! officially supported.
+//!
+//! - **MINGW** - Tested by Travis-CI - Use the latest version, if possible.
+//!
+//! - **MSVC** - Tested by Travis-CI - VS2017+ is officially supported, VS2015
+//! is reported to work.
+//!
+//! - Untested:
+//!
+//! - **Intel** - No maintainers and no CI environment to regularly test
+//! this compiler.
+//!
+//! - **Other** C++ compilers would require basic support in
+//! [core/api-config.h](https://github.com/asmjit/asmjit/tree/master/src/asmjit/core/api-config.h).
+//!
+//! ### Supported Operating Systems and Platforms
+//!
+//! - Tested:
+//!
+//! - **Linux** - Tested by Travis-CI (any distribution is generally supported).
+//!
+//! - **OSX** - Tested by Travis-CI (any version is supported).
+//!
+//! - **Windows** - Tested by Travis-CI - (Windows 7+ is officially supported).
+//!
+//! - **Emscripten** - Works if compiled with \ref ASMJIT_NO_JIT. AsmJit
+//! cannot generate WASM code, but can be used to generate X86/X64 code
+//! within a browser, for example.
+//!
+//! - Untested:
+//!
+//! - **BSDs** - No maintainers, no CI environment to regularly test BSDs,
+//! but they should work out of the box.
+//!
+//! - **Haiku** - Not regularly tested, but reported to work.
+//!
+//! - **Other** operating systems would require some testing and support in
+//! the following files:
+//! - [core/api-config.h](https://github.com/asmjit/asmjit/tree/master/src/asmjit/core/api-config.h)
+//! - [core/osutils.cpp](https://github.com/asmjit/asmjit/tree/master/src/asmjit/core/osutils.cpp)
+//! - [core/virtmem.cpp](https://github.com/asmjit/asmjit/tree/master/src/asmjit/core/virtmem.cpp)
+//!
+//! ### Supported Backends / Architectures
+//!
+//! - **X86** - Both 32-bit and 64-bit backends tested by Travis-CI.
+//! - **ARM** - Work-in-progress (not public at the moment).
+//!
+//! ### Static Builds and Embedding
+//!
+//! These definitions can be used to enable a static library build. Embed is used
+//! when AsmJit's source code is embedded directly in another project; it implies
+//! a static build as well.
+//!
+//! - \ref ASMJIT_EMBED - Asmjit is embedded, implies \ref ASMJIT_STATIC.
+//! - \ref ASMJIT_STATIC - Enable static-library build.
+//!
+//! \note Projects that use AsmJit statically must define \ref ASMJIT_STATIC in
+//! all compilation units that use AsmJit, otherwise AsmJit would use dynamic
+//! library imports in \ref ASMJIT_API decorator. The recommendation is to
+//! define this macro across the whole project that uses AsmJit this way.
+//!
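+//! As a minimal sketch of the static/embedded setup described above (how the
+//! definition is passed is up to your build system):
+//!
+//! ```
+//! // Define ASMJIT_STATIC in every compilation unit that includes AsmJit,
+//! // e.g. globally via -DASMJIT_STATIC or /DASMJIT_STATIC.
+//! #define ASMJIT_STATIC
+//!
+//! #include <asmjit/asmjit.h>
+//! ```
+//!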
+//! ### Build Configuration
+//!
+//! These definitions control whether asserts are active or not. By default
+//! AsmJit would autodetect build configuration from existing pre-processor
+//! definitions, but this behavior can be overridden, for example to enable
+//! debug asserts in release configuration.
+//!
+//! - \ref ASMJIT_BUILD_DEBUG - Overrides build configuration to debug,
+//! asserts will be enabled in this case.
+//! - \ref ASMJIT_BUILD_RELEASE - Overrides build configuration to release,
+//! asserts will be disabled in this case.
+//!
+//! \note There is usually no need to override the build configuration. AsmJit
+//! detects the build configuration by checking whether `NDEBUG` is defined and
+//! automatically defines \ref ASMJIT_BUILD_RELEASE if configuration overrides
+//! were not used. We only recommend using build configuration overrides in
+//! special situations, like using AsmJit in release configuration with asserts
+//! enabled for whatever reason.
+//!
+//! ### AsmJit Backends
+//!
+//! AsmJit currently supports only X86/X64 backend, but the plan is to add more
+//! backends in the future. By default AsmJit builds only the host backend, which
+//! is autodetected at compile-time, but this can be overridden.
+//!
+//! - \ref ASMJIT_BUILD_X86 - Always build X86 backend (X86 and X86_64).
+//! - \ref ASMJIT_BUILD_ARM - Always build ARM backend (ARM and AArch64).
+//! - \ref ASMJIT_BUILD_HOST - Always build the host backend.
+//!
+//! ### Features Selection
+//!
+//! AsmJit builds by default with all supported features, which include all emitters,
+//! logging, instruction validation and introspection, and JIT memory allocation.
+//! Features can be disabled at compile time by using `ASMJIT_NO_...` definitions.
+//!
+//! - \ref ASMJIT_NO_DEPRECATED - Disables deprecated API at compile time
+//! so it won't be available and the compilation will fail if there is
+//! attempt to use such API. This includes deprecated classes, namespaces,
+//! enumerations, and functions.
+//!
+//! - \ref ASMJIT_NO_FOREIGN - Disables the support for foreign architectures.
+//! If defined, it would internally set \ref ASMJIT_BUILD_HOST to true.
+//!
+//! - \ref ASMJIT_NO_BUILDER - Disables \ref asmjit_builder functionality
+//! completely. This implies \ref ASMJIT_NO_COMPILER as \ref asmjit_compiler
+//! cannot be used without \ref asmjit_builder.
+//!
+//! - \ref ASMJIT_NO_COMPILER - Disables \ref asmjit_compiler functionality
+//! completely.
+//!
+//! - \ref ASMJIT_NO_JIT - Disables JIT memory management and \ref JitRuntime.
+//!
+//! - \ref ASMJIT_NO_LOGGING - Disables \ref Logger and \ref Formatter.
+//!
+//! - \ref ASMJIT_NO_TEXT - Disables everything that contains string
+//! representation of AsmJit constants, should be used together with
+//! \ref ASMJIT_NO_LOGGING as logging doesn't make sense without the
+//! ability to query instruction names, register names, etc...
+//!
+//! - \ref ASMJIT_NO_VALIDATION - Disables validation API.
+//!
+//! - \ref ASMJIT_NO_INTROSPECTION - Disables instruction introspection API,
+//! must be used together with \ref ASMJIT_NO_COMPILER as \ref asmjit_compiler
+//! requires introspection for its liveness analysis and register allocation.
+//!
+//! \note It's not recommended to disable features if you plan to build AsmJit
+//! as a shared library that will be used by multiple projects that you don't
+//! control how AsmJit was built (for example AsmJit in a Linux distribution).
+//! The possibility to disable certain features exists mainly for customized
+//! AsmJit builds.
+
+// ============================================================================
+// [Documentation - asmjit_breaking_changes]
+// ============================================================================
+
+//! \defgroup asmjit_breaking_changes Breaking Changes
+//! \brief Documentation of breaking changes
+//!
+//! ### Overview
+//!
+//! AsmJit is a live project that is being actively developed. Deprecating the
+//! existing API in favor of a new one is preferred, but it's not always
+//! possible if the changes are significant. AsmJit authors prefer to make
+//! accumulated breaking changes at once instead of breaking the API often.
+//! This page documents deprecated and removed APIs and should serve as a how-to
+//! guide for people that want to port existing code to work with the newest AsmJit.
+//!
+//! ### Tips
+//!
+//! Useful tips before you start:
+//!
+//! - Visit our [Public Gitter Channel](https://gitter.im/asmjit/asmjit) if
+//! you need quick help.
+//!
+//! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that
+//! you are not using deprecated functionality at all. Deprecated functions
+//! are decorated with `ASMJIT_DEPRECATED()` macro, but sometimes it's not
+//! possible to decorate everything like classes, which are used by deprecated
+//! functions as well, because some compilers would warn about that. If your
+//! project compiles fine with `ASMJIT_NO_DEPRECATED`, it's not using anything
+//! that was deprecated.
+//!
+//! ### Changes committed at 2020-05-30
+//!
+//! AsmJit has been cleaned up significantly, many todo items have been fixed
+//! and many functions and classes have been redesigned, some in an incompatible
+//! way.
+//!
+//! Core changes:
+//!
+//! - \ref Imm operand has now only \ref Imm::value() and \ref Imm::valueAs()
+//! functions that return its value content, and \ref Imm::setValue() function
+//! that sets the content. Functions like `setI8()`, `setU8()` were deprecated.
+//!
+//! Old functions were deprecated, but code using them should still compile.
+//!
+//! - `ArchInfo` has been replaced with \ref Environment. Environment provides
+//! more details about the architecture, but drops some properties that
+//! were used by arch info - `gpSize()` and `gpCount()`. `gpSize()` can
+//! be replaced with `registerSize()` getter, which returns a native register
+//! size of the architecture the environment uses. However, `gpCount()` was
+//! removed - at the moment \ref ArchRegs can be used to access such properties.
+//!
+//! Some other functions were renamed, for example `ArchInfo::isX86Family()` is
+//! now \ref Environment::isFamilyX86(), etc. The reason for changing the naming
+//! order was to support more properties; all the accessors now start with the
+//! type of the property, like \ref Environment::isPlatformWindows().
+//!
+//! This change also causes many other classes to provide an `environment()`
+//! getter instead of an `archInfo()` getter. In addition, AsmJit now uses
+//! `arch()` to get an architecture instead of `archId()`, and `ArchInfo::kIdXXX`
+//! was renamed to `Environment::kArchXXX`.
+//!
+//! Some functions were deprecated, some removed...
+//!
+//! - `CodeInfo` has been removed in favor of \ref Environment. If you used
+//! `CodeInfo` to set architecture and base address, this is now possible
+//! with \ref Environment and setting base address explicitly by \ref
+//! CodeHolder::init() - the first argument is \ref Environment, and the
+//! second argument is base address, which defaults to \ref
+//! Globals::kNoBaseAddress.
+//!
+//! CodeInfo class was deprecated, but the code using it should still
+//! compile with warnings.
+//!
+//! - \ref CallConv has been updated to offer a more unified way of representing
+//! calling conventions - many calling conventions were abstracted to follow
+//! standard naming like \ref CallConv::kIdCDecl or \ref CallConv::kIdStdCall.
+//!
+//! This change means that other APIs like \ref FuncDetail::init() now
+//! require both a calling convention and a target \ref Environment.
+//!
+//! - `Logging` namespace has been renamed to \ref Formatter, which now
+//! provides general functionality for formatting in AsmJit.
+//!
+//! Logging namespace should still work, but its use is deprecated.
+//! Unfortunately this will be without deprecation warnings, so please
+//! make sure you don't use it.
+//!
+//! - `Data64`, `Data128`, and `Data256` structs were deprecated and should
+//! no longer be used. There is no replacement, AsmJit users should simply
+//! create their own structures if they need them or use the new repeated
+//! embed API in emitters, see \ref BaseEmitter::embedDataArray().
+//!
+//! Emitter changes:
+//!
+//! - \ref BaseEmitter::emit() function signature has been changed to accept
+//! 3 operands by reference and the remaining 3 operands as a contiguous array.
+//! This change is purely cosmetic and shouldn't affect users as emit()
+//! has many overloads that dispatch to the right function.
+//!
+//! - \ref x86::Emitter (Assembler, Builder, Compiler) deprecates embed
+//! utilities like `dint8()`, `duint8()`, `duint16()`, `dxmm()`, etc...
+//! in favor of a new and more powerful \ref BaseEmitter::embedDataArray().
+//! This function also allows emitting repeated values and/or patterns,
+//! which is used by helpers \ref BaseEmitter::embedUInt8(), and others...
+//!
+//! - Validation is now available through \ref BaseEmitter::ValidationOptions,
+//! which can be enabled/disabled through \ref BaseEmitter::addValidationOptions()
+//! and \ref BaseEmitter::clearValidationOptions(), respectively. Validation
+//! options now separate between encoding and Builder/Compiler so it's possible
+//! to choose the granularity required.
+//!
+//! Builder changes:
+//!
+//! - Internal functions for creating nodes were redesigned. They now accept
+//! a pointer to the node created as a first parameter. These changes should
+//! not affect AsmJit users as these functions were used internally.
+//!
+//! Compiler changes:
+//!
+//! - `FuncCallNode` has been renamed to \ref InvokeNode. Additionally, function
+//! calls should now use \ref x86::Compiler::invoke() instead of `call()`.
+//! The reason behind this is to remove the confusion between a `call`
+//! instruction and AsmJit's `call()` intrinsic, which is now `invoke()`.
+//!
+//! - Creating new nodes also changed. Now the preferred way of invoking a
+//! function is to call \ref x86::Compiler::invoke() where the first
+//! argument is `InvokeNode**`. The function now returns an error and would
+//! call \ref ErrorHandler in case of a failure. Error handling was
+//! unspecified in the past - the function was marked noexcept, but called
+//! error handler, which could throw.
+//!
+//! The reason behind this change is to make the API consistent with other
+//! changes and to also make it possible to inspect the possible error. In
+//! the previous API it returned a new node or `nullptr` in case of error,
+//! which the user couldn't inspect unless there was an attached \ref
+//! ErrorHandler.
+//!
+//! Samples:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! using namespace asmjit;
+//!
+//! // The basic setup of JitRuntime and CodeHolder changed, use environment()
+//! // instead of codeInfo().
+//! void basicSetup() {
+//! JitRuntime rt;
+//! CodeHolder code(rt.environment());
+//! }
+//!
+//! // Calling a function (Compiler) changed - use invoke() instead of call().
+//! void functionInvocation(x86::Compiler& cc) {
+//! InvokeNode* invokeNode;
+//! cc.invoke(&invokeNode, targetOperand, FuncSignatureT<...>(...));
+//! }
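+//!
+//! // Enabling instruction validation also changed - an illustrative sketch,
+//! // see BaseEmitter::ValidationOptions for the exact flag names.
+//! void enableValidation(x86::Assembler& a) {
+//!   a.addValidationOptions(BaseEmitter::kValidationOptionAssembler);
+//! }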
+//! ```
+
+// ============================================================================
+// [Documentation - asmjit_core]
+// ============================================================================
+
+//! \defgroup asmjit_core Core
+//! \brief Globals, code storage, and emitter interface.
+//!
+//! ### Overview
+//!
+//! AsmJit library uses \ref CodeHolder to hold code during code generation and
+//! emitters inheriting from \ref BaseEmitter to emit code. CodeHolder uses
+//! containers to manage its data:
+//!
+//! - \ref Section - stores information about a code or data section.
+//! - \ref CodeBuffer - stores actual code or data, part of \ref Section.
+//! - \ref LabelEntry - stores information about a label - its name, offset,
+//! the section it belongs to, and other bits.
+//! - \ref LabelLink - stores information about a label that has been used by
+//! the assembler but is not yet bound.
+//! - \ref RelocEntry - stores information about a relocation.
+//! - \ref AddressTableEntry - stores information about an address, which was
+//! used in a jump or call. Such an address may need relocation.
+//!
+//! To generate code you would need to instantiate at least the following classes:
+//!
+//! - \ref CodeHolder - to hold code during code generation.
+//! - \ref BaseEmitter - to emit code into \ref CodeHolder.
+//! - \ref Target (optional) - most likely \ref JitRuntime to keep the generated
+//! code in executable memory. \ref Target can be customized by inheriting from
+//! it.
+//!
+//! There are also other core classes that are important:
+//!
+//! - \ref Environment - describes where the code will run. Environment brings
+//! the concept of target triples or tuples into AsmJit, which means that users
+//! can specify target architecture, platform, and ABI.
+//! - \ref Type - encapsulates lightweight type functionality that can be used
+//! to describe primitive and vector types. Types are used by higher level
+//! utilities, for example by \ref asmjit_function and \ref asmjit_compiler.
+//! - \ref CpuInfo - encapsulates CPU information - stores both CPU information
+//! and features described by \ref BaseFeatures.
+//!
+//! AsmJit also provides global constants:
+//!
+//! - \ref Globals - namespace that provides global constants.
+//! - \ref ByteOrder - byte-order constants and functionality.
+//!
+//! \note CodeHolder examples use \ref x86::Assembler as abstract interfaces cannot
+//! be used to generate code.
+//!
+//! ### CodeHolder & Emitters
+//!
+//! The example below shows how the mentioned classes interact to generate X86 code:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Signature of the generated function.
+//! typedef int (*Func)(void);
+//!
+//! int main() {
+//! JitRuntime rt; // Runtime specialized for JIT code execution.
+//!
+//! CodeHolder code; // Holds code and relocation information.
+//! code.init(rt.environment()); // Initialize code to match the JIT environment.
+//!
+//! x86::Assembler a(&code); // Create and attach x86::Assembler to code.
+//! a.mov(x86::eax, 1); // Move one to eax register.
+//! a.ret(); // Return from function.
+//! // ===== x86::Assembler is no longer needed from here and can be destroyed =====
+//!
+//! Func fn; // Holds address to the generated function.
+//! Error err = rt.add(&fn, &code); // Add the generated code to the runtime.
+//! if (err) return 1; // Handle a possible error returned by AsmJit.
+//! // ===== CodeHolder is no longer needed from here and can be destroyed =====
+//!
+//! int result = fn(); // Execute the generated code.
+//! printf("%d\n", result); // Print the resulting "1".
+//!
+//! // All classes use RAII, all resources will be released before `main()` returns,
+//! // the generated function can be, however, released explicitly if you intend to
+//! // reuse or keep the runtime alive, which you should in production-ready code.
+//! rt.release(fn);
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! The example above used \ref x86::Assembler as an emitter. AsmJit provides the
+//! following emitters that offer various levels of abstraction:
+//!
+//! - \ref asmjit_assembler - Low-level emitter that emits directly to \ref CodeBuffer.
+//! - \ref asmjit_builder - Low-level emitter that emits to a \ref BaseNode list.
+//! - \ref asmjit_compiler - High-level emitter that provides register allocation.
+//!
+//! ### Targets and JitRuntime
+//!
+//! AsmJit's \ref Target is an interface that provides basic target abstraction.
+//! At the moment AsmJit provides only one implementation called \ref JitRuntime,
+//! which as the name suggests provides JIT code target and execution runtime.
+//! \ref JitRuntime provides all the necessary stuff to implement a simple JIT
+//! compiler with basic memory management. It only provides \ref JitRuntime::add()
+//! and \ref JitRuntime::release() functions that are used to either add code
+//! to the runtime or release it. \ref JitRuntime doesn't do any decisions on
+//! when the code should be released, the decision is up to the developer.
+//!
+//! See more at \ref asmjit_virtual_memory group.
+//!
+//! ### More About Environment
+//!
+//! In the previous example the \ref Environment is retrieved from \ref JitRuntime.
+//! It's logical as \ref JitRuntime always returns an \ref Environment that is
+//! compatible with the host. For example if your application runs in 64-bit mode
+//! the \ref Environment returned will use \ref Environment::kArchX64 architecture
+//! in contrast to \ref Environment::kArchX86, which will be used in 32-bit mode on
+//! any X86 platform.
+//!
+//! AsmJit allows you to set up the \ref Environment manually and to select a
+//! different architecture and ABI when necessary. So let's do something else this
+//! time: let's always generate 32-bit code and print its binary representation.
+//! To do that, we
+//! can create our own \ref Environment and initialize it to \ref Environment::kArchX86.
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! int main(int argc, char* argv[]) {
+//! using namespace asmjit::x86;
+//!
+//! // Create a custom environment initialized to 32-bit X86 architecture.
+//! Environment env;
+//! env.setArch(Environment::kArchX86);
+//!
+//! CodeHolder code; // Create a CodeHolder.
+//! code.init(env); // Initialize CodeHolder with custom environment.
+//!
+//! // Generate a 32-bit function that sums 4 floats and looks like:
+//! // void func(float* dst, const float* a, const float* b)
+//! x86::Assembler a(&code); // Create and attach x86::Assembler to `code`.
+//!
+//! a.mov(eax, dword_ptr(esp, 4)); // Load the destination pointer.
+//! a.mov(ecx, dword_ptr(esp, 8)); // Load the first source pointer.
+//! a.mov(edx, dword_ptr(esp, 12)); // Load the second source pointer.
+//!
+//! a.movups(xmm0, ptr(ecx)); // Load 4 floats from [ecx] to XMM0.
+//! a.movups(xmm1, ptr(edx)); // Load 4 floats from [edx] to XMM1.
+//! a.addps(xmm0, xmm1); // Add 4 floats in XMM1 to XMM0.
+//! a.movups(ptr(eax), xmm0); // Store the result to [eax].
+//! a.ret(); // Return from function.
+//!
+//! // We have no Runtime this time, it's on us what we do with the code.
+//! // CodeHolder stores code in Section, which provides some basic properties
+//! // and CodeBuffer structure. We are interested in section's CodeBuffer.
+//! //
+//! // NOTE: The first section is always '.text', it can be retrieved by
+//! // code.sectionById(0) or simply by code.textSection().
+//! CodeBuffer& buffer = code.textSection()->buffer();
+//!
+//! // Print the machine-code generated or do something else with it...
+//! // 8B4424048B4C24048B5424040F28010F58010F2900C3
+//! for (size_t i = 0; i < buffer.length; i++)
+//! printf("%02X", buffer.data[i]);
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! ### Explicit Code Relocation
+//!
+//! In addition to \ref Environment, \ref CodeHolder can be configured to
+//! specify a base-address (or a virtual base-address in a linker terminology),
+//! which could be static (useful when you know the location where the target's
+//! machine code will be) or dynamic. AsmJit assumes dynamic base-address by
+//! default and relocates the code held by \ref CodeHolder to a user provided
+//! address on-demand. To be able to relocate to a user provided address it needs
+//! to store some information about relocations, which is represented by \ref
+//! RelocEntry. Relocation entries are only required if you call external functions
+//! from the generated code that cannot be encoded by using a 32-bit displacement
+//! (64-bit displacements are not provided by any supported architecture).
+//!
+//! There is also a concept called \ref LabelLink - label link is a lightweight
+//! data structure that doesn't have any identifier and is stored in \ref LabelEntry
+//! as a singly-linked list. A label link represents either a label that has been
+//! used but not yet bound, or a cross-section link (only relevant to code that
+//! uses multiple sections).
+//! Since crossing sections is something that cannot be resolved immediately these
+//! links persist until offsets of these sections are assigned and until
+//! \ref CodeHolder::resolveUnresolvedLinks() is called. It's an error if you end
+//! up with code that has unresolved label links after flattening. You can verify
+//! it by calling \ref CodeHolder::hasUnresolvedLinks(), which inspects the value
+//! returned by \ref CodeHolder::unresolvedLinkCount().
+//!
+//! AsmJit can flatten code that uses multiple sections by assigning each section
+//! an incrementing offset that respects its alignment. Use \ref CodeHolder::flatten()
+//! to do that. After the sections are flattened their offsets and virtual-sizes
+//! are adjusted to respect each section's buffer size and alignment. The \ref
+//! CodeHolder::resolveUnresolvedLinks() function must be called before relocating
+//! the code held by \ref CodeHolder. You can also flatten your code manually by
+//! iterating over all sections and calculating their offsets (relative to base)
+//! by your own algorithm. In that case \ref CodeHolder::flatten() should not be
+//! called, however, \ref CodeHolder::resolveUnresolvedLinks() should be.
+//!
+//! The example below shows how to use a built-in virtual memory allocator
+//! \ref JitAllocator instead of using \ref JitRuntime (just in case you want
+//! to use your own memory management) and how to relocate the generated code
+//! into your own memory block - you can use your own virtual memory allocator
+//! if you prefer that, but that's OS specific and not covered by the documentation.
+//!
+//! The following code is similar to the previous one, but implements a function
+//! working in both 32-bit and 64-bit environments:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b);
+//!
+//! int main() {
+//! // Create a custom environment that matches the current host environment.
+//! Environment env = hostEnvironment();
+//!
+//! CodeHolder code; // Create a CodeHolder.
+//! code.init(env); // Initialize CodeHolder with environment.
+//!
+//! x86::Assembler a(&code); // Create and attach x86::Assembler to `code`.
+//!
+//! // Signature: 'void func(int* dst, const int* a, const int* b)'.
+//! x86::Gp dst;
+//! x86::Gp src_a;
+//! x86::Gp src_b;
+//!
+//! // Handle the difference between 32-bit and 64-bit calling conventions
+//! // (arguments passed through stack vs. arguments passed by registers).
+//! if (env.is32Bit()) {
+//! dst = x86::eax;
+//! src_a = x86::ecx;
+//! src_b = x86::edx;
+//! a.mov(dst , x86::dword_ptr(x86::esp, 4));
+//! a.mov(src_a, x86::dword_ptr(x86::esp, 8));
+//! a.mov(src_b, x86::dword_ptr(x86::esp, 12));
+//! }
+//! else {
+//! if (env.isPlatformWindows()) {
+//! dst = x86::rcx; // First argument (destination pointer).
+//! src_a = x86::rdx; // Second argument (source 'a' pointer).
+//! src_b = x86::r8; // Third argument (source 'b' pointer).
+//! }
+//! else {
+//! dst = x86::rdi; // First argument (destination pointer).
+//! src_a = x86::rsi; // Second argument (source 'a' pointer).
+//! src_b = x86::rdx; // Third argument (source 'b' pointer).
+//! }
+//! }
+//!
+//! a.movdqu(x86::xmm0, x86::ptr(src_a)); // Load 4 ints from [src_a] to XMM0.
+//! a.movdqu(x86::xmm1, x86::ptr(src_b)); // Load 4 ints from [src_b] to XMM1.
+//! a.paddd(x86::xmm0, x86::xmm1); // Add 4 ints in XMM1 to XMM0.
+//! a.movdqu(x86::ptr(dst), x86::xmm0); // Store the result to [dst].
+//! a.ret(); // Return from function.
+//!
+//! // Even though we didn't use multiple sections, AsmJit could insert one section
+//! // called '.addrtab' (address table section), which would be filled with data
+//! // required by relocations (absolute jumps and calls). You can omit this code
+//! // if you are 100% sure your code doesn't contain multiple sections and
+//! // such relocations. You can use `CodeHolder::hasAddressTable()` to verify
+//! // whether the address table section does exist.
+//! code.flatten();
+//! code.resolveUnresolvedLinks();
+//!
+//! // After the code was generated it can be relocated manually to any memory
+//! // location; however, we need to know its size before we perform memory
+//! // allocation. `CodeHolder::codeSize()` returns the worst estimated code
+//! // size in case that relocations are not possible without trampolines (in
+//! // that case some extra code at the end of the current code buffer is
+//! // generated during relocation).
+//! size_t estimatedSize = code.codeSize();
+//!
+//! // Instead of rolling our own memory allocator we can use the one AsmJit
+//! // provides. It's decoupled so you don't need to use `JitRuntime` for that.
+//! JitAllocator allocator;
+//!
+//! // Allocate an executable virtual memory and handle a possible failure.
+//! void* p = allocator.alloc(estimatedSize);
+//! if (!p)
+//! return 0;
+//!
+//! // Now relocate the code to the address provided by the memory allocator.
+//! // Please note that this DOESN'T COPY anything to `p`. This function will
+//! // store the address in CodeHolder and use relocation entries to patch the
+//! // existing code in all sections to respect the base address provided.
+//! code.relocateToBase((uint64_t)p);
+//!
+//! // This is purely optional. There are cases in which the relocation can omit
+//! // unneeded data, which would shrink the size of the address table. If that
+//! // happens, the codeSize returned after relocateToBase() would be smaller
+//! // than the original `estimatedSize`.
+//! size_t codeSize = code.codeSize();
+//!
+//! // This will copy code from all sections to `p`. Iterating over all sections
+//! // and calling `memcpy()` would work as well, however, this function supports
+//! // additional options that can be used to also zero pad sections' virtual
+//! // size, etc.
+//! //
+//! // With some additional features, copyFlattenData() does roughly this:
+//! // for (Section* section : code.sections())
+//! // memcpy((uint8_t*)p + section->offset(),
+//! // section->data(),
+//! // section->bufferSize());
+//! code.copyFlattenedData(p, codeSize, CodeHolder::kCopyPadSectionBuffer);
+//!
+//! // Execute the generated function.
+//! int inA[4] = { 4, 3, 2, 1 };
+//! int inB[4] = { 1, 5, 2, 8 };
+//! int out[4];
+//!
+//! // This code uses AsmJit's ptr_as_func<> to cast between void* and SumIntsFunc.
+//! ptr_as_func<SumIntsFunc>(p)(out, inA, inB);
+//!
+//! // Prints {5 8 4 9}
+//! printf("{%d %d %d %d}\n", out[0], out[1], out[2], out[3]);
+//!
+//! // Release 'p' as it's no longer needed. It would be released together with
+//! // the 'allocator' instance anyway, but it's a good practice to release it
+//! // explicitly when you know that the function will not be needed anymore.
+//! allocator.release(p);
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! If you know the base-address in advance (before the code generation) it can
+//! be passed as a second argument to \ref CodeHolder::init(). In that case the
+//! Assembler will know the absolute position of each instruction and would be
+//! able to use it during instruction encoding to prevent relocations where
+//! possible. The following example shows how to configure the base address:
+//!
+//! ```
+//! #include <asmjit/core.h>
+//!
+//! void initializeCodeHolder(CodeHolder& code) {
+//! Environment env = hostEnvironment();
+//! uint64_t baseAddress = uint64_t(0x1234);
+//!
+//! // initialize CodeHolder with environment and custom base address.
+//! code.init(env, baseAddress);
+//! }
+//! ```
+//!
+//! ### Label Offsets and Links
+//!
+//! When a label that is not yet bound is used by the Assembler, it creates a
+//! \ref LabelLink, which is then added to a \ref LabelEntry. These links are
+//! also created if a label is used in a different section than in which it
+//! was bound. Let's examine some functions that can be used to check whether
+//! there are any unresolved links.
+//!
+//! ```
+//! #include <asmjit/core.h>
+//! #include <stdio.h>
+//!
+//! void labelLinksExample(CodeHolder& code, const Label& label) {
+//! // Tests whether the `label` is bound.
+//! bool isBound = code.isLabelBound(label);
+//! printf("Label %u is %s\n", label.id(), isBound ? "bound" : "not bound");
+//!
+//! // Returns true if the code contains either referenced, but unbound
+//! // labels, or cross-section label links that are not resolved yet.
+//! bool hasUnresolved = code.hasUnresolvedLinks(); // Boolean answer.
+//! size_t nUnresolved = code.unresolvedLinkCount(); // Count of unresolved links.
+//!
+//! printf("Number of unresolved links: %zu\n", nUnresolved);
+//! }
+//! ```
+//!
+//! There is no function that would return the number of unbound labels as this
+//! is completely unimportant from CodeHolder's perspective. If a label is not
+//! used then it doesn't matter whether it's bound or not, only actually used
+//! labels matter. After a Label is bound it's possible to query its offset
+//! relative to the start of the section where it was bound:
+//!
+//! ```
+//! #include <asmjit/core.h>
+//! #include <stdio.h>
+//!
+//! void labelOffsetExample(CodeHolder& code, const Label& label) {
+//! // Label offset is known after it's bound. The offset provided is relative
+//! // to the start of the section, see below for alternative. If the given
+//! // label is not bound the offset returned will be zero. It's recommended
+//! // to always check whether the label is bound before using its offset.
+//! uint64_t sectionOffset = code.labelOffset(label);
+//! printf("Label offset relative to section: %llu\n", (unsigned long long)sectionOffset);
+//!
+//! // If you use multiple sections and want the offset relative to the base.
+//! // NOTE: This function expects that the section has already an offset and
+//! // the label-link was resolved (if this is not true you will still get an
+//! // offset relative to the start of the section).
+//! uint64_t baseOffset = code.labelOffsetFromBase(label);
+//! printf("Label offset relative to base: %llu\n", (unsigned long long)baseOffset);
+//! }
+//! ```
+//!
+//! ### Sections
+//!
+//! AsmJit allows you to create multiple sections within the same \ref CodeHolder.
+//! A test-case [asmjit_test_x86_sections.cpp](https://github.com/asmjit/asmjit/blob/master/test/asmjit_test_x86_sections.cpp)
+//! can be used as a reference point although the following example should
+//! also provide a useful insight:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! void sectionsExample(CodeHolder& code) {
+//! // Text section is always provided as the first section.
+//! Section* text = code.textSection(); // or code.sectionById(0);
+//!
+//! // To create another section use CodeHolder::newSection().
+//! Section* data;
+//! Error err = code.newSection(&data,
+//! ".data", // Section name
+//! SIZE_MAX, // Name length if the name is not null terminated (or SIZE_MAX).
+//! 0, // Section flags, see Section::Flags.
+//! 8, // Section alignment, must be power of 2.
+//! 0); // Section order value (optional, default 0).
+//!
+//! // When you switch sections in Assembler, Builder, or Compiler the cursor
+//! // will always move to the end of that section. When you create an Assembler
+//! // the cursor would be placed at the end of the first (.text) section, which
+//! // is initially empty.
+//! x86::Assembler a(&code);
+//! Label L_Data = a.newLabel();
+//!
+//! a.mov(x86::eax, x86::ebx); // Emits in .text section.
+//!
+//! a.section(data); // Switches to the end of .data section.
+//! a.bind(L_Data); // Binds label in this .data section
+//! a.db(0x01); // Emits byte in .data section.
+//!
+//! a.section(text); // Switches to the end of .text section.
+//! a.add(x86::ebx, x86::eax); // Emits in .text section.
+//!
+//! // References a label in .text section, which was bound in .data section.
+//! // This would create a LabelLink even when the L_Data is already bound,
+//! // because the reference crosses sections. See below...
+//! a.lea(x86::rsi, x86::ptr(L_Data));
+//! }
+//! ```
+//!
+//! The last line in the example above shows that a LabelLink would be created
+//! even for bound labels that cross sections. In this case a referenced label
+//! was bound in another section, which means that the link couldn't be resolved
+//! at that moment. If your code uses sections, but you wish AsmJit to flatten
+//! these sections (you don't plan to flatten them manually) then there is an
+//! API for that.
+//!
+//! ```
+//! #include <asmjit/core.h>
+//! #include <stdio.h>
+//!
+//! // ... (continuing the previous example) ...
+//! void sectionsExampleContinued(CodeHolder& code) {
+//! // Suppose we have some code that contains multiple sections and
+//! // we would like to flatten it by using AsmJit's built-in API:
+//! Error err = code.flatten();
+//! if (err) {
+//! // There are many reasons it can fail, so always handle a possible error.
+//! printf("Failed to flatten the code: %s\n", DebugUtils::errorAsString(err));
+//! exit(1);
+//! }
+//!
+//! // After flattening all sections would contain assigned offsets
+//! // relative to base. Offsets are 64-bit unsigned integers so we
+//! // cast them to `size_t` for simplicity. On 32-bit targets it's
+//! // guaranteed that the offset cannot be greater than `2^32 - 1`.
+//! printf("Data section offset %zu", size_t(data->offset()));
+//!
+//!   // The flattening doesn't resolve unresolved label links; this
+//!   // has to be done manually as flattening can be done separately.
+//! err = code.resolveUnresolvedLinks();
+//! if (err) {
+//! // This is the kind of error that should always be handled...
+//! printf("Failed to resolve label links: %s\n", DebugUtils::errorAsString(err));
+//! exit(1);
+//! }
+//!
+//! if (code.hasUnresolvedLinks()) {
+//!     // This would mean either an unbound label or some other issue.
+//!     printf("The code has %zu unbound labels\n", code.unresolvedLinkCount());
+//! exit(1);
+//! }
+//! }
+//! ```
+
+// ============================================================================
+// [Documentation - asmjit_assembler]
+// ============================================================================
+
+//! \defgroup asmjit_assembler Assembler
+//! \brief Assembler interface and operands.
+//!
+//! ### Overview
+//!
+//! AsmJit's Assembler is used to emit machine code directly into a \ref
+//! CodeBuffer. In general, code generation with assembler requires the knowledge
+//! of the following:
+//!
+//! - \ref BaseAssembler and architecture-specific assemblers:
+//! - \ref x86::Assembler - Assembler specific to X86 architecture
+//! - \ref Operand and its variations:
+//! - \ref BaseReg - Base class for a register operand, inherited by:
+//! - \ref x86::Reg - Register operand specific to X86 architecture.
+//! - \ref BaseMem - Base class for a memory operand, inherited by:
+//! - \ref x86::Mem - Memory operand specific to X86 architecture.
+//! - \ref Imm - Immediate (value) operand.
+//! - \ref Label - Label operand.
+//!
+//! \note Assembler examples use \ref x86::Assembler as abstract interfaces cannot
+//! be used to generate code.
+//!
+//! ### Operand Basics
+//!
+//! Let's start with operands. \ref Operand is a data structure that defines a
+//! data layout of any operand. It can be inherited, but any class inheriting
+//! it cannot add any members to it; only the existing layout can be reused.
+//! AsmJit allows constructing operands dynamically, storing them, and querying
+//! complete information about them at run-time. Operands are small (always 16
+//! bytes per \ref Operand) and can be copied and passed by value. Please never
+//! allocate individual operands dynamically by using the `new` keyword - it would
+//! work, but then you would have to be responsible for deleting such operands.
+//! In AsmJit operands are always part of some other data structures like \ref
+//! InstNode, which is part of \ref asmjit_builder tool.
+//!
+//! Operands contain only identifiers, but not pointers to any code-generation data.
+//! For example, a \ref Label operand only provides a label identifier, but not a
+//! pointer to the \ref LabelEntry structure. In AsmJit such IDs are used to link
+//! things together without having to deal with pointers.
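+//!
+//! For illustration, a minimal sketch (assuming `code` is an initialized
+//! \ref CodeHolder and `a` is an attached x86::Assembler):
+//!
+//! ```
+//! Label L = a.newLabel();          // `L` stores only a label identifier.
+//! uint32_t labelId = L.id();       // The identifier can be read at any time.
+//!
+//! // The identifier is used to look up the LabelEntry in CodeHolder.
+//! LabelEntry* le = code.labelEntry(L);
+//! bool bound = le->isBound();      // False until `a.bind(L)` is called.
+//! ```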
+//!
+//! AsmJit's operands all inherit from a base class called \ref Operand. Operands
+//! have the following properties that are commonly accessible by getters and setters:
+//!
+//! - \ref Operand - Base operand, which only provides accessors that are common
+//! to all operand types.
+//! - \ref BaseReg - Describes either physical or virtual register. Physical
+//! registers have id that matches the target's machine id directly whereas
+//! virtual registers must be allocated into physical registers by a register
+//! allocator pass. Register operand provides:
+//! - Register Type - Unique id that describes each possible register provided
+//! by the target architecture - for example X86 backend provides \ref
+//! x86::Reg::RegType, which defines all variations of general purpose registers
+//! (GPB-LO, GPB-HI, GPW, GPD, and GPQ) and all types of other registers like K,
+//! MM, BND, XMM, YMM, and ZMM.
+//! - Register Group - Groups multiple register types under a single group - for
+//! example all general-purpose registers (of all sizes) on X86 are part of
+//! \ref x86::Reg::kGroupGp and all SIMD registers (XMM, YMM, ZMM) are part
+//! of \ref x86::Reg::kGroupVec.
+//! - Register Size - Contains the size of the register in bytes. If the size
+//! depends on the mode (32-bit vs 64-bit) then generally the higher size is
+//! used (for example RIP register has size 8 by default).
+//! - Register Id - Contains physical or virtual id of the register.
+//! - \ref BaseMem - Used to reference a memory location. Memory operand provides:
+//! - Base Register - A base register type and id (physical or virtual).
+//! - Index Register - An index register type and id (physical or virtual).
+//! - Offset - Displacement or absolute address to be referenced (32-bit if base
+//! register is used and 64-bit if base register is not used).
+//! - Flags that can describe various architecture dependent information (like
+//! scale and segment-override on X86).
+//! - \ref Imm - Immediate values are usually part of instructions (encoded within
+//! the instruction itself) or data.
+//! - \ref Label - used to reference a location in code or data. Labels must be
+//! created by the \ref BaseEmitter or by \ref CodeHolder. Each label has its
+//! unique id per \ref CodeHolder instance.
+//!
+//! ### Operand Manipulation
+//!
+//! AsmJit allows constructing operands dynamically, storing them, and querying
+//! complete information about them at run-time. Operands are small (always 16
+//! bytes per `Operand`) and should always be copied (by value) if you intend to
+//! store them (don't create operands by using the `new` keyword; it's not recommended).
+//! Operands are safe to be passed to `memcpy()` and `memset()`, which becomes
+//! handy when working with arrays of operands. If you set all members of an \ref
+//! Operand to zero the operand would become NONE operand, which is the same as a
+//! default constructed Operand.
+//!
+//! The example below illustrates how operands can be used and modified even
+//! without using any other code generation classes. The example uses X86
+//! architecture-specific operands.
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Registers can be copied, it's a common practice.
+//! x86::Gp dstRegByValue() { return x86::ecx; }
+//!
+//! void usingOperandsExample(x86::Assembler& a) {
+//! // Gets `ecx` register returned by a function.
+//! x86::Gp dst = dstRegByValue();
+//! // Gets `rax` register directly from the provided `x86` namespace.
+//! x86::Gp src = x86::rax;
+//! // Constructs `r10` dynamically.
+//! x86::Gp idx = x86::gpq(10);
+//! // Constructs [src + idx] memory address - referencing [rax + r10].
+//! x86::Mem m = x86::ptr(src, idx);
+//!
+//! // Examine `m`: Returns `x86::Reg::kTypeGpq`.
+//! m.indexType();
+//! // Examine `m`: Returns 10 (`r10`).
+//! m.indexId();
+//!
+//! // Reconstruct `idx` stored in mem:
+//! x86::Gp idx_2 = x86::Gp::fromTypeAndId(m.indexType(), m.indexId());
+//!
+//! // True, `idx` and idx_2` are identical.
+//! idx == idx_2;
+//!
+//! // Possible - op will still be the same as `m`.
+//! Operand op = m;
+//!   // True (can be cast to BaseMem or architecture-specific Mem).
+//! op.isMem();
+//!
+//! // True, `op` is just a copy of `m`.
+//! m == op;
+//!
+//!   // Static cast is fine and valid here.
+//!   static_cast<x86::Mem&>(op).addOffset(1);
+//!   // However, using `as<T>()` to cast to a derived type is preferred.
+//!   op.as<x86::Mem>().addOffset(1);
+//! // False, `op` now points to [rax + r10 + 2], which is not [rax + r10].
+//! m == op;
+//!
+//! // Emitting 'mov' - type safe way.
+//! a.mov(dst, m);
+//! // Not possible, `mov` doesn't provide mov(x86::Gp, Operand) overload.
+//! a.mov(dst, op);
+//!
+//! // Type-unsafe, but possible.
+//! a.emit(x86::Inst::kIdMov, dst, m);
+//! // Also possible, `emit()` is typeless and can be used with raw Operand.
+//! a.emit(x86::Inst::kIdMov, dst, op);
+//! }
+//! ```
+//!
+//! Some operands have to be created explicitly by emitters. For example labels
+//! must be created by \ref BaseEmitter::newLabel(), which creates a label entry
+//! and returns a \ref Label operand with the id that refers to it. Such label
+//! then can be used by emitters.
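+//!
+//! For example, a small sketch (assuming `a` is an initialized \ref x86::Assembler):
+//!
+//! ```
+//! Label L_Exit = a.newLabel();     // Creates a LabelEntry and returns a Label operand.
+//!
+//! a.test(x86::eax, x86::eax);
+//! a.jz(L_Exit);                    // A label can be referenced before it's bound.
+//! a.inc(x86::eax);
+//!
+//! a.bind(L_Exit);                  // Binds the label to the current position.
+//! a.ret();
+//! ```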
+//!
+//! ### Memory Operands
+//!
+//! Some architectures like X86 provide a complex memory addressing model that
+//! allows encoding addresses having a BASE register, an INDEX register with a
+//! possible scale (left shift), and a displacement (called offset in AsmJit).
+//! Memory address on X86 can also specify memory segment (segment-override in
+//! X86 terminology) and some instructions (gather / scatter) require INDEX to
+//! be a \ref x86::Vec register instead of a general-purpose register.
+//!
+//! AsmJit allows encoding and working with all forms of addresses mentioned and
+//! implemented by X86. In addition, it also allows constructing absolute 64-bit
+//! memory address operands, which are only allowed in one form of the 'mov' instruction.
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! void testX86Mem() {
+//! // Makes it easier to access x86 stuff...
+//! using namespace asmjit::x86;
+//!
+//! // BASE + OFFSET.
+//! Mem a = ptr(rax); // a = [rax]
+//! Mem b = ptr(rax, 15); // b = [rax + 15]
+//!
+//! // BASE + INDEX << SHIFT - Shift is in BITS as used by X86!
+//! Mem c = ptr(rax, rbx); // c = [rax + rbx]
+//! Mem d = ptr(rax, rbx, 2); // d = [rax + rbx << 2]
+//! Mem e = ptr(rax, rbx, 2, 15); // e = [rax + rbx << 2 + 15]
+//!
+//! // BASE + VM (Vector Index) (encoded as MOD+VSIB).
+//! Mem f = ptr(rax, xmm1); // f = [rax + xmm1]
+//! Mem g = ptr(rax, xmm1, 2); // g = [rax + xmm1 << 2]
+//! Mem h = ptr(rax, xmm1, 2, 15); // h = [rax + xmm1 << 2 + 15]
+//!
+//!   // Absolute address:
+//! uint64_t addr = (uint64_t)0x1234;
+//! Mem i = ptr(addr); // i = [0x1234]
+//! Mem j = ptr(addr, rbx); // j = [0x1234 + rbx]
+//! Mem k = ptr(addr, rbx, 2); // k = [0x1234 + rbx << 2]
+//!
+//! // LABEL - Will be encoded as RIP (64-bit) or absolute address (32-bit).
+//! Label L = ...;
+//! Mem m = ptr(L); // m = [L]
+//! Mem n = ptr(L, rbx); // n = [L + rbx]
+//! Mem o = ptr(L, rbx, 2); // o = [L + rbx << 2]
+//! Mem p = ptr(L, rbx, 2, 15); // p = [L + rbx << 2 + 15]
+//!
+//! // RIP - 64-bit only (RIP can't use INDEX).
+//! Mem q = ptr(rip, 24); // q = [rip + 24]
+//! }
+//! ```
+//!
+//! Memory operands can optionally contain memory size. This is required by
+//! instructions where the memory size cannot be deduced from other operands,
+//! like `inc` and `dec` on X86:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! void testX86Mem() {
+//! // The same as: dword ptr [rax + rbx].
+//! x86::Mem a = x86::dword_ptr(rax, rbx);
+//!
+//! // The same as: qword ptr [rdx + rsi << 0 + 1].
+//! x86::Mem b = x86::qword_ptr(rdx, rsi, 0, 1);
+//! }
+//! ```
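+//!
+//! As a small illustration of why the size matters (assuming `a` is an
+//! initialized \ref x86::Assembler), `inc` and `dec` have no register operand
+//! from which the size could be deduced, so the memory operand must carry it:
+//!
+//! ```
+//! // Increments a 32-bit integer at [rax] - the size comes from 'dword_ptr'.
+//! a.inc(x86::dword_ptr(x86::rax));
+//!
+//! // Decrements a 64-bit integer at [rbx + 8].
+//! a.dec(x86::qword_ptr(x86::rbx, 8));
+//! ```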
+//!
+//! Memory operands provide API that can be used to access its properties:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! void testX86Mem() {
+//! // The same as: dword ptr [rax + 12].
+//! x86::Mem mem = x86::dword_ptr(rax, 12);
+//!
+//! mem.hasBase(); // true.
+//! mem.hasIndex(); // false.
+//! mem.size(); // 4.
+//! mem.offset(); // 12.
+//!
+//! mem.setSize(0); // Sets the size to 0 (makes it sizeless).
+//! mem.addOffset(-1); // Adds -1 to the offset and makes it 11.
+//! mem.setOffset(0); // Sets the offset to 0.
+//! mem.setBase(rcx); // Changes BASE to RCX.
+//! mem.setIndex(rax); // Changes INDEX to RAX.
+//! mem.hasIndex(); // true.
+//! }
+//! // ...
+//! ```
+//!
+//! Making changes to memory operand is very comfortable when emitting loads
+//! and stores:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! void testX86Mem(CodeHolder& code) {
+//!   x86::Assembler a(&code);               // Your initialized x86::Assembler.
+//! x86::Mem mSrc = x86::ptr(eax); // Construct [eax] memory operand.
+//!
+//! // One way of emitting bunch of loads is to use `mem.adjusted()`, which
+//! // returns a new memory operand and keeps the source operand unchanged.
+//! a.movaps(x86::xmm0, mSrc); // No adjustment needed to load [eax].
+//! a.movaps(x86::xmm1, mSrc.adjusted(16)); // Loads from [eax + 16].
+//! a.movaps(x86::xmm2, mSrc.adjusted(32)); // Loads from [eax + 32].
+//! a.movaps(x86::xmm3, mSrc.adjusted(48)); // Loads from [eax + 48].
+//!
+//! // ... do something with xmm0-3 ...
+//!
+//! // Another way of adjusting memory is to change the operand in-place.
+//! // If you want to keep the original operand you can simply clone it.
+//! x86::Mem mDst = mSrc.clone(); // Clone mSrc.
+//!
+//! a.movaps(mDst, x86::xmm0); // Stores xmm0 to [eax].
+//! mDst.addOffset(16); // Adds 16 to `mDst`.
+//!
+//! a.movaps(mDst, x86::xmm1); // Stores to [eax + 16] .
+//! mDst.addOffset(16); // Adds 16 to `mDst`.
+//!
+//! a.movaps(mDst, x86::xmm2); // Stores to [eax + 32].
+//! mDst.addOffset(16); // Adds 16 to `mDst`.
+//!
+//! a.movaps(mDst, x86::xmm3); // Stores to [eax + 48].
+//! }
+//! ```
+//!
+//! ### Assembler Examples
+//!
+//! - \ref x86::Assembler provides many X86/X64 examples.
+
+// ============================================================================
+// [Documentation - asmjit_builder]
+// ============================================================================
+
+//! \defgroup asmjit_builder Builder
+//! \brief Builder interface, nodes, and passes.
+//!
+//! ### Overview
+//!
+//! Both \ref BaseBuilder and \ref BaseCompiler interfaces describe emitters
+//! that emit into a representation that allows further processing. The code
+//! stored in such representation is completely safe to be patched, simplified,
+//! reordered, obfuscated, removed, injected, analyzed, or processed some other
+//! way. Each instruction, label, directive, or other building block is stored
+//! as \ref BaseNode (or derived class like \ref InstNode or \ref LabelNode)
+//! and contains all the information necessary to pass that node later to the
+//! assembler.
+//!
+//! \ref BaseBuilder is an emitter that inherits from the \ref BaseEmitter interface.
+//! It was designed to provide maximum compatibility with the existing \ref
+//! BaseAssembler emitter so users can move from assembler to builder when needed,
+//! for example to implement post-processing, which is not possible with Assembler.
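+//!
+//! A minimal sketch of the typical Builder workflow (assuming `code` is an
+//! initialized \ref CodeHolder); nodes are only serialized to machine code
+//! when \ref BaseBuilder::finalize() is called:
+//!
+//! ```
+//! x86::Builder cb(&code);          // Attaches x86::Builder to `code`.
+//!
+//! cb.mov(x86::eax, 1);             // Creates and stores an InstNode.
+//! cb.ret();                        // Creates and stores another InstNode.
+//!
+//! // Nodes can be inspected or modified before anything is encoded.
+//! BaseNode* node = cb.firstNode();
+//!
+//! Error err = cb.finalize();       // Serializes the stored nodes to machine code.
+//! ```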
+//!
+//! ### Builder Nodes
+//!
+//! \ref BaseBuilder doesn't generate machine code directly; it uses an intermediate
+//! representation based on nodes. However, it allows serializing to \ref BaseAssembler
+//! when the code is ready to be encoded.
+//!
+//! There are multiple node types used by both \ref BaseBuilder and \ref BaseCompiler :
+//!
+//! - Basic nodes:
+//! - \ref BaseNode - Base class for all nodes.
+//! - \ref InstNode - Represents an instruction node.
+//! - \ref AlignNode - Represents an alignment directive (.align).
+//!   - \ref LabelNode - Represents a location where a \ref Label is bound.
+//!
+//! - Data nodes:
+//! - \ref EmbedDataNode - Represents data.
+//! - \ref EmbedLabelNode - Represents \ref Label address embedded as data.
+//! - \ref EmbedLabelDeltaNode - Represents a difference of two labels
+//! embedded in data.
+//!   - \ref ConstPoolNode - Represents constant pool data embedded as data.
+//!
+//! - Informative nodes:
+//! - \ref CommentNode - Represents a comment string, doesn't affect code
+//! generation.
+//! - \ref SentinelNode - A marker that can be used to remember certain
+//! position in code or data, doesn't affect code generation. Used by
+//! \ref FuncNode to mark the end of a function.
+//!
+//! - Other nodes are provided by \ref asmjit_compiler infrastructure.
+//!
+//! ### Builder Examples
+//!
+//! - \ref x86::Builder provides many X86/X64 examples.
+
+// ============================================================================
+// [Documentation - asmjit_compiler]
+// ============================================================================
+
+//! \defgroup asmjit_compiler Compiler
+//! \brief Compiler interface.
+//!
+//! ### Overview
+//!
+//! \ref BaseCompiler is a high-level interface built on top of \ref BaseBuilder
+//! interface, which provides register allocation and support for defining and
+//! invoking functions. At the moment it's the easiest way of generating code
+//! in AsmJit as most architecture and OS specifics are properly abstracted and
+//! handled by AsmJit automatically. However, abstractions also mean restrictions,
+//! which means that \ref BaseCompiler has more limitations than \ref BaseAssembler
+//! or \ref BaseBuilder.
+//!
+//! Since \ref BaseCompiler provides register allocation it also establishes the
+//! concept of functions - a function in Compiler sense is a unit in which virtual
+//! registers are allocated into physical registers by the register allocator.
+//! In addition, it enables using such virtual registers in function invocations.
+//!
+//! \ref BaseCompiler automatically handles function calling conventions. It's
+//! still architecture dependent, but makes the code generation much easier.
+//! Functions are essential; the first step to generate some code is to define a
+//! signature of the function to be generated (before generating the function body
+//! itself). Function arguments and return value(s) are handled by assigning
+//! virtual registers to them. Function calls are handled the same way.
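+//!
+//! A minimal sketch that defines a function adding two 32-bit integers
+//! (assuming `code` is an initialized \ref CodeHolder):
+//!
+//! ```
+//! x86::Compiler cc(&code);
+//!
+//! // int add(int a, int b)
+//! cc.addFunc(FuncSignatureT<int, int, int>(CallConv::kIdHost));
+//!
+//! x86::Gp a = cc.newGpd("a");      // Virtual 32-bit registers.
+//! x86::Gp b = cc.newGpd("b");
+//! cc.setArg(0, a);                 // Assigns virtual registers to arguments.
+//! cc.setArg(1, b);
+//!
+//! cc.add(a, b);                    // a += b.
+//! cc.ret(a);                       // Returns `a`.
+//! cc.endFunc();                    // Ends the function body.
+//!
+//! cc.finalize();                   // Runs passes (register allocation) and emits.
+//! ```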
+//!
+//! ### Compiler Nodes
+//!
+//! \ref BaseCompiler adds some nodes that are required for function generation
+//! and invocation:
+//!
+//! - \ref FuncNode - Represents a function definition.
+//! - \ref FuncRetNode - Represents a function return.
+//! - \ref InvokeNode - Represents a function invocation.
+//!
+//! \ref BaseCompiler also makes use of passes (\ref Pass) and automatically
+//! adds an architecture-dependent register allocator pass to the list of passes
+//! when attached to \ref CodeHolder.
+//!
+//! ### Compiler Examples
+//!
+//! - \ref x86::Compiler provides many X86/X64 examples.
+//!
+//! ### Compiler Tips
+//!
+//! Users of AsmJit have made mistakes in the past; this section provides
+//! some useful tips for beginners:
+//!
+//! - Virtual registers in compiler are bound to a single function. At the
+//! moment the implementation doesn't care whether a single virtual register
+//! is used in multiple functions, but it sees it as two independent virtual
+//! registers in that case. This means that virtual registers cannot be used
+//! to implement global variables. Global variables are basically memory
+//! addresses which functions can read from and write to, and they have to
+//! be implemented in the same way.
+//!
+//! - Compiler provides a useful debugging functionality, which can be turned
+//! on through \ref FormatOptions::Flags. Use \ref Logger::addFlags() to
+//! turn on additional logging features when using Compiler.
+
+// ============================================================================
+// [Documentation - asmjit_function]
+// ============================================================================
+
+//! \defgroup asmjit_function Function
+//! \brief Function definitions.
+//!
+//! ### Overview
+//!
+//! AsmJit provides functionality that can be used to define function signatures
+//! and to automatically calculate an optimal function frame that can be used directly
+//! by prolog and epilog insertion. This feature was exclusive to AsmJit's Compiler
+//! for a very long time, but was abstracted out and is now available for all users
+//! regardless of the emitter they use. The following use cases are possible:
+//!
+//! - Calculate function frame before the function is generated - this is the
+//! only way available to \ref BaseAssembler users and it will be described
+//! in this section.
+//!
+//! - Calculate function frame after the function is generated - this way is
+//! generally used by \ref BaseBuilder and \ref BaseCompiler emitters and
+//! this way is generally described in \ref asmjit_compiler section.
+//!
+//! The following concepts are used to describe and create functions in AsmJit:
+//!
+//! - \ref Type::Id - Type-id is an 8-bit value that describes a platform
+//! independent type as we know from C/C++. It provides abstractions for
+//! most common types like `int8_t`, `uint32_t`, `uintptr_t`, `float`,
+//! `double`, and all possible vector types to match ISAs up to AVX512.
+//! \ref Type::Id was introduced originally for \ref asmjit_compiler, but
+//! it's now used by \ref FuncSignature as well.
+//!
+//! - \ref CallConv - Describes a calling convention - this class contains
+//! instructions to assign registers and stack addresses to function
+//! arguments and return value(s), but doesn't specify any function
+//! signature itself. Calling conventions are architecture and OS dependent.
+//!
+//! - \ref FuncSignature - Describes a function signature, for example
+//! `int func(int, int)`. FuncSignature contains a function calling convention
+//! id, return value type, and function arguments. The signature itself is
+//! platform independent and uses \ref Type::Id to describe types of function
+//! arguments and function return value(s).
+//!
+//! - \ref FuncDetail - Architecture and ABI dependent information that describes
+//! \ref CallConv and expanded \ref FuncSignature. Each function argument and
+//! return value is represented as \ref FuncValue that contains the original
+//! \ref Type::Id enriched with additional information that specifies whether
+//! the value is passed or returned by register (and which register) or by
+//! stack. Each value also contains some other metadata that provide additional
+//! information required to handle it properly (for example whether a vector is
+//! passed indirectly by a pointer as required by WIN64 calling convention).
+//!
+//! - \ref FuncFrame - Contains information about the function frame that can
+//!   be used by the prolog/epilog inserter (PEI). Holds call stack size and
+//!   alignment, local stack size and alignment, and various attributes that
+//!   describe how the prolog and epilog should be constructed. `FuncFrame` doesn't
+//!   know anything about the function's arguments or return values; it holds only
+//!   the information necessary to create valid and ABI-conforming function prologs
+//!   and epilogs.
+//!
+//! - \ref FuncArgsAssignment - A helper class that can be used to reassign
+//! function arguments into user specified registers. It's architecture and
+//! ABI dependent mapping from function arguments described by \ref CallConv
+//! and \ref FuncDetail into registers specified by the user.
+//!
+//! That's a lot of concepts, each representing one step in a function frame
+//! calculation. Together they can be used to create function prologs and epilogs,
+//! and also to calculate the information necessary to perform function calls.
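+//!
+//! Roughly, for an Assembler-based workflow the pieces compose as in the
+//! sketch below. Treat it as an outline only - `code` and `a` are assumed to
+//! be an initialized \ref CodeHolder and an attached x86::Assembler, and the
+//! exact init signatures may differ slightly between AsmJit versions:
+//!
+//! ```
+//! // Describe the signature and expand it for the host calling convention.
+//! FuncDetail func;
+//! func.init(FuncSignatureT<int, int, int>(CallConv::kIdHost), code.environment());
+//!
+//! // Describe the frame - registers the body will clobber, stack usage, etc.
+//! FuncFrame frame;
+//! frame.init(func);
+//! frame.addDirtyRegs(x86::eax, x86::ecx);
+//! frame.finalize();
+//!
+//! a.emitProlog(frame);             // Emits a prolog matching the frame.
+//! // ... function body ...
+//! a.emitEpilog(frame);             // Emits a matching epilog and return.
+//! ```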
+
+// ============================================================================
+// [Documentation - asmjit_logging]
+// ============================================================================
+
+//! \defgroup asmjit_logging Logging
+//! \brief Logging and formatting.
+//!
+//! ### Overview
+//!
+//! The initial phase of a project that generates machine code is not always smooth.
+//! Failure cases are common not just at the beginning, but also during development
+//! or refactoring. AsmJit provides logging functionality to address this issue.
+//! AsmJit already does a good job with function overloading to prevent emitting
+//! unencodable instructions, but it can't prevent emitting machine code that is
+//! correct at the instruction level yet doesn't work when executed as a whole.
+//! Logging has always been an important part of AsmJit's infrastructure and
+//! looking at logs can sometimes reveal code generation issues quickly.
+//!
+//! AsmJit provides API for logging and formatting:
+//! - \ref Logger - A logger that you can pass to \ref CodeHolder and all emitters
+//! that inherit from \ref BaseEmitter.
+//! - \ref FormatOptions - Formatting options that can change how instructions and
+//! operands are formatted.
+//! - \ref Formatter - A namespace that provides functions that can format input
+//! data like \ref Operand, \ref BaseReg, \ref Label, and \ref BaseNode into
+//! \ref String.
+//!
+//! AsmJit's \ref Logger serves the following purposes:
+//! - Provides a basic foundation for logging.
+//! - An abstract class leaving the implementation to users. The following built-in
+//!   implementations are provided for simplicity:
+//! - \ref FileLogger implements logging into a standard `FILE` stream.
+//! - \ref StringLogger serializes all logs into a \ref String instance.
+//!
+//! AsmJit's \ref FormatOptions customizes how instructions and operands are
+//! formatted through the following (a short sketch follows this list):
+//! - \ref FormatOptions::Flags
+//! - \ref FormatOptions::IndentationType
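+//!
+//! For example, a short sketch (the exact set of available flags can differ
+//! between AsmJit versions):
+//!
+//! ```
+//! FileLogger logger(stdout);
+//!
+//! // Show encoded machine code bytes next to each instruction and explain
+//! // immediate values where possible.
+//! logger.addFlags(FormatOptions::kFlagMachineCode | FormatOptions::kFlagExplainImms);
+//! ```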
+//!
+//! ### Logging
+//!
+//! A \ref Logger is typically attached to a \ref CodeHolder, which propagates it
+//! to all attached emitters automatically. The example below illustrates how to
+//! use \ref FileLogger that outputs to standard output:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! int main() {
+//! JitRuntime rt; // Runtime specialized for JIT code execution.
+//! FileLogger logger(stdout); // Logger should always survive CodeHolder.
+//!
+//! CodeHolder code; // Holds code and relocation information.
+//! code.init(rt.environment()); // Initialize to the same arch as JIT runtime.
+//! code.setLogger(&logger); // Attach the `logger` to `code` holder.
+//!
+//! // ... code as usual, everything emitted will be logged to `stdout` ...
+//! return 0;
+//! }
+//! ```
+//!
+//! If output to FILE stream is not desired it's possible to use \ref StringLogger,
+//! which concatenates everything into a multi-line string:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//! #include <utility>
+//!
+//! using namespace asmjit;
+//!
+//! int main() {
+//! JitRuntime rt; // Runtime specialized for JIT code execution.
+//! StringLogger logger; // Logger should always survive CodeHolder.
+//!
+//! CodeHolder code; // Holds code and relocation information.
+//! code.init(rt.environment()); // Initialize to the same arch as JIT runtime.
+//! code.setLogger(&logger); // Attach the `logger` to `code` holder.
+//!
+//! // ... code as usual, logging will be concatenated to logger string ...
+//!
+//!   // You can either use the string from StringLogger directly or you can
+//!   // move it. StringLogger::data() returns its content as a null terminated char[].
+//! printf("Logger content: %s\n", logger.data());
+//!
+//! // It can be moved into your own string like this:
+//! String content = std::move(logger.content());
+//! printf("The same content: %s\n", content.data());
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! ### Formatting
+//!
+//! AsmJit uses \ref Formatter to format inputs that are then passed to \ref
+//! Logger. Formatting is public and can be used by AsmJit users as well. The
+//! most important thing to know regarding formatting is that \ref Formatter
+//! always appends to the output string, so it can be used to build complex
+//! strings without having to concatenate intermediate strings.
+//!
+//! The first example illustrates how to format operands:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! void logOperand(uint32_t arch, const Operand_& op) {
+//! // The emitter is optional (named labels and virtual registers need it).
+//! BaseEmitter* emitter = nullptr;
+//!
+//! // No flags by default.
+//! uint32_t formatFlags = FormatOptions::kNoFlags;
+//!
+//! StringTmp<128> sb;
+//! Formatter::formatOperand(sb, formatFlags, emitter, arch, op);
+//! printf("%s\n", sb.data());
+//! }
+//!
+//! void formattingExample() {
+//! using namespace x86;
+//!
+//!   // Architecture is not part of the operand, so it must be passed explicitly.
+//!   // 'logOperand' uses default format flags, which matches what AsmJit
+//!   // normally does.
+//!   uint32_t arch = Environment::kArchX64;
+//!
+//!   logOperand(arch, rax);                    // Prints 'rax'.
+//!   logOperand(arch, ptr(rax, rbx, 2));       // Prints '[rax + rbx * 4]'.
+//!   logOperand(arch, dword_ptr(rax, rbx, 2)); // Prints 'dword [rax + rbx * 4]'.
+//!   logOperand(arch, imm(42));                // Prints '42'.
+//! }
+//! ```
+//!
+//! Next example illustrates how to format whole instructions:
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//! #include <utility>
+//!
+//! using namespace asmjit;
+//!
+//! template<typename... Args>
+//! void logInstruction(uint32_t arch, const BaseInst& inst, Args&&... args) {
+//! // The emitter is optional (named labels and virtual registers need it).
+//! BaseEmitter* emitter = nullptr;
+//!
+//! // No flags by default.
+//! uint32_t formatFlags = FormatOptions::kNoFlags;
+//!
+//!   // The formatter expects operands in an array.
+//!   Operand_ operands[] = { std::forward<Args>(args)... };
+//!
+//! StringTmp<128> sb;
+//! Formatter::formatInstruction(
+//! sb, formatFlags, emitter, arch, inst, operands, sizeof...(args));
+//! printf("%s\n", sb.data());
+//! }
+//!
+//! void formattingExample() {
+//! using namespace x86;
+//!
+//!   // Architecture is not part of the operand, so it must be passed explicitly.
+//!   // 'logInstruction' uses default format flags, which matches what AsmJit
+//!   // normally does.
+//! uint32_t arch = Environment::kArchX64;
+//!
+//! // Prints 'mov rax, rcx'.
+//! logInstruction(arch, BaseInst(Inst::kIdMov), rax, rcx);
+//!
+//! // Prints 'vaddpd zmm0, zmm1, [rax] {1to8}'.
+//! logInstruction(arch,
+//! BaseInst(Inst::kIdVaddpd),
+//! zmm0, zmm1, ptr(rax)._1toN());
+//!
+//! // BaseInst abstracts instruction id, instruction options, and extraReg.
+//! // Prints 'lock add [rax], rcx'.
+//! logInstruction(arch,
+//! BaseInst(Inst::kIdAdd, Inst::kOptionLock),
+//! x86::ptr(rax), rcx);
+//!
+//! // Similarly an extra register (like AVX-512 selector) can be used.
+//! // Prints 'vaddpd zmm0 {k2} {z}, zmm1, [rax]'.
+//! logInstruction(arch,
+//!     BaseInst(Inst::kIdVaddpd, Inst::kOptionZMask, k2),
+//! zmm0, zmm1, ptr(rax));
+//! }
+//! ```
+//!
+//! And finally, the example below illustrates how to use a built-in function
+//! to format the content of \ref BaseBuilder, which consists of nodes:
+//!
+//! ```
+//! #include <asmjit/core.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! void formattingExample(BaseBuilder* builder) {
+//! uint32_t formatFlags = FormatOptions::kNoFlags;
+//!
+//! // This also shows how temporary strings can be used.
+//! StringTmp<512> sb;
+//!
+//!   // formatNodeList() requires the String for output, formatting flags, which
+//!   // were zero (no extra flags), and the builder instance, which we have
+//!   // provided. An overloaded version also exists, which accepts begin and
+//!   // end nodes and can be used to format only a range of nodes.
+//! Formatter::formatNodeList(sb, formatFlags, builder);
+//!
+//! // You can do whatever else with the string, it's always null terminated,
+//! // so it can be passed to C functions like printf().
+//! printf("%s\n", sb.data());
+//! }
+//! ```
+
+// ============================================================================
+// [Documentation - asmjit_error_handling]
+// ============================================================================
+
+//! \defgroup asmjit_error_handling Error Handling
+//! \brief Error handling.
+//!
+//! ### Overview
+//!
+//! AsmJit uses error codes to represent and return errors. Every function that
+//! can fail returns an \ref Error code. Exceptions are never thrown by AsmJit
+//! itself even in extreme conditions like out-of-memory, but it's possible to
+//! override \ref ErrorHandler::handleError() to throw; in that case no error
+//! will be returned and an exception will be thrown instead. All functions where
+//! this can happen are not marked `noexcept`.
+//!
+//! Errors should never be ignored; however, checking errors after each AsmJit
+//! API call would simply overcomplicate the whole code generation experience.
+//! \ref ErrorHandler exists to make the use of the AsmJit API simpler as it allows
+//! customizing how errors are handled:
+//!
+//!  - Record the error and continue (how the error is recorded is up to the
+//!    user implementation).
+//! - Throw an exception. AsmJit doesn't use exceptions and is completely
+//! exception-safe, but it's perfectly legal to throw an exception from
+//! the error handler.
+//!  - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler,
+//! Builder and Compiler to a consistent state before calling \ref
+//! ErrorHandler::handleError(), so `longjmp()` can be used without issues to
+//! cancel the code-generation if an error occurred. This method can be used if
+//! exception handling in your project is turned off and you still want some
+//! comfort. In most cases it should be safe as AsmJit uses \ref Zone memory
+//! and the ownership of memory it allocates always ends with the instance that
+//! allocated it. If using this approach please never jump outside the life-time
+//! of \ref CodeHolder and \ref BaseEmitter.
+//!
+//! ### Using ErrorHandler
+//!
+//! An example of attaching \ref ErrorHandler to \ref CodeHolder.
+//!
+//! ```
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // A simple error handler implementation, extend according to your needs.
+//! class MyErrorHandler : public ErrorHandler {
+//! public:
+//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//! printf("AsmJit error: %s\n", message);
+//! }
+//! };
+//!
+//! int main() {
+//! JitRuntime rt;
+//!
+//! MyErrorHandler myErrorHandler;
+//! CodeHolder code;
+//!
+//! code.init(rt.environment());
+//! code.setErrorHandler(&myErrorHandler);
+//!
+//! x86::Assembler a(&code);
+//! // ... code generation ...
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! Useful classes in error handling group:
+//!
+//! - See \ref DebugUtils that provides utilities useful for debugging.
+//! - See \ref Error that lists error codes that AsmJit uses.
+//! - See \ref ErrorHandler for more details about error handling.
+
+// ============================================================================
+// [Documentation - asmjit_instruction_db]
+// ============================================================================
+
+//! \defgroup asmjit_instruction_db Instruction DB
+//! \brief Instruction database (introspection, read/write, validation, ...).
+//!
+//! ### Overview
+//!
+//! AsmJit provides a public instruction database that can be used to query
+//! information about a complete instruction. The instruction database requires
+//! the knowledge of the following:
+//!
+//! - \ref BaseInst - Base instruction that contains instruction id, options,
+//! and a possible extra-register that represents either REP prefix counter
+//! or AVX-512 selector (mask).
+//! - \ref Operand - Represents operands of an instruction.
+//!
+//! Each instruction can be then queried for the following information:
+//!
+//!  - \ref InstRWInfo - Read/write information of an instruction and its operands.
+//! - \ref OpRWInfo - Read/write information of a single operand, part of
+//! \ref InstRWInfo data structure.
+//! - \ref BaseFeatures - CPU features required to execute the instruction.
+//!
+//! In addition to query functionality AsmJit is also able to validate whether
+//! an instruction and its operands are valid. This is useful for making sure
+//! that what the user tries to emit is correct, and it can also be used by other
+//! projects that parse user input, like the AsmTK project.
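+//!
+//! For example, a rough sketch of validating a single instruction (treat the
+//! exact parameter list as an assumption and check \ref InstAPI for the
+//! authoritative signature):
+//!
+//! ```
+//! using namespace x86;
+//!
+//! Operand_ operands[] = { eax, dword_ptr(rbx) };
+//! Error err = InstAPI::validate(
+//!   Environment::kArchX64, BaseInst(Inst::kIdAdd), operands, 2);
+//!
+//! if (err)
+//!   printf("Invalid instruction: %s\n", DebugUtils::errorAsString(err));
+//! ```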
+//!
+//! ### Query API
+//!
+//! The instruction query API is provided by \ref InstAPI namespace. The
+//! following queries are possible:
+//!
+//! - \ref InstAPI::queryRWInfo() - queries read/write information of the
+//! given instruction and its operands. Includes also CPU flags read/written.
+//!
+//! - \ref InstAPI::queryFeatures() - queries CPU features that are required
+//! to execute the given instruction. A full instruction with operands must
+//! be given as some architectures like X86 may require different features
+//! for the same instruction based on its operands.
+//!
+//! - asmjit_test_x86_instinfo.cpp
+//!   can also be used as a reference for accessing instruction information.
+//!
+//! ### Validation API
+//!
+//! The instruction validation API is provided by the \ref InstAPI namespace in a
+//! similar fashion to the Query API; however, validation can also be turned
+//! on at the \ref BaseEmitter level. The following is possible:
+//!
+//! - \ref InstAPI::validate() - low-level instruction validation function
+//! that is used internally by emitters if strict validation is enabled.
+//!
+//! - \ref BaseEmitter::addValidationOptions() - can be used to enable
+//! validation at emitter level, see \ref BaseEmitter::ValidationOptions.
+
+
+// ============================================================================
+// [Documentation - asmjit_virtual_memory]
+// ============================================================================
+
+//! \defgroup asmjit_virtual_memory Virtual Memory
+//! \brief Virtual memory management.
+//!
+//! ### Overview
+//!
+//! AsmJit's virtual memory management is divided into two main categories:
+//!
+//! - Low level API that provides cross-platform abstractions for virtual
+//! memory allocation. Implemented in \ref VirtMem namespace.
+//! - High level API that makes it very easy to store generated code for
+//!   execution. See \ref JitRuntime, which is used by many examples for its
+//!   simplicity and easy integration with \ref CodeHolder. There is also
+//!   \ref JitAllocator, which lies somewhere between raw memory allocation
+//!   and \ref JitRuntime. A minimal end-to-end sketch follows this list.
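+//!
+//! The sketch below uses \ref JitRuntime; error handling is omitted for
+//! brevity:
+//!
+//! ```
+//! JitRuntime rt;                   // Manages executable memory for JIT code.
+//!
+//! CodeHolder code;
+//! code.init(rt.environment());
+//!
+//! x86::Assembler a(&code);
+//! a.mov(x86::eax, 1);
+//! a.ret();
+//!
+//! typedef int (*Func)(void);
+//! Func fn;
+//! rt.add(&fn, &code);              // Copies the code into executable memory.
+//!
+//! int result = fn();               // Executes the generated code, returns 1.
+//! rt.release(fn);                  // Releases the memory when no longer needed.
+//! ```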
+
+// ============================================================================
+// [Documentation - asmjit_zone_memory]
+// ============================================================================
+
+//! \defgroup asmjit_zone Zone Memory
+//! \brief Zone memory allocator and containers.
+//!
+//! ### Overview
+//!
+//! AsmJit uses zone memory allocation (also known as Arena allocation) to allocate
+//! most of the data it uses. It's a fast allocator that allows AsmJit to allocate
+//! a lot of small data structures fast and without `malloc()` overhead. Since
+//! code generators and all related classes are usually short-lived this approach
+//! decreases memory usage and fragmentation as arena-based allocators always
+//! allocate larger blocks of memory, which are then split into smaller chunks.
+//!
+//! Another advantage of zone memory allocation is that since the whole library
+//! uses this strategy it's very easy to deallocate everything that a particular
+//! instance is holding by simply releasing the memory the allocator holds. This
+//! improves destruction time of such objects as there is no destruction at all.
+//! Long-lived objects just reset their data in the destructor or in their reset()
+//! member function for future reuse. For this purpose all containers in AsmJit
+//! are also zone allocated.
+//!
+//! ### Zone Allocation
+//!
+//! - \ref Zone - Incremental zone memory allocator with minimum features. It
+//! can only allocate memory without the possibility to return it back to
+//! the allocator.
+//!
+//! - \ref ZoneTmp - A temporary \ref Zone with some initial static storage.
+//! If the allocation requests fit the static storage allocated then there
+//! will be no dynamic memory allocation during the lifetime of \ref ZoneTmp,
+//! otherwise it would act as \ref Zone with one preallocated block on the
+//! stack.
+//!
+//! - \ref ZoneAllocator - A wrapper of \ref Zone that provides the capability
+//!   of returning memory to the allocator. Such memory is stored in a pool for
+//!   later reuse. A short sketch of both allocators follows this list.
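+//!
+//! The sketch below shows both allocators (block and allocation sizes are
+//! chosen arbitrarily):
+//!
+//! ```
+//! Zone zone(4096);                 // Allocates memory in ~4kB blocks.
+//! void* p = zone.alloc(64);        // Carves 64 bytes out of the current block.
+//!
+//! ZoneAllocator allocator(&zone);  // Wraps the zone and adds pooling.
+//! void* q = allocator.alloc(32);   // This memory can be returned...
+//! allocator.release(q, 32);        // ...and will be pooled for reuse.
+//!
+//! zone.reset();                    // Invalidates everything allocated above.
+//! ```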
+//!
+//! ### Zone Allocated Containers
+//!
+//! - \ref ZoneString - Zone allocated string.
+//! - \ref ZoneHash - Zone allocated hash table.
+//! - \ref ZoneTree - Zone allocated red-black tree.
+//! - \ref ZoneList - Zone allocated double-linked list.
+//! - \ref ZoneStack - Zone allocated stack.
+//! - \ref ZoneVector - Zone allocated vector.
+//! - \ref ZoneBitVector - Zone allocated vector of bits.
+//!
+//! ### Using Zone Allocated Containers
+//!
+//! The most common data structure exposed by AsmJit is \ref ZoneVector. It's very
+//! similar to `std::vector`, but the implementation doesn't use exceptions and
+//! uses the mentioned \ref ZoneAllocator for performance reasons. You don't have
+//! to worry about allocations, as you should not need to add items to AsmJit's
+//! data structures directly; there should be an API for all required operations.
+//!
+//! The following APIs in \ref CodeHolder return a \ref ZoneVector reference:
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! void example(CodeHolder& code) {
+//! // Contains all emitters attached to CodeHolder.
+//!   const ZoneVector<BaseEmitter*>& emitters = code.emitters();
+//!
+//! // Contains all section entries managed by CodeHolder.
+//!   const ZoneVector<Section*>& sections = code.sections();
+//!
+//! // Contains all label entries managed by CodeHolder.
+//!   const ZoneVector<LabelEntry*>& labelEntries = code.labelEntries();
+//!
+//! // Contains all relocation entries managed by CodeHolder.
+//!   const ZoneVector<RelocEntry*>& relocEntries = code.relocEntries();
+//! }
+//! ```
+//!
+//! \ref ZoneVector has an overloaded array access operator to make it possible
+//! to access its elements through operator[]. Some standard functions like
+//! \ref ZoneVector::empty(), \ref ZoneVector::size(), and \ref ZoneVector::data()
+//! are provided as well. Vectors are also iterable through a range-based for loop:
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! void example(CodeHolder& code) {
+//! for (LabelEntry* le : code.labelEntries()) {
+//! printf("Label #%u {Bound=%s Offset=%llu}",
+//! le->id(),
+//! le->isBound() ? "true" : "false",
+//! (unsigned long long)le->offset());
+//! }
+//! }
+//! ```
+//!
+//! ### Design Considerations
+//!
+//! Zone-allocated containers do not store the allocator within the container.
+//! This decision was made to reduce the footprint of such containers as AsmJit
+//! tooling, especially Compiler's register allocation, may use many instances
+//! of such containers to perform code analysis and register allocation.
+//!
+//! For example, to append an item to a \ref ZoneVector it's required to pass
+//! the allocator as the first argument, so it can be used in case the
+//! vector needs a reallocation. Such a function also returns an error, which
+//! must be propagated to the caller.
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! Error example(ZoneAllocator* allocator) {
+//!   ZoneVector<int> vector;
+//!
+//! // Unfortunately, allocator must be provided to all functions that mutate
+//! // the vector. However, AsmJit users should never need to do this as all
+//! // manipulation should be done through public API, which takes care of
+//! // that.
+//! for (int i = 0; i < 100; i++) {
+//! ASMJIT_PROPAGATE(vector.append(allocator, i));
+//! }
+//!
+//! // By default vector's destructor doesn't release anything as it knows
+//! // that its content is zone allocated. However, \ref ZoneVector::release
+//! // can be used to explicitly release the vector data to the allocator if
+//!   // necessary.
+//!   vector.release(allocator);
+//!
+//!   return kErrorOk;
+//! }
+//! ```
+//!
+//! Containers like \ref ZoneVector also provide functionality to reserve space for
+//! a certain number of items before any items are added. This approach is
+//! used internally in most places as it allows preparing space for data that
+//! will be added to some container before the data itself is created.
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! Error example(ZoneAllocator* allocator) {
+//!   ZoneVector<int> vector;
+//!
+//!   ASMJIT_PROPAGATE(vector.willGrow(allocator, 100));
+//! for (int i = 0; i < 100; i++) {
+//! // Cannot fail.
+//! vector.appendUnsafe(allocator, i);
+//! }
+//!
+//!   vector.release(allocator);
+//!
+//!   return kErrorOk;
+//! }
+//! ```
+
+// ============================================================================
+// [Documentation - asmjit_utilities]
+// ============================================================================
+
+//! \defgroup asmjit_utilities Utilities
+//! \brief Utility classes and functions.
+//!
+//! ### Overview
+//!
+//! AsmJit uses and provides utility classes and functions that can be used
+//! with AsmJit. The functionality can be divided into the following topics:
+//!
+//! ### String Functionality
+//!
+//!  - \ref String - AsmJit's string container, which is used internally,
+//!    doesn't use exceptions, and has a stable layout that is not dependent
+//!    on the C++ standard library.
+//! - \ref StringTmp - String that can have base storage allocated on
+//! stack. The amount of storage on stack can be specified as a template
+//! parameter.
+//! - \ref FixedString - Fixed string container limited up to N characters.
+//!
+//! ### Code Generation Utilities
+//!
+//! - \ref ConstPool - Constant pool used by \ref BaseCompiler, but also
+//! available to users that may find use of it.
+//!
+//! ### Support Functionality Used by AsmJit
+//!
+//! - \ref Support namespace provides many other utility functions and
+//! classes that are used by AsmJit, and made public.
+
+// ============================================================================
+// [Documentation - asmjit_backends]
+// ============================================================================
+
+//! \defgroup asmjit_x86 X86 Backend
+//! \brief X86/X64 backend.
+
+// ============================================================================
+// [Documentation - asmjit_ra]
+// ============================================================================
+
+//! \cond INTERNAL
+//! \defgroup asmjit_ra RA
+//! \brief Register allocator internals.
+//! \endcond
+
+} // {asmjit}
+
+// ============================================================================
+// [Core Headers]
+// ============================================================================
+
+#include "asmjit-scope-begin.h"
+#include "core/archtraits.h"
+#include "core/assembler.h"
+#include "core/builder.h"
+#include "core/codeholder.h"
+#include "core/compiler.h"
+#include "core/constpool.h"
+#include "core/cpuinfo.h"
+#include "core/datatypes.h"
+#include "core/emitter.h"
+#include "core/environment.h"
+#include "core/errorhandler.h"
+#include "core/features.h"
+#include "core/formatter.h"
+#include "core/func.h"
+#include "core/globals.h"
+#include "core/inst.h"
+#include "core/jitallocator.h"
+#include "core/jitruntime.h"
+#include "core/logger.h"
+#include "core/operand.h"
+#include "core/osutils.h"
+#include "core/string.h"
+#include "core/support.h"
+#include "core/target.h"
+#include "core/type.h"
+#include "core/virtmem.h"
+#include "core/zone.h"
+#include "core/zonehash.h"
+#include "core/zonelist.h"
+#include "core/zonetree.h"
+#include "core/zonestack.h"
+#include "core/zonestring.h"
+#include "core/zonevector.h"
+#include "asmjit-scope-end.h"
+
+// ============================================================================
+// [Deprecated]
+// ============================================================================
+
+#ifndef ASMJIT_NO_DEPRECATED
+namespace asmjit {
+
+#ifndef ASMJIT_NO_COMPILER
+ASMJIT_DEPRECATED("Use InvokeNode instead of FuncCallNode")
+typedef InvokeNode FuncCallNode;
+#endif // !ASMJIT_NO_COMPILER
+
+#ifndef ASMJIT_NO_LOGGING
+namespace Logging { using namespace Formatter; }
+#endif // !ASMJIT_NO_LOGGING
+
+} // {asmjit}
+#endif // !ASMJIT_NO_DEPRECATED
+
+#endif // ASMJIT_CORE_H_INCLUDED
diff --git a/Theodosius/asmjit/core/api-build_p.h b/Theodosius/asmjit/core/api-build_p.h
new file mode 100644
index 0000000..db37ca7
--- /dev/null
+++ b/Theodosius/asmjit/core/api-build_p.h
@@ -0,0 +1,77 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+
+#define ASMJIT_EXPORTS
+
+// Only turn-off these warnings when building asmjit itself.
+#ifdef _MSC_VER
+ #ifndef _CRT_SECURE_NO_DEPRECATE
+ #define _CRT_SECURE_NO_DEPRECATE
+ #endif
+ #ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+ #endif
+#endif
+
+// Dependencies only required for asmjit build, but never exposed through public headers.
+#ifdef _WIN32
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+  #include <windows.h>
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build-Only]
+// ============================================================================
+
+#include "./api-config.h"
+
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
+ #define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
+#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
+ #define ASMJIT_FAVOR_SPEED
+#else
+ #define ASMJIT_FAVOR_SIZE
+ #define ASMJIT_FAVOR_SPEED
+#endif
+
+// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
+#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
+ #define ASMJIT_TEST
+#endif
+
+// Include a unit testing package if this is a `asmjit_test_unit` build.
+#if defined(ASMJIT_TEST)
+ #include "../../../test/broken.h"
+#endif
+
+#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED
diff --git a/Theodosius/asmjit/core/api-config.h b/Theodosius/asmjit/core/api-config.h
new file mode 100644
index 0000000..aab3473
--- /dev/null
+++ b/Theodosius/asmjit/core/api-config.h
@@ -0,0 +1,552 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
+#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
+
+// ============================================================================
+// [asmjit::Version]
+// ============================================================================
+
+//! \addtogroup asmjit_core
+//! \{
+
+//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
+#define ASMJIT_LIBRARY_VERSION 0x010400 /* 1.4.0 */
+
+//! \}
+
+// ============================================================================
+// [asmjit::Build - Documentation]
+// ============================================================================
+
+// NOTE: Doxygen cannot document macros that are not defined, that's why we have
+// to define them and then undefine them, so it won't use the macros with its
+// own preprocessor.
+#ifdef _DOXYGEN
+namespace asmjit {
+
+//! \addtogroup asmjit_build
+//! \{
+
+//! AsmJit is embedded, which implies \ref ASMJIT_STATIC.
+#define ASMJIT_EMBED
+
+//! Enables static-library build.
+#define ASMJIT_STATIC
+
+//! Defined when AsmJit's build configuration is 'Debug'.
+//!
+//! \note Can be defined explicitly to bypass autodetection.
+#define ASMJIT_BUILD_DEBUG
+
+//! Defined when AsmJit's build configuration is 'Release'.
+//!
+//! \note Can be defined explicitly to bypass autodetection.
+#define ASMJIT_BUILD_RELEASE
+
+//! Defined to build X86/X64 backend.
+#define ASMJIT_BUILD_X86
+
+//! Defined to build host backend autodetected at compile-time.
+#define ASMJIT_BUILD_HOST
+
+//! Disables deprecated API at compile time.
+#define ASMJIT_NO_DEPRECATED
+
+//! Disable non-host architectures entirely.
+#define ASMJIT_NO_FOREIGN
+
+//! Disables \ref asmjit_builder functionality completely.
+#define ASMJIT_NO_BUILDER
+
+//! Disables \ref asmjit_compiler functionality completely.
+#define ASMJIT_NO_COMPILER
+
+//! Disables JIT memory management and \ref JitRuntime.
+#define ASMJIT_NO_JIT
+
+//! Disables \ref Logger and \ref Formatter.
+#define ASMJIT_NO_LOGGING
+
+//! Disables everything that contains text.
+#define ASMJIT_NO_TEXT
+
+//! Disables instruction validation API.
+#define ASMJIT_NO_VALIDATION
+
+//! Disables instruction introspection API.
+#define ASMJIT_NO_INTROSPECTION
+
+// Avoid doxygen preprocessor using feature-selection definitions.
+#undef ASMJIT_NO_BUILDER
+#undef ASMJIT_NO_COMPILER
+#undef ASMJIT_NO_JIT
+#undef ASMJIT_NO_LOGGING
+#undef ASMJIT_NO_TEXT
+#undef ASMJIT_NO_VALIDATION
+#undef ASMJIT_NO_INTROSPECTION
+
+//! \}
+
+} // {asmjit}
+#endif // _DOXYGEN
+
+// Enable all features at IDE level, so it's properly highlighted and indexed.
+#ifdef __INTELLISENSE__
+ #ifndef ASMJIT_BUILD_X86
+ #define ASMJIT_BUILD_X86
+ #endif
+#endif
+
+// ============================================================================
+// [asmjit::Dependencies]
+// ============================================================================
+
+// We really want std-types as globals.
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <iterator>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
+  #include <pthread.h>
+#endif
+
+
+// ============================================================================
+// [asmjit::Options]
+// ============================================================================
+
+// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
+#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
+ #define ASMJIT_NO_COMPILER
+#endif
+
+// Prevent compile-time errors caused by misconfiguration.
+#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
+  #pragma message("ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined")
+ #undef ASMJIT_NO_TEXT
+#endif
+
+#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
+ #pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
+ #undef ASMJIT_NO_INTROSPECTION
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Deprecated]
+// ============================================================================
+
+#ifndef ASMJIT_NO_DEPRECATED
+ #if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
+ #if defined(ASMJIT_BUILD_EMBED)
+ #pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+ #if defined(ASMJIT_BUILD_STATIC)
+ #pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+
+ #if !defined(ASMJIT_STATIC)
+ #define ASMJIT_STATIC
+ #endif
+ #endif
+#endif // !ASMJIT_NO_DEPRECATED
+
+// ============================================================================
+// [asmjit::Build - Globals - Build Mode]
+// ============================================================================
+
+// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
+#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
+ #if !defined(NDEBUG)
+ #define ASMJIT_BUILD_DEBUG
+ #else
+ #define ASMJIT_BUILD_RELEASE
+ #endif
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Target Architecture Information]
+// ============================================================================
+
+#if defined(_M_X64) || defined(__x86_64__)
+ #define ASMJIT_ARCH_X86 64
+#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
+ #define ASMJIT_ARCH_X86 32
+#else
+ #define ASMJIT_ARCH_X86 0
+#endif
+
+#if defined(__arm64__) || defined(__aarch64__)
+# define ASMJIT_ARCH_ARM 64
+#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
+ #define ASMJIT_ARCH_ARM 32
+#else
+ #define ASMJIT_ARCH_ARM 0
+#endif
+
+#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
+ #define ASMJIT_ARCH_MIPS 64
+#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
+ #define ASMJIT_ARCH_MIPS 32
+#else
+ #define ASMJIT_ARCH_MIPS 0
+#endif
+
+#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
+#if ASMJIT_ARCH_BITS == 0
+ #undef ASMJIT_ARCH_BITS
+ #if defined (__LP64__) || defined(_LP64)
+ #define ASMJIT_ARCH_BITS 64
+ #else
+ #define ASMJIT_ARCH_BITS 32
+ #endif
+#endif
+
+#if (defined(__ARMEB__)) || \
+ (defined(__MIPSEB__)) || \
+ (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ #define ASMJIT_ARCH_LE 0
+ #define ASMJIT_ARCH_BE 1
+#else
+ #define ASMJIT_ARCH_LE 1
+ #define ASMJIT_ARCH_BE 0
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build Architectures Definitions]
+// ============================================================================
+
+#if !defined(ASMJIT_NO_FOREIGN)
+ // If 'ASMJIT_NO_FOREIGN' is not defined then all architectures will be built.
+ #if !defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_X86
+ #endif
+#else
+ // Detect architectures to build if building only for the host architecture.
+ #if ASMJIT_ARCH_X86 && !defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_X86
+ #endif
+#endif
+
+// Define 'ASMJIT_BUILD_HOST' if we know that host architecture will be built.
+#if !defined(ASMJIT_BUILD_HOST) && ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_HOST
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - C++ Compiler and Features Detection]
+// ============================================================================
+
+#define ASMJIT_CXX_GNU 0
+#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
+
+// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
+// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
+#if defined(__INTEL_COMPILER)
+
+// MSC Compiler:
+// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
+//
+// Version List:
+// - 16.00.0 == VS2010
+// - 17.00.0 == VS2012
+// - 18.00.0 == VS2013
+// - 19.00.0 == VS2015
+// - 19.10.0 == VS2017
+#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
+
+// Clang Compiler [Pretends to be GNU, so it must be checked before]:
+// - https://clang.llvm.org/cxx_status.html
+#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
+
+// GNU Compiler:
+// - https://gcc.gnu.org/projects/cxx-status.html
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+
+ #undef ASMJIT_CXX_GNU
+ #define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__)
+
+#endif
+
+// Compiler features detection macros.
+#if defined(__clang__) && defined(__has_attribute)
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
+#else
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - API Decorators & Language Extensions]
+// ============================================================================
+
+// API (Export / Import).
+#if !defined(ASMJIT_STATIC)
+ #if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __declspec(dllexport)
+ #else
+ #define ASMJIT_API __declspec(dllimport)
+ #endif
+ #elif defined(_WIN32) && defined(__GNUC__)
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __attribute__((__dllexport__))
+ #else
+ #define ASMJIT_API __attribute__((__dllimport__))
+ #endif
+ #elif defined(__GNUC__)
+ #define ASMJIT_API __attribute__((__visibility__("default")))
+ #endif
+#endif
+
+#if !defined(ASMJIT_API)
+ #define ASMJIT_API
+#endif
+
+#if !defined(ASMJIT_VARAPI)
+ #define ASMJIT_VARAPI extern ASMJIT_API
+#endif
+
+// This is basically a workaround. When using MSVC and marking a class as DLL
+// export, everything gets exported, which is unwanted in most projects. MSVC
+// automatically exports typeinfo and vtable if at least one symbol of the
+// class is exported. However, GCC has some strange behavior that even if
+// one or more symbols are exported it doesn't export typeinfo unless the
+// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
+#if !defined(_WIN32) && defined(__GNUC__)
+ #define ASMJIT_VIRTAPI ASMJIT_API
+#else
+ #define ASMJIT_VIRTAPI
+#endif
+
+// Function attributes.
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
+ #define ASMJIT_INLINE inline __attribute__((__always_inline__))
+#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
+ #define ASMJIT_INLINE __forceinline
+#else
+ #define ASMJIT_INLINE inline
+#endif
+
+#if defined(__GNUC__)
+ #define ASMJIT_NOINLINE __attribute__((__noinline__))
+ #define ASMJIT_NORETURN __attribute__((__noreturn__))
+#elif defined(_MSC_VER)
+ #define ASMJIT_NOINLINE __declspec(noinline)
+ #define ASMJIT_NORETURN __declspec(noreturn)
+#else
+ #define ASMJIT_NOINLINE
+ #define ASMJIT_NORETURN
+#endif
+
+// Calling conventions.
+#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
+ #define ASMJIT_CDECL __attribute__((__cdecl__))
+ #define ASMJIT_STDCALL __attribute__((__stdcall__))
+ #define ASMJIT_FASTCALL __attribute__((__fastcall__))
+ #define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
+#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
+ #define ASMJIT_CDECL __cdecl
+ #define ASMJIT_STDCALL __stdcall
+ #define ASMJIT_FASTCALL __fastcall
+ #define ASMJIT_REGPARM(N)
+#else
+ #define ASMJIT_CDECL
+ #define ASMJIT_STDCALL
+ #define ASMJIT_FASTCALL
+ #define ASMJIT_REGPARM(N)
+#endif
+
+#if ASMJIT_ARCH_X86 && defined(_WIN32) && defined(_MSC_VER)
+ #define ASMJIT_VECTORCALL __vectorcall
+#elif ASMJIT_ARCH_X86 && defined(_WIN32)
+ #define ASMJIT_VECTORCALL __attribute__((__vectorcall__))
+#else
+ #define ASMJIT_VECTORCALL
+#endif
+
+
+// Type alignment (not allowed by C++11 'alignas' keyword).
+#if defined(__GNUC__)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
+#elif defined(_MSC_VER)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
+#else
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
+#endif
+
+//! \def ASMJIT_MAY_ALIAS
+//!
+//! Expands to `__attribute__((__may_alias__))` if supported.
+#if defined(__GNUC__)
+ #define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define ASMJIT_MAY_ALIAS
+#endif
+
+//! \def ASMJIT_LIKELY(...)
+//!
+//! Condition is likely to be taken (the common, non-error path).
+
+//! \def ASMJIT_UNLIKELY(...)
+//!
+//! Condition is unlikely to be taken (mostly error handling and edge cases).
+#if defined(__GNUC__)
+ #define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
+ #define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
+#else
+ #define ASMJIT_LIKELY(...) (__VA_ARGS__)
+ #define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
+#endif
+
+//! \def ASMJIT_FALLTHROUGH
+//!
+//! Portable [[fallthrough]] attribute.
+#if defined(__clang__) && __cplusplus >= 201103L
+ #define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
+#elif defined(__GNUC__) && __GNUC__ >= 7
+ #define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
+#else
+ #define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
+#endif
+
+//! \def ASMJIT_DEPRECATED
+//!
+//! Marks function, class, struct, enum, or anything else as deprecated.
+#if defined(__GNUC__)
+ #define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
+ #if defined(__clang__)
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
+ #else
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
+ #endif
+#elif defined(_MSC_VER)
+ #define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE) /* not usable if a deprecated function uses it */
+#else
+ #define ASMJIT_DEPRECATED(MESSAGE)
+ #define ASMJIT_DEPRECATED_STRUCT(MESSAGE)
+#endif
+
+// Utilities.
+#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
+#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
+
+#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
+#else
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
+// ============================================================================
+
+#if defined(__clang__)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
+ _Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("clang diagnostic pop") \
+ }
+#elif defined(__GNUC__) && __GNUC__ == 4
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif defined(__GNUC__) && __GNUC__ >= 8
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: 4127)) /* conditional expression is const */ \
+ __pragma(warning(disable: 4201)) /* nameless struct/union */
+ #define ASMJIT_END_NAMESPACE \
+ __pragma(warning(pop)) \
+ }
+#endif
+
+#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
+ #define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
+ #define ASMJIT_END_NAMESPACE }
+#endif
+
+#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
+ ASMJIT_BEGIN_NAMESPACE \
+ namespace NAMESPACE {
+
+#define ASMJIT_END_SUB_NAMESPACE \
+ } \
+ ASMJIT_END_NAMESPACE
+
+// ============================================================================
+// [asmjit::Build - Globals - Utilities]
+// ============================================================================
+
+#define ASMJIT_NONCOPYABLE(...) \
+ private: \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
+
+#define ASMJIT_NONCONSTRUCTIBLE(...) \
+ private: \
+ __VA_ARGS__() = delete; \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
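+
+// A minimal usage sketch of the helpers above (MyHolder is a hypothetical
+// class, not part of AsmJit): expanding ASMJIT_NONCOPYABLE inside a class
+// deletes its copy constructor and copy assignment operator.
+//
+//   class MyHolder {
+//   public:
+//     ASMJIT_NONCOPYABLE(MyHolder)
+//     MyHolder() noexcept = default;
+//   };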
+
+// ============================================================================
+// [asmjit::Build - Globals - Cleanup]
+// ============================================================================
+
+// Cleanup definitions that are only used within this header file.
+#undef ASMJIT_CXX_GNU
+#undef ASMJIT_CXX_MAKE_VER
+
+#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED
diff --git a/Theodosius/asmjit/core/archcommons.h b/Theodosius/asmjit/core/archcommons.h
new file mode 100644
index 0000000..fda2451
--- /dev/null
+++ b/Theodosius/asmjit/core/archcommons.h
@@ -0,0 +1,164 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
+#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
+
+// This file provides architecture-specific classes that are required in the
+// core library. For example, the Imm operand can be created from arm::Shift
+// in a constexpr way, so arm::Shift must be provided. This header therefore
+// provides everything architecture-specific that is used by the Core API.
+
+#include "../core/globals.h"
+
+// ============================================================================
+// [asmjit::arm]
+// ============================================================================
+
+ASMJIT_BEGIN_SUB_NAMESPACE(arm)
+
+//! \addtogroup asmjit_arm
+//! \{
+
+//! Represents ARM immediate shift operation type and value.
+class Shift {
+public:
+ //! Operation predicate (ARM) describes either SHIFT or EXTEND operation.
+ //!
+  //! \note The constants are AsmJit specific. The first 5 values describe real
+  //! constants on ARM32 and AArch64 hardware; the additional constants that
+  //! describe extend modes are specific to AsmJit and are translated to the
+  //! AArch64 specific constants by the assembler.
+ enum Op : uint32_t {
+ //! Shift left logical operation (default).
+ //!
+ //! Available to all ARM architectures.
+ kOpLSL = 0x00u,
+
+ //! Shift right logical operation.
+ //!
+ //! Available to all ARM architectures.
+ kOpLSR = 0x01u,
+
+ //! Shift right arithmetic operation.
+ //!
+ //! Available to all ARM architectures.
+ kOpASR = 0x02u,
+
+ //! Rotate right operation.
+ //!
+ //! \note Not available in AArch64 mode.
+ kOpROR = 0x03u,
+
+    //! Rotate right with carry operation (encoded as \ref kOpROR with zero value).
+ //!
+ //! \note Not available in AArch64 mode.
+ kOpRRX = 0x04u,
+
+ //! Shift left by filling low order bits with ones.
+ kOpMSL = 0x05u,
+
+ //! UXTN extend register operation (AArch64 only).
+ kOpUXTB = 0x06u,
+ //! UXTH extend register operation (AArch64 only).
+ kOpUXTH = 0x07u,
+ //! UXTW extend register operation (AArch64 only).
+ kOpUXTW = 0x08u,
+ //! UXTX extend register operation (AArch64 only).
+ kOpUXTX = 0x09u,
+
+ //! SXTB extend register operation (AArch64 only).
+ kOpSXTB = 0x0Au,
+ //! SXTH extend register operation (AArch64 only).
+ kOpSXTH = 0x0Bu,
+ //! SXTW extend register operation (AArch64 only).
+ kOpSXTW = 0x0Cu,
+ //! SXTX extend register operation (AArch64 only).
+ kOpSXTX = 0x0Du
+
+ // NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
+ };
+
+ //! Shift operation.
+ uint32_t _op;
+ //! Shift Value.
+ uint32_t _value;
+
+ //! Default constructed Shift is not initialized.
+ inline Shift() noexcept = default;
+
+ //! Copy constructor (default)
+ constexpr Shift(const Shift& other) noexcept = default;
+
+ //! Constructs Shift from operation `op` and shift `value`.
+ constexpr Shift(uint32_t op, uint32_t value) noexcept
+ : _op(op),
+ _value(value) {}
+
+ //! Returns the shift operation.
+ constexpr uint32_t op() const noexcept { return _op; }
+  //! Returns the shift amount.
+ constexpr uint32_t value() const noexcept { return _value; }
+
+ //! Sets shift operation to `op`.
+ inline void setOp(uint32_t op) noexcept { _op = op; }
+ //! Sets shift amount to `value`.
+ inline void setValue(uint32_t value) noexcept { _value = value; }
+};
+
+//! Constructs a `LSL #value` shift (logical shift left).
+static constexpr Shift lsl(uint32_t value) noexcept { return Shift(Shift::kOpLSL, value); }
+//! Constructs a `LSR #value` shift (logical shift right).
+static constexpr Shift lsr(uint32_t value) noexcept { return Shift(Shift::kOpLSR, value); }
+//! Constructs a `ASR #value` shift (arithmetic shift right).
+static constexpr Shift asr(uint32_t value) noexcept { return Shift(Shift::kOpASR, value); }
+//! Constructs a `ROR #value` shift (rotate right).
+static constexpr Shift ror(uint32_t value) noexcept { return Shift(Shift::kOpROR, value); }
+//! Constructs a `RRX` shift (rotate with carry by 1).
+static constexpr Shift rrx() noexcept { return Shift(Shift::kOpRRX, 0); }
+//! Constructs a `MSL #value` shift (logical shift left filling ones).
+static constexpr Shift msl(uint32_t value) noexcept { return Shift(Shift::kOpMSL, value); }
+
+//! Constructs a `UXTB #value` extend and shift (unsigned byte extend).
+static constexpr Shift uxtb(uint32_t value) noexcept { return Shift(Shift::kOpUXTB, value); }
+//! Constructs a `UXTH #value` extend and shift (unsigned hword extend).
+static constexpr Shift uxth(uint32_t value) noexcept { return Shift(Shift::kOpUXTH, value); }
+//! Constructs a `UXTW #value` extend and shift (unsigned word extend).
+static constexpr Shift uxtw(uint32_t value) noexcept { return Shift(Shift::kOpUXTW, value); }
+//! Constructs a `UXTX #value` extend and shift (unsigned dword extend).
+static constexpr Shift uxtx(uint32_t value) noexcept { return Shift(Shift::kOpUXTX, value); }
+
+//! Constructs a `SXTB #value` extend and shift (signed byte extend).
+static constexpr Shift sxtb(uint32_t value) noexcept { return Shift(Shift::kOpSXTB, value); }
+//! Constructs a `SXTH #value` extend and shift (signed hword extend).
+static constexpr Shift sxth(uint32_t value) noexcept { return Shift(Shift::kOpSXTH, value); }
+//! Constructs a `SXTW #value` extend and shift (signed word extend).
+static constexpr Shift sxtw(uint32_t value) noexcept { return Shift(Shift::kOpSXTW, value); }
+//! Constructs a `SXTX #value` extend and shift (signed dword extend).
+static constexpr Shift sxtx(uint32_t value) noexcept { return Shift(Shift::kOpSXTX, value); }
+
+//! \}
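+
+// A small illustrative sketch (not part of AsmJit itself): constructing and
+// inspecting a shift descriptor with the helpers above.
+//
+//   arm::Shift s = arm::lsl(12);  // LSL #12
+//   uint32_t op = s.op();         // arm::Shift::kOpLSL
+//   s.setValue(16);               // now LSL #16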
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
diff --git a/Theodosius/asmjit/core/archtraits.cpp b/Theodosius/asmjit/core/archtraits.cpp
new file mode 100644
index 0000000..f069354
--- /dev/null
+++ b/Theodosius/asmjit/core/archtraits.cpp
@@ -0,0 +1,155 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/archtraits.h"
+#include "../core/misc_p.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86archtraits_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armarchtraits_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ArchTraits]
+// ============================================================================
+
+static const constexpr ArchTraits noArchTraits = {
+ 0xFF, // SP.
+ 0xFF, // FP.
+ 0xFF, // LR.
+ 0xFF, // PC.
+ { 0, 0, 0 }, // Reserved.
+ 0, // HW stack alignment.
+ 0, // Min stack offset.
+ 0, // Max stack offset.
+ { 0, 0, 0, 0}, // ISA features [Gp, Vec, Other0, Other1].
+ { { 0 } }, // RegTypeToSignature.
+ { 0 }, // RegTypeToTypeId.
+ { 0 } // TypeIdToRegType.
+};
+
+ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount] = {
+ // No architecture.
+ noArchTraits,
+
+ // X86/X86 architectures.
+#ifdef ASMJIT_BUILD_X86
+ x86::x86ArchTraits,
+ x86::x64ArchTraits,
+#else
+ noArchTraits,
+ noArchTraits,
+#endif
+
+ // RISCV32/RISCV64 architectures.
+ noArchTraits,
+ noArchTraits,
+
+ // ARM architecture
+ noArchTraits,
+
+ // AArch64 architecture.
+#ifdef ASMJIT_BUILD_ARM
+ arm::a64ArchTraits,
+#else
+ noArchTraits,
+#endif
+
+ // ARM/Thumb architecture.
+ noArchTraits,
+
+ // Reserved.
+ noArchTraits,
+
+ // MIPS32/MIPS64
+ noArchTraits,
+ noArchTraits
+};
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfoOut) noexcept {
+ const ArchTraits& archTraits = ArchTraits::byArch(arch);
+
+ // Passed RegType instead of TypeId?
+ if (typeId <= BaseReg::kTypeMax)
+ typeId = archTraits.regTypeToTypeId(typeId);
+
+ if (ASMJIT_UNLIKELY(!Type::isValid(typeId)))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ // First normalize architecture dependent types.
+ if (Type::isAbstract(typeId)) {
+ bool is32Bit = Environment::is32Bit(arch);
+ if (typeId == Type::kIdIntPtr)
+ typeId = is32Bit ? Type::kIdI32 : Type::kIdI64;
+ else
+ typeId = is32Bit ? Type::kIdU32 : Type::kIdU64;
+ }
+
+ // Type size helps to construct all groups of registers.
+ // TypeId is invalid if the size is zero.
+ uint32_t size = Type::sizeOf(typeId);
+ if (ASMJIT_UNLIKELY(!size))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ if (ASMJIT_UNLIKELY(typeId == Type::kIdF80))
+ return DebugUtils::errored(kErrorInvalidUseOfF80);
+
+ uint32_t regType = 0;
+ if (typeId >= Type::_kIdBaseStart && typeId < Type::_kIdVec32Start) {
+ regType = archTraits._typeIdToRegType[typeId - Type::_kIdBaseStart];
+ if (!regType) {
+ if (typeId == Type::kIdI64 || typeId == Type::kIdU64)
+ return DebugUtils::errored(kErrorInvalidUseOfGpq);
+ else
+ return DebugUtils::errored(kErrorInvalidTypeId);
+ }
+ }
+ else {
+ if (size <= 8 && archTraits._regInfo[BaseReg::kTypeVec64].isValid())
+ regType = BaseReg::kTypeVec64;
+ else if (size <= 16 && archTraits._regInfo[BaseReg::kTypeVec128].isValid())
+ regType = BaseReg::kTypeVec128;
+ else if (size == 32 && archTraits._regInfo[BaseReg::kTypeVec256].isValid())
+ regType = BaseReg::kTypeVec256;
+ else if (archTraits._regInfo[BaseReg::kTypeVec512].isValid())
+ regType = BaseReg::kTypeVec512;
+ else
+ return DebugUtils::errored(kErrorInvalidTypeId);
+ }
+
+ *typeIdOut = typeId;
+ regInfoOut->reset(archTraits.regTypeToSignature(regType));
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/archtraits.h b/Theodosius/asmjit/core/archtraits.h
new file mode 100644
index 0000000..5af6c7e
--- /dev/null
+++ b/Theodosius/asmjit/core/archtraits.h
@@ -0,0 +1,174 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
+#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
+
+#include "../core/environment.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::ArchTraits]
+// ============================================================================
+
+//! Architecture traits used by Function API and Compiler's register allocator.
+struct ArchTraits {
+ //! ISA features for each register group.
+ enum IsaFeatures : uint32_t {
+ //! ISA features a register swap by using a single instruction.
+ kIsaFeatureSwap = 0x01u,
+ //! ISA features a push/pop like instruction for this register group.
+ kIsaFeaturePushPop = 0x02u,
+ };
+
+ //! Stack pointer register id.
+ uint8_t _spRegId;
+ //! Frame pointer register id.
+ uint8_t _fpRegId;
+ //! Link register id.
+ uint8_t _linkRegId;
+ //! Instruction pointer (or program counter) register id, if accessible.
+ uint8_t _ipRegId;
+
+ // Reserved.
+ uint8_t _reserved[3];
+ //! Hardware stack alignment requirement.
+ uint8_t _hwStackAlignment;
+ //! Minimum addressable offset on stack guaranteed for all instructions.
+ uint32_t _minStackOffset;
+ //! Maximum addressable offset on stack depending on specific instruction.
+ uint32_t _maxStackOffset;
+
+ //! Flags for each virtual register group (always covers GP and Vec groups).
+ uint8_t _isaFlags[BaseReg::kGroupVirt];
+
+ //! Maps register type into a signature, that provides group, size and can
+ //! be used to construct register operands.
+ RegInfo _regInfo[BaseReg::kTypeMax + 1];
+ //! Maps a register to type-id, see \ref Type::Id.
+ uint8_t _regTypeToTypeId[BaseReg::kTypeMax + 1];
+ //! Maps base TypeId values (from TypeId::_kIdBaseStart) to register types, see \ref Type::Id.
+ uint8_t _typeIdToRegType[32];
+
+ //! Resets all members to zeros.
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns stack pointer register id.
+ inline constexpr uint32_t spRegId() const noexcept { return _spRegId; }
+ //! Returns stack frame register id.
+ inline constexpr uint32_t fpRegId() const noexcept { return _fpRegId; }
+ //! Returns link register id, if the architecture provides it.
+ inline constexpr uint32_t linkRegId() const noexcept { return _linkRegId; }
+ //! Returns instruction pointer register id, if the architecture provides it.
+ inline constexpr uint32_t ipRegId() const noexcept { return _ipRegId; }
+
+ //! Returns a hardware stack alignment requirement.
+ //!
+ //! \note This is a hardware constraint. Architectures that don't constrain
+ //! it would return the lowest alignment (1), however, some architectures may
+ //! constrain the alignment, for example AArch64 requires 16-byte alignment.
+ inline constexpr uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
+
+ //! Tests whether the architecture provides link register, which is used across
+ //! function calls. If the link register is not provided then a function call
+ //! pushes the return address on stack (X86/X64).
+ inline constexpr bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
+
+ //! Returns minimum addressable offset on stack guaranteed for all instructions.
+ inline constexpr uint32_t minStackOffset() const noexcept { return _minStackOffset; }
+ //! Returns maximum addressable offset on stack depending on specific instruction.
+ inline constexpr uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
+
+ //! Returns ISA flags of the given register `group`.
+ inline constexpr uint32_t isaFlags(uint32_t group) const noexcept { return _isaFlags[group]; }
+ //! Tests whether the given register `group` has the given `flag` set.
+ inline constexpr bool hasIsaFlag(uint32_t group, uint32_t flag) const noexcept { return (_isaFlags[group] & flag) != 0; }
+ //! Tests whether the ISA provides register swap instruction for the given register `group`.
+ inline constexpr bool hasSwap(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeatureSwap); }
+ //! Tests whether the ISA provides push/pop instructions for the given register `group`.
+ inline constexpr bool hasPushPop(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeaturePushPop); }
+
+ inline uint32_t hasRegType(uint32_t rType) const noexcept {
+ return rType <= BaseReg::kTypeMax && _regInfo[rType].signature() != 0;
+ }
+
+ inline uint32_t regTypeToSignature(uint32_t rType) const noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return _regInfo[rType].signature();
+ }
+
+ inline uint32_t regTypeToGroup(uint32_t rType) const noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return _regInfo[rType].group();
+ }
+
+ inline uint32_t regTypeToSize(uint32_t rType) const noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return _regInfo[rType].size();
+ }
+
+ inline uint32_t regTypeToTypeId(uint32_t rType) const noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return _regTypeToTypeId[rType];
+ }
+
+ //! \}
+
+ //! \name Statics
+ //! \{
+
+ //! Returns a const reference to `ArchTraits` for the given architecture `arch`.
+ static inline const ArchTraits& byArch(uint32_t arch) noexcept;
+
+ //! \}
+};
+
+ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount];
+
+inline const ArchTraits& ArchTraits::byArch(uint32_t arch) noexcept { return _archTraits[arch & ~Environment::kArchBigEndianMask]; }
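+
+// A usage sketch (Environment::kArchX64 comes from environment.h and is not
+// part of this file): querying the traits of a concrete architecture id.
+//
+//   const ArchTraits& traits = ArchTraits::byArch(Environment::kArchX64);
+//   bool usesLinkReg = traits.hasLinkReg();          // false on X86/X64
+//   uint32_t stackAlign = traits.hwStackAlignment();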
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+//! Architecture utilities.
+namespace ArchUtils {
+
+ASMJIT_API Error typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfo) noexcept;
+
+} // {ArchUtils}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
diff --git a/Theodosius/asmjit/core/assembler.cpp b/Theodosius/asmjit/core/assembler.cpp
new file mode 100644
index 0000000..c0cbf0f
--- /dev/null
+++ b/Theodosius/asmjit/core/assembler.cpp
@@ -0,0 +1,409 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/codewriter_p.h"
+#include "../core/constpool.h"
+#include "../core/emitterutils_p.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseAssembler - Construction / Destruction]
+// ============================================================================
+
+BaseAssembler::BaseAssembler() noexcept
+ : BaseEmitter(kTypeAssembler) {}
+
+BaseAssembler::~BaseAssembler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Buffer Management]
+// ============================================================================
+
+Error BaseAssembler::setOffset(size_t offset) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+  size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
+ if (ASMJIT_UNLIKELY(offset > size))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ _bufferPtr = _bufferData + offset;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Section Management]
+// ============================================================================
+
+static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
+ uint8_t* p = section->_buffer._data;
+
+ self->_section = section;
+ self->_bufferData = p;
+ self->_bufferPtr = p + section->_buffer._size;
+ self->_bufferEnd = p + section->_buffer._capacity;
+}
+
+Error BaseAssembler::section(Section* section) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
+ return reportError(DebugUtils::errored(kErrorInvalidSection));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ _logger->logf(".section %s {#%u}\n", section->name(), section->id());
+#endif
+
+ BaseAssembler_initSection(this, section);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Label Management]
+// ============================================================================
+
+Label BaseAssembler::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newLabelEntry(&le);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
+Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
+Error BaseAssembler::bind(const Label& label) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ Error err = _code->bindLabel(label, _section->id(), offset());
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ EmitterUtils::logLabelBound(this, label);
+#endif
+
+ resetInlineComment();
+ if (err)
+ return reportError(err);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Embed]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+struct DataSizeByPower {
+ char str[4];
+};
+
+static const DataSizeByPower dataSizeByPowerTable[] = {
+ { "db" },
+ { "dw" },
+ { "dd" },
+ { "dq" }
+};
+#endif
+
+Error BaseAssembler::embed(const void* data, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ if (dataSize == 0)
+ return kErrorOk;
+
+ CodeWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+ writer.emitData(data, dataSize);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ _logger->logBinary(data, dataSize);
+#endif
+
+ writer.done(this);
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
+ uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
+ uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
+
+ if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+  if (itemCount == 0 || repeatCount == 0)
+ return kErrorOk;
+
+ uint32_t typeSize = Type::sizeOf(finalTypeId);
+ Support::FastUInt8 of = 0;
+
+  size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
+ size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
+
+ if (ASMJIT_UNLIKELY(of))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ CodeWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ const uint8_t* start = writer.cursor();
+#endif
+
+ for (size_t i = 0; i < repeatCount; i++) {
+ writer.emitData(data, dataSize);
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ _logger->logBinary(start, totalSize);
+#endif
+
+ writer.done(this);
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ if (ASMJIT_UNLIKELY(!isLabelValid(label)))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ size_t size = pool.size();
+ CodeWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
+
+ pool.fill(writer.cursor());
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger)
+ _logger->logBinary(writer.cursor(), size);
+#endif
+
+ writer.advance(size);
+ writer.done(this);
+
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ ASMJIT_ASSERT(_code != nullptr);
+ RelocEntry* re;
+ LabelEntry* le = _code->labelEntry(label);
+
+ if (ASMJIT_UNLIKELY(!le))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ if (dataSize == 0)
+ dataSize = registerSize();
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
+ return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
+
+ CodeWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger) {
+ StringTmp<256> sb;
+ sb.appendFormat("%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str);
+ Formatter::formatLabel(sb, 0, this, label.id());
+ sb.append('\n');
+ _logger->log(sb);
+ }
+#endif
+
+ Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_format.resetToDataValue(uint32_t(dataSize));
+
+ if (le->isBound()) {
+ re->_targetSectionId = le->section()->id();
+ re->_payload = le->offset();
+ }
+ else {
+ OffsetFormat of;
+ of.resetToDataValue(uint32_t(dataSize));
+
+ LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
+ if (ASMJIT_UNLIKELY(!link))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ link->relocId = re->id();
+ }
+
+ // Emit dummy DWORD/QWORD depending on the data size.
+ writer.emitZeros(dataSize);
+ writer.done(this);
+
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ LabelEntry* labelEntry = _code->labelEntry(label);
+ LabelEntry* baseEntry = _code->labelEntry(base);
+
+ if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ if (dataSize == 0)
+ dataSize = registerSize();
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
+ return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
+
+ CodeWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_logger) {
+ StringTmp<256> sb;
+ sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str);
+ Formatter::formatLabel(sb, 0, this, label.id());
+ sb.append(" - ");
+ Formatter::formatLabel(sb, 0, this, base.id());
+ sb.append(")\n");
+ _logger->log(sb);
+ }
+#endif
+
+ // If both labels are bound within the same section it means the delta can be calculated now.
+ if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
+ uint64_t delta = labelEntry->offset() - baseEntry->offset();
+ writer.emitValueLE(delta, dataSize);
+ }
+ else {
+ RelocEntry* re;
+ Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+    Expression* exp = _code->_zone.newT<Expression>();
+ if (ASMJIT_UNLIKELY(!exp))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ exp->reset();
+ exp->opType = Expression::kOpSub;
+ exp->setValueAsLabel(0, labelEntry);
+ exp->setValueAsLabel(1, baseEntry);
+
+ re->_format.resetToDataValue(dataSize);
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_payload = (uint64_t)(uintptr_t)exp;
+
+ writer.emitZeros(dataSize);
+ }
+
+ writer.done(this);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Comment]
+// ============================================================================
+
+Error BaseAssembler::comment(const char* data, size_t size) {
+ if (!hasEmitterFlag(kFlagLogComments)) {
+ if (!hasEmitterFlag(kFlagAttached))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+ return kErrorOk;
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ // Logger cannot be NULL if `kFlagLogComments` is set.
+ ASMJIT_ASSERT(_logger != nullptr);
+
+ _logger->log(data, size);
+ _logger->log("\n", 1);
+ return kErrorOk;
+#else
+ DebugUtils::unused(data, size);
+ return kErrorOk;
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Events]
+// ============================================================================
+
+Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ // Attach to the end of the .text section.
+ BaseAssembler_initSection(this, code->_sections[0]);
+
+ return kErrorOk;
+}
+
+Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
+ _section = nullptr;
+ _bufferData = nullptr;
+ _bufferEnd = nullptr;
+ _bufferPtr = nullptr;
+ return Base::onDetach(code);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/assembler.h b/Theodosius/asmjit/core/assembler.h
new file mode 100644
index 0000000..6e38bc5
--- /dev/null
+++ b/Theodosius/asmjit/core/assembler.h
@@ -0,0 +1,152 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+
+#include "../core/codeholder.h"
+#include "../core/datatypes.h"
+#include "../core/emitter.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_assembler
+//! \{
+
+// ============================================================================
+// [asmjit::BaseAssembler]
+// ============================================================================
+
+//! Base assembler.
+//!
+//! This is a base class that provides interface used by architecture specific
+//! assembler implementations. Assembler doesn't hold any data, instead it's
+//! attached to \ref CodeHolder, which provides all the data that Assembler
+//! needs and which can be altered by it.
+//!
+//! Check out architecture specific assemblers for more details and examples:
+//!
+//! - \ref x86::Assembler - X86/X64 assembler implementation.
+class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
+public:
+ ASMJIT_NONCOPYABLE(BaseAssembler)
+ typedef BaseEmitter Base;
+
+ //! Current section where the assembling happens.
+ Section* _section = nullptr;
+ //! Start of the CodeBuffer of the current section.
+ uint8_t* _bufferData = nullptr;
+ //! End (first invalid byte) of the current section.
+ uint8_t* _bufferEnd = nullptr;
+ //! Pointer in the CodeBuffer of the current section.
+ uint8_t* _bufferPtr = nullptr;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseAssembler` instance.
+ ASMJIT_API BaseAssembler() noexcept;
+ //! Destroys the `BaseAssembler` instance.
+ ASMJIT_API virtual ~BaseAssembler() noexcept;
+
+ //! \}
+
+ //! \name Code-Buffer Management
+ //! \{
+
+ //! Returns the capacity of the current CodeBuffer.
+ inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
+ //! Returns the number of remaining bytes in the current CodeBuffer.
+ inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
+
+ //! Returns the current position in the CodeBuffer.
+ inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
+
+ //! Sets the current position in the CodeBuffer to `offset`.
+ //!
+ //! \note The `offset` cannot be greater than buffer size even if it's
+ //! within the buffer's capacity.
+ ASMJIT_API Error setOffset(size_t offset);
+
+ //! Returns the start of the CodeBuffer in the current section.
+ inline uint8_t* bufferData() const noexcept { return _bufferData; }
+ //! Returns the end (first invalid byte) in the current section.
+ inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
+ //! Returns the current pointer in the CodeBuffer in the current section.
+ inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
+
+ //! \}
+
+ //! \name Section Management
+ //! \{
+
+ //! Returns the current section.
+ inline Section* currentSection() const noexcept { return _section; }
+
+ ASMJIT_API Error section(Section* section) override;
+
+ //! \}
+
+ //! \name Label Management
+ //! \{
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+ ASMJIT_API Error bind(const Label& label) override;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ ASMJIT_API Error embed(const void* data, size_t dataSize) override;
+  ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+ ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
+ ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
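+
+// A minimal usage sketch of the assembler interface (x86::Assembler, CodeHolder
+// and Environment come from other AsmJit headers; this only illustrates how an
+// assembler attaches to a CodeHolder and emits into its default section):
+//
+//   CodeHolder code;
+//   code.init(Environment::host());
+//   x86::Assembler a(&code);      // attaches `a` to `code`
+//   Label entry = a.newLabel();
+//   a.bind(entry);
+//   a.embed("\x90", 1);           // embed a single NOP byte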
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED
diff --git a/Theodosius/asmjit/core/builder.cpp b/Theodosius/asmjit/core/builder.cpp
new file mode 100644
index 0000000..ad89f1d
--- /dev/null
+++ b/Theodosius/asmjit/core/builder.cpp
@@ -0,0 +1,920 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/builder.h"
+#include "../core/emitterutils_p.h"
+#include "../core/errorhandler.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::PostponedErrorHandler (Internal)]
+// ============================================================================
+
+//! Postponed error handler that never throws. Used as a temporary error
+//! handler to run passes. If an error occurs, the caller is notified and
+//! will call the real error handler, which can throw.
+class PostponedErrorHandler : public ErrorHandler {
+public:
+ void handleError(Error err, const char* message, BaseEmitter* origin) override {
+ DebugUtils::unused(err, origin);
+ _message.assign(message);
+ }
+
+ StringTmp<128> _message;
+};
+
+// ============================================================================
+// [asmjit::BaseBuilder - Utilities]
+// ============================================================================
+
+static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
+ for (Pass* pass : self->_passes)
+ pass->~Pass();
+ self->_passes.reset();
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Construction / Destruction]
+// ============================================================================
+
+BaseBuilder::BaseBuilder() noexcept
+ : BaseEmitter(kTypeBuilder),
+ _codeZone(32768 - Zone::kBlockOverhead),
+ _dataZone(16384 - Zone::kBlockOverhead),
+ _passZone(65536 - Zone::kBlockOverhead),
+ _allocator(&_codeZone) {}
+
+BaseBuilder::~BaseBuilder() noexcept {
+ BaseBuilder_deletePasses(this);
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Node Management]
+// ============================================================================
+
+Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount) {
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
+
+  InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ *out = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+ return kErrorOk;
+}
+
+
+Error BaseBuilder::_newLabelNode(LabelNode** out) {
+ *out = nullptr;
+
+  ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
+ return registerLabelNode(*out);
+}
+
+Error BaseBuilder::_newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment) {
+ *out = nullptr;
+  return _newNodeT<AlignNode>(out, alignMode, alignment);
+}
+
+Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
+ *out = nullptr;
+
+ uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
+ uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
+
+ if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ uint32_t typeSize = Type::sizeOf(finalTypeId);
+ Support::FastUInt8 of = 0;
+
+ size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
+ if (ASMJIT_UNLIKELY(of))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ EmbedDataNode* node;
+  ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
+
+ node->_embed._typeId = uint8_t(typeId);
+ node->_embed._typeSize = uint8_t(typeSize);
+ node->_itemCount = itemCount;
+ node->_repeatCount = repeatCount;
+
+ uint8_t* dstData = node->_inlineData;
+ if (dataSize > EmbedDataNode::kInlineBufferSize) {
+    dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
+ if (ASMJIT_UNLIKELY(!dstData))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ node->_externalData = dstData;
+ }
+
+ if (data)
+ memcpy(dstData, data, dataSize);
+
+ *out = node;
+ return kErrorOk;
+}
+
+Error BaseBuilder::_newConstPoolNode(ConstPoolNode** out) {
+ *out = nullptr;
+
+  ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
+ return registerLabelNode(*out);
+}
+
+Error BaseBuilder::_newCommentNode(CommentNode** out, const char* data, size_t size) {
+ *out = nullptr;
+
+ if (data) {
+ if (size == SIZE_MAX)
+ size = strlen(data);
+
+ if (size > 0) {
+      data = static_cast<char*>(_dataZone.dup(data, size, true));
+ if (ASMJIT_UNLIKELY(!data))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+ }
+
+  return _newNodeT<CommentNode>(out, data);
+}
+
+BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+ ASMJIT_ASSERT(!node->isActive());
+
+ if (!_cursor) {
+ if (!_firstNode) {
+ _firstNode = node;
+ _lastNode = node;
+ }
+ else {
+ node->_next = _firstNode;
+ _firstNode->_prev = node;
+ _firstNode = node;
+ }
+ }
+ else {
+ BaseNode* prev = _cursor;
+ BaseNode* next = _cursor->next();
+
+ node->_prev = prev;
+ node->_next = next;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+ }
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ _cursor = node;
+ return node;
+}
+
+BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(ref);
+
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+
+ BaseNode* prev = ref;
+ BaseNode* next = ref->next();
+
+ node->_prev = prev;
+ node->_next = next;
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+
+ return node;
+}
+
+BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
+ ASMJIT_ASSERT(node != nullptr);
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+ ASMJIT_ASSERT(!node->isActive());
+ ASMJIT_ASSERT(ref != nullptr);
+ ASMJIT_ASSERT(ref->isActive());
+
+ BaseNode* prev = ref->prev();
+ BaseNode* next = ref;
+
+ node->_prev = prev;
+ node->_next = next;
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ next->_prev = node;
+ if (prev)
+ prev->_next = node;
+ else
+ _firstNode = node;
+
+ return node;
+}
+
+BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
+ if (!node->isActive())
+ return node;
+
+ BaseNode* prev = node->prev();
+ BaseNode* next = node->next();
+
+ if (_firstNode == node)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == node)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+ node->clearFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ if (_cursor == node)
+ _cursor = prev;
+
+ return node;
+}
+
+void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
+ if (first == last) {
+ removeNode(first);
+ return;
+ }
+
+ if (!first->isActive())
+ return;
+
+ BaseNode* prev = first->prev();
+ BaseNode* next = last->next();
+
+ if (_firstNode == first)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == last)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ BaseNode* node = first;
+ uint32_t didRemoveSection = false;
+
+ for (;;) {
+ next = node->next();
+ ASMJIT_ASSERT(next != nullptr);
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+ node->clearFlags(BaseNode::kFlagIsActive);
+ didRemoveSection |= uint32_t(node->isSection());
+
+ if (_cursor == node)
+ _cursor = prev;
+
+ if (node == last)
+ break;
+ node = next;
+ }
+
+ if (didRemoveSection)
+ _dirtySectionLinks = true;
+}
+
+BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
+ BaseNode* old = _cursor;
+ _cursor = node;
+ return old;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Section]
+// ============================================================================
+
+Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
+ *out = nullptr;
+
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
+ return reportError(DebugUtils::errored(kErrorInvalidSection));
+
+ if (sectionId >= _sectionNodes.size()) {
+ Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
+ if (ASMJIT_UNLIKELY(err != kErrorOk))
+ return reportError(err);
+ }
+
+ SectionNode* node = nullptr;
+ if (sectionId < _sectionNodes.size())
+ node = _sectionNodes[sectionId];
+
+ if (!node) {
+    ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
+
+ // We have already reserved enough space, this cannot fail now.
+ if (sectionId >= _sectionNodes.size())
+ _sectionNodes.resize(&_allocator, sectionId + 1);
+
+ _sectionNodes[sectionId] = node;
+ }
+
+ *out = node;
+ return kErrorOk;
+}
+
+Error BaseBuilder::section(Section* section) {
+ SectionNode* node;
+ ASMJIT_PROPAGATE(sectionNodeOf(&node, section->id()));
+
+ if (!node->isActive()) {
+ // Insert the section at the end if it was not part of the code.
+ addAfter(node, lastNode());
+ _cursor = node;
+ }
+ else {
+    // This is a bit tricky. We cache section links to make sure that
+    // switching sections doesn't involve traversing the linked list unless
+    // the position of the section has changed.
+ if (hasDirtySectionLinks())
+ updateSectionLinks();
+
+ if (node->_nextSection)
+ _cursor = node->_nextSection->_prev;
+ else
+ _cursor = _lastNode;
+ }
+
+ return kErrorOk;
+}
+
+void BaseBuilder::updateSectionLinks() noexcept {
+ if (!_dirtySectionLinks)
+ return;
+
+ BaseNode* node_ = _firstNode;
+ SectionNode* currentSection = nullptr;
+
+ while (node_) {
+ if (node_->isSection()) {
+ if (currentSection)
+        currentSection->_nextSection = node_->as<SectionNode>();
+      currentSection = node_->as<SectionNode>();
+ }
+ node_ = node_->next();
+ }
+
+ if (currentSection)
+ currentSection->_nextSection = nullptr;
+
+ _dirtySectionLinks = false;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Labels]
+// ============================================================================
+
+Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
+ *out = nullptr;
+
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ uint32_t index = labelId;
+ if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ if (index >= _labelNodes.size())
+ ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
+
+ LabelNode* node = _labelNodes[index];
+ if (!node) {
+    ASMJIT_PROPAGATE(_newNodeT<LabelNode>(&node, labelId));
+ _labelNodes[index] = node;
+ }
+
+ *out = node;
+ return kErrorOk;
+}
+
+Error BaseBuilder::registerLabelNode(LabelNode* node) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ LabelEntry* le;
+ ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
+ uint32_t labelId = le->id();
+
+ // We just added one label so it must be true.
+ ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
+ ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
+
+ _labelNodes[labelId] = node;
+ node->_labelId = labelId;
+
+ return kErrorOk;
+}
+
+static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
+ ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
+
+ uint32_t growBy = labelId - self->_labelNodes.size();
+ Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
+
+ if (ASMJIT_UNLIKELY(err))
+ return self->reportError(err);
+
+ LabelNode* node;
+  ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
+
+ self->_labelNodes.resize(&self->_allocator, labelId + 1);
+ self->_labelNodes[labelId] = node;
+ node->_labelId = labelId;
+ return kErrorOk;
+}
+
+Label BaseBuilder::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ LabelEntry* le;
+
+ if (_code &&
+ _code->newLabelEntry(&le) == kErrorOk &&
+ BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
+ labelId = le->id();
+ }
+
+ return Label(labelId);
+}
+
+Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ LabelEntry* le;
+
+ if (_code &&
+ _code->newNamedLabelEntry(&le, name, nameSize, type, parentId) == kErrorOk &&
+ BaseBuilder_newLabelInternal(this, le->id()) == kErrorOk) {
+ labelId = le->id();
+ }
+
+ return Label(labelId);
+}
+
+Error BaseBuilder::bind(const Label& label) {
+ LabelNode* node;
+ ASMJIT_PROPAGATE(labelNodeOf(&node, label));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Passes]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
+ for (Pass* pass : _passes)
+ if (strcmp(pass->name(), name) == 0)
+ return pass;
+ return nullptr;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(pass == nullptr)) {
+    // Since this is directly called by `addPassT()`, we treat a `null` argument
+    // as an out-of-memory condition. Otherwise it would be API misuse.
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+ else if (ASMJIT_UNLIKELY(pass->_cb)) {
+ // Kinda weird, but okay...
+ if (pass->_cb == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
+ pass->_cb = this;
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(pass == nullptr))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (pass->_cb != nullptr) {
+ if (pass->_cb != this)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t index = _passes.indexOf(pass);
+ ASMJIT_ASSERT(index != Globals::kNotFound);
+
+ pass->_cb = nullptr;
+ _passes.removeAt(index);
+ }
+
+ pass->~Pass();
+ return kErrorOk;
+}
+
+Error BaseBuilder::runPasses() {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (_passes.empty())
+ return kErrorOk;
+
+ ErrorHandler* prev = errorHandler();
+ PostponedErrorHandler postponed;
+
+ Error err = kErrorOk;
+ setErrorHandler(&postponed);
+
+ for (Pass* pass : _passes) {
+ _passZone.reset();
+ err = pass->run(&_passZone, _logger);
+ if (err)
+ break;
+ }
+ _passZone.reset();
+ setErrorHandler(prev);
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Emit]
+// ============================================================================
+
+Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
+ uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
+ uint32_t options = instOptions() | forcedInstOptions();
+
+ if (options & BaseInst::kOptionReserved) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_VALIDATION
+ // Strict validation.
+ if (hasValidationOption(kValidationOptionIntermediate)) {
+ Operand_ opArray[Globals::kMaxOpCount];
+ EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+ Error err = InstAPI::validate(arch(), BaseInst(instId, options, _extraReg), opArray, opCount);
+ if (ASMJIT_UNLIKELY(err)) {
+ resetInstOptions();
+ resetExtraReg();
+ resetInlineComment();
+ return reportError(err);
+ }
+ }
+#endif
+
+ // Clear options that should never be part of `InstNode`.
+ options &= ~BaseInst::kOptionReserved;
+ }
+
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
+
+  InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ const char* comment = inlineComment();
+
+ resetInstOptions();
+ resetInlineComment();
+
+ if (ASMJIT_UNLIKELY(!node)) {
+ resetExtraReg();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ node = new(node) InstNode(this, instId, options, opCount, opCapacity);
+ node->setExtraReg(extraReg());
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOp(2, o2);
+ for (uint32_t i = 3; i < opCount; i++)
+ node->setOp(i, opExt[i - 3]);
+ node->resetOpRange(opCount, opCapacity);
+
+ if (comment)
+    node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+ addNode(node);
+ resetExtraReg();
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Align]
+// ============================================================================
+
+Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ AlignNode* node;
+ ASMJIT_PROPAGATE(_newAlignNode(&node, alignMode, alignment));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Embed]
+// ============================================================================
+
+Error BaseBuilder::embed(const void* data, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ EmbedDataNode* node;
+ ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, data, dataSize));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t itemRepeat) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ EmbedDataNode* node;
+ ASMJIT_PROPAGATE(_newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (!isLabelValid(label))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ EmbedDataNode* node;
+ ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, nullptr, pool.size()));
+
+ pool.fill(node->data());
+ addNode(node);
+ return kErrorOk;
+}
+
+// EmbedLabel / EmbedLabelDelta
+// ----------------------------
+//
+// If dataSize is zero it means that the size is the same as the target register
+// width; however, if it is provided we want to validate that it is within the
+// possible range.
+
+static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
+ return !dataSize || (Support::isPowerOf2(dataSize) && dataSize <= 8);
+}
+
+Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (!BaseBuilder_checkDataSize(dataSize))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ EmbedLabelNode* node;
+  ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (!BaseBuilder_checkDataSize(dataSize))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ EmbedLabelDeltaNode* node;
+  ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Comment]
+// ============================================================================
+
+Error BaseBuilder::comment(const char* data, size_t size) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ CommentNode* node;
+ ASMJIT_PROPAGATE(_newCommentNode(&node, data, size));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Serialize]
+// ============================================================================
+
+Error BaseBuilder::serializeTo(BaseEmitter* dst) {
+ Error err = kErrorOk;
+ BaseNode* node_ = _firstNode;
+
+ Operand_ opArray[Globals::kMaxOpCount];
+
+ do {
+ dst->setInlineComment(node_->inlineComment());
+
+ if (node_->isInst()) {
+      InstNode* node = node_->as<InstNode>();
+
+ // NOTE: Inlined to remove one additional call per instruction.
+ dst->setInstOptions(node->instOptions());
+ dst->setExtraReg(node->extraReg());
+
+ const Operand_* op = node->operands();
+ const Operand_* opExt = EmitterUtils::noExt;
+
+ uint32_t opCount = node->opCount();
+ if (opCount > 3) {
+ uint32_t i = 4;
+ opArray[3] = op[3];
+
+ while (i < opCount) {
+ opArray[i].copyFrom(op[i]);
+ i++;
+ }
+ while (i < Globals::kMaxOpCount) {
+ opArray[i].reset();
+ i++;
+ }
+ opExt = opArray + 3;
+ }
+
+ err = dst->_emit(node->id(), op[0], op[1], op[2], opExt);
+ }
+ else if (node_->isLabel()) {
+ if (node_->isConstPool()) {
+        ConstPoolNode* node = node_->as<ConstPoolNode>();
+ err = dst->embedConstPool(node->label(), node->constPool());
+ }
+ else {
+        LabelNode* node = node_->as<LabelNode>();
+ err = dst->bind(node->label());
+ }
+ }
+ else if (node_->isAlign()) {
+      AlignNode* node = node_->as<AlignNode>();
+ err = dst->align(node->alignMode(), node->alignment());
+ }
+ else if (node_->isEmbedData()) {
+      EmbedDataNode* node = node_->as<EmbedDataNode>();
+ err = dst->embedDataArray(node->typeId(), node->data(), node->itemCount(), node->repeatCount());
+ }
+ else if (node_->isEmbedLabel()) {
+      EmbedLabelNode* node = node_->as<EmbedLabelNode>();
+ err = dst->embedLabel(node->label(), node->dataSize());
+ }
+ else if (node_->isEmbedLabelDelta()) {
+      EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
+ err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
+ }
+ else if (node_->isSection()) {
+      SectionNode* node = node_->as<SectionNode>();
+ err = dst->section(_code->sectionById(node->id()));
+ }
+ else if (node_->isComment()) {
+      CommentNode* node = node_->as<CommentNode>();
+ err = dst->comment(node->inlineComment());
+ }
+
+ if (err) break;
+ node_ = node_->next();
+ } while (node_);
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Events]
+// ============================================================================
+
+Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ SectionNode* initialSection;
+ Error err = sectionNodeOf(&initialSection, 0);
+
+ if (!err)
+ err = _passes.willGrow(&_allocator, 8);
+
+ if (ASMJIT_UNLIKELY(err)) {
+ onDetach(code);
+ return err;
+ }
+
+ _cursor = initialSection;
+ _firstNode = initialSection;
+ _lastNode = initialSection;
+ initialSection->setFlags(BaseNode::kFlagIsActive);
+
+ return kErrorOk;
+}
+
+Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
+ BaseBuilder_deletePasses(this);
+ _sectionNodes.reset();
+ _labelNodes.reset();
+
+ _allocator.reset(&_codeZone);
+ _codeZone.reset();
+ _dataZone.reset();
+ _passZone.reset();
+
+ _nodeFlags = 0;
+
+ _cursor = nullptr;
+ _firstNode = nullptr;
+ _lastNode = nullptr;
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::Pass - Construction / Destruction]
+// ============================================================================
+
+Pass::Pass(const char* name) noexcept
+ : _name(name) {}
+Pass::~Pass() noexcept {}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
diff --git a/Theodosius/asmjit/core/builder.h b/Theodosius/asmjit/core/builder.h
new file mode 100644
index 0000000..317bda1
--- /dev/null
+++ b/Theodosius/asmjit/core/builder.h
@@ -0,0 +1,1435 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_BUILDER_H_INCLUDED
+#define ASMJIT_CORE_BUILDER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/assembler.h"
+#include "../core/codeholder.h"
+#include "../core/constpool.h"
+#include "../core/formatter.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_builder
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseBuilder;
+class Pass;
+
+class BaseNode;
+class InstNode;
+class SectionNode;
+class LabelNode;
+class AlignNode;
+class EmbedDataNode;
+class EmbedLabelNode;
+class ConstPoolNode;
+class CommentNode;
+class SentinelNode;
+class LabelDeltaNode;
+
+// Only used by Compiler infrastructure.
+class JumpAnnotation;
+
+// ============================================================================
+// [asmjit::BaseBuilder]
+// ============================================================================
+
+//! Builder interface.
+//!
+//! `BaseBuilder` interface was designed to be used as a \ref BaseAssembler
+//! replacement in case pre-processing or post-processing of the generated code
+//! is required. The code can be modified during or after code generation. Pre
+//! or post processing can be done manually or through a \ref Pass object. \ref
+//! BaseBuilder stores the emitted code as a doubly-linked list of nodes, which
+//! allows O(1) insertion and removal during processing.
+//!
+//! Check out architecture specific builders for more details and examples:
+//!
+//! - \ref x86::Builder - X86/X64 builder implementation.
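+//!
+//! A minimal usage sketch (illustrative only; it assumes the x86::Builder,
+//! CodeHolder and JitRuntime APIs bundled with this copy of asmjit behave as
+//! in upstream):
+//!
+//!   using namespace asmjit;
+//!
+//!   JitRuntime rt;                  // Runtime that will own the final code.
+//!   CodeHolder code;
+//!   code.init(rt.environment());    // Match the host environment.
+//!
+//!   x86::Builder cb(&code);         // Records nodes instead of encoding bytes.
+//!   cb.mov(x86::eax, 1);            // Appended after the cursor.
+//!   cb.ret();
+//!
+//!   cb.finalize();                  // Runs passes and serializes to an Assembler.
+//!
+//!   int (*fn)(void);
+//!   rt.add(&fn, &code);             // Relocate and make executable; fn() returns 1.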
+class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter {
+public:
+ ASMJIT_NONCOPYABLE(BaseBuilder)
+ typedef BaseEmitter Base;
+
+ //! Base zone used to allocate nodes and passes.
+ Zone _codeZone;
+ //! Data zone used to allocate data and names.
+ Zone _dataZone;
+ //! Pass zone, passed to `Pass::run()`.
+ Zone _passZone;
+ //! Allocator that uses `_codeZone`.
+ ZoneAllocator _allocator;
+
+ //! Array of `Pass` objects.
+  ZoneVector<Pass*> _passes {};
+  //! Maps section indexes to `SectionNode` nodes.
+  ZoneVector<SectionNode*> _sectionNodes {};
+  //! Maps label indexes to `LabelNode` nodes.
+  ZoneVector<LabelNode*> _labelNodes {};
+
+ //! Current node (cursor).
+ BaseNode* _cursor = nullptr;
+ //! First node of the current section.
+ BaseNode* _firstNode = nullptr;
+ //! Last node of the current section.
+ BaseNode* _lastNode = nullptr;
+
+ //! Flags assigned to each new node.
+ uint32_t _nodeFlags = 0;
+ //! The sections links are dirty (used internally).
+ bool _dirtySectionLinks = false;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseBuilder` instance.
+ ASMJIT_API BaseBuilder() noexcept;
+ //! Destroys the `BaseBuilder` instance.
+ ASMJIT_API virtual ~BaseBuilder() noexcept;
+
+ //! \}
+
+ //! \name Node Management
+ //! \{
+
+ //! Returns the first node.
+ inline BaseNode* firstNode() const noexcept { return _firstNode; }
+ //! Returns the last node.
+ inline BaseNode* lastNode() const noexcept { return _lastNode; }
+
+  //! Allocates and instantiates a new node of type `T`, stores it to `out`,
+  //! and returns `kErrorOk`. If the allocation fails, an out-of-memory error
+  //! is reported and returned instead.
+  //!
+  //! The template argument `T` must be a type that extends \ref BaseNode.
+ //!
+ //! \remarks The pointer returned (if non-null) is owned by the Builder or
+ //! Compiler. When the Builder/Compiler is destroyed it destroys all nodes
+ //! it created so no manual memory management is required.
+  template<typename T, typename... Args>
+  inline Error _newNodeT(T** out, Args&&... args) {
+    *out = _allocator.newT<T>(this, std::forward<Args>(args)...);
+ if (ASMJIT_UNLIKELY(!*out))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return kErrorOk;
+ }
+
+ //! Creates a new \ref InstNode.
+ ASMJIT_API Error _newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount);
+ //! Creates a new \ref LabelNode.
+ ASMJIT_API Error _newLabelNode(LabelNode** out);
+ //! Creates a new \ref AlignNode.
+ ASMJIT_API Error _newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment);
+ //! Creates a new \ref EmbedDataNode.
+ ASMJIT_API Error _newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1);
+ //! Creates a new \ref ConstPoolNode.
+ ASMJIT_API Error _newConstPoolNode(ConstPoolNode** out);
+ //! Creates a new \ref CommentNode.
+ ASMJIT_API Error _newCommentNode(CommentNode** out, const char* data, size_t size);
+
+  //! Adds `node` after the current node (cursor) and sets the cursor to the given `node`.
+ ASMJIT_API BaseNode* addNode(BaseNode* node) noexcept;
+ //! Inserts the given `node` after `ref`.
+ ASMJIT_API BaseNode* addAfter(BaseNode* node, BaseNode* ref) noexcept;
+ //! Inserts the given `node` before `ref`.
+ ASMJIT_API BaseNode* addBefore(BaseNode* node, BaseNode* ref) noexcept;
+ //! Removes the given `node`.
+ ASMJIT_API BaseNode* removeNode(BaseNode* node) noexcept;
+ //! Removes multiple nodes.
+ ASMJIT_API void removeNodes(BaseNode* first, BaseNode* last) noexcept;
+
+ //! Returns the cursor.
+ //!
+ //! When the Builder/Compiler is created it automatically creates a '.text'
+ //! \ref SectionNode, which will be the initial one. When instructions are
+ //! added they are always added after the cursor and the cursor is changed
+ //! to be that newly added node. Use `setCursor()` to change where new nodes
+ //! are inserted.
+ inline BaseNode* cursor() const noexcept {
+ return _cursor;
+ }
+
+ //! Sets the current node to `node` and return the previous one.
+ ASMJIT_API BaseNode* setCursor(BaseNode* node) noexcept;
+
+ //! Sets the current node without returning the previous node.
+ //!
+ //! Only use this function if you are concerned about performance and want
+ //! this inlined (for example if you set the cursor in a loop, etc...).
+ inline void _setCursor(BaseNode* node) noexcept {
+ _cursor = node;
+ }
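+
+  // A short illustrative sketch of cursor manipulation; `cb` and `earlierNode`
+  // are placeholder names for a builder and a node it already owns:
+  //
+  //   BaseNode* prevCursor = cb.setCursor(earlierNode); // new nodes follow earlierNode
+  //   cb.nop();                                         // inserted in the middle
+  //   cb.setCursor(prevCursor);                         // resume appending at the end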
+
+ //! \}
+
+ //! \name Section Management
+ //! \{
+
+ //! Returns a vector of SectionNode objects.
+ //!
+ //! \note If a section of some id is not associated with the Builder/Compiler
+ //! it would be null, so always check for nulls if you iterate over the vector.
+  inline const ZoneVector<SectionNode*>& sectionNodes() const noexcept {
+ return _sectionNodes;
+ }
+
+ //! Tests whether the `SectionNode` of the given `sectionId` was registered.
+ inline bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept {
+ return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr;
+ }
+
+ //! Returns or creates a `SectionNode` that matches the given `sectionId`.
+ //!
+ //! \remarks This function will either get the existing `SectionNode` or create
+ //! it in case it wasn't created before. You can check whether a section has a
+ //! registered `SectionNode` by using `BaseBuilder::hasRegisteredSectionNode()`.
+ ASMJIT_API Error sectionNodeOf(SectionNode** out, uint32_t sectionId);
+
+ ASMJIT_API Error section(Section* section) override;
+
+ //! Returns whether the section links of active section nodes are dirty. You can
+ //! update these links by calling `updateSectionLinks()` in such case.
+ inline bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; }
+
+ //! Updates links of all active section nodes.
+ ASMJIT_API void updateSectionLinks() noexcept;
+
+ //! \}
+
+ //! \name Label Management
+ //! \{
+
+ //! Returns a vector of \ref LabelNode nodes.
+ //!
+ //! \note If a label of some id is not associated with the Builder/Compiler
+ //! it would be null, so always check for nulls if you iterate over the vector.
+  inline const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; }
+
+ //! Tests whether the `LabelNode` of the given `labelId` was registered.
+ inline bool hasRegisteredLabelNode(uint32_t labelId) const noexcept {
+ return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr;
+ }
+
+ //! \overload
+ inline bool hasRegisteredLabelNode(const Label& label) const noexcept {
+ return hasRegisteredLabelNode(label.id());
+ }
+
+ //! Gets or creates a \ref LabelNode that matches the given `labelId`.
+ //!
+ //! \remarks This function will either get the existing `LabelNode` or create
+ //! it in case it wasn't created before. You can check whether a label has a
+ //! registered `LabelNode` by calling \ref BaseBuilder::hasRegisteredLabelNode().
+ ASMJIT_API Error labelNodeOf(LabelNode** out, uint32_t labelId);
+
+ //! \overload
+ inline Error labelNodeOf(LabelNode** out, const Label& label) {
+ return labelNodeOf(out, label.id());
+ }
+
+ //! Registers this \ref LabelNode (internal).
+ //!
+ //! This function is used internally to register a newly created `LabelNode`
+ //! with this instance of Builder/Compiler. Use \ref labelNodeOf() functions
+ //! to get back \ref LabelNode from a label or its identifier.
+ ASMJIT_API Error registerLabelNode(LabelNode* node);
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+ ASMJIT_API Error bind(const Label& label) override;
+
+ //! \}
+
+ //! \name Passes
+ //! \{
+
+ //! Returns a vector of `Pass` instances that will be executed by `runPasses()`.
+  inline const ZoneVector<Pass*>& passes() const noexcept { return _passes; }
+
+ //! Allocates and instantiates a new pass of type `T` and returns its instance.
+ //! If the allocation fails `nullptr` is returned.
+ //!
+  //! The template argument `T` must be a type that extends \ref Pass.
+ //!
+ //! \remarks The pointer returned (if non-null) is owned by the Builder or
+ //! Compiler. When the Builder/Compiler is destroyed it destroys all passes
+ //! it created so no manual memory management is required.
+  template<typename T>
+  inline T* newPassT() noexcept { return _codeZone.newT<T>(); }
+
+  //! \overload
+  template<typename T, typename... Args>
+  inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); }
+
+  template<typename T>
+  inline Error addPassT() { return addPass(newPassT<T>()); }
+
+  template<typename T, typename... Args>
+  inline Error addPassT(Args&&... args) { return addPass(newPassT<T>(std::forward<Args>(args)...)); }
+
+ //! Returns `Pass` by name.
+ //!
+ //! If the pass having the given `name` doesn't exist `nullptr` is returned.
+ ASMJIT_API Pass* passByName(const char* name) const noexcept;
+ //! Adds `pass` to the list of passes.
+ ASMJIT_API Error addPass(Pass* pass) noexcept;
+ //! Removes `pass` from the list of passes and delete it.
+ ASMJIT_API Error deletePass(Pass* pass) noexcept;
+
+ //! Runs all passes in order.
+ ASMJIT_API Error runPasses();
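+
+  // A sketch of a user-defined pass (illustrative; it assumes the `Pass`
+  // interface declared later in this header: a name passed to the constructor
+  // and a virtual `run(Zone*, Logger*)`):
+  //
+  //   class CountInstsPass : public Pass {
+  //   public:
+  //     size_t count = 0;
+  //     CountInstsPass() noexcept : Pass("CountInstsPass") {}
+  //     Error run(Zone* zone, Logger* logger) override {
+  //       for (BaseNode* node = _cb->firstNode(); node; node = node->next())
+  //         count += size_t(node->isInst());
+  //       return kErrorOk;
+  //     }
+  //   };
+  //
+  //   // Registered via `builder.addPassT<CountInstsPass>()` and executed by
+  //   // `runPasses()` (or by `finalize()` in the concrete builders).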
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
+
+ //! \}
+
+ //! \name Align
+ //! \{
+
+ ASMJIT_API Error align(uint32_t alignMode, uint32_t alignment) override;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ ASMJIT_API Error embed(const void* data, size_t dataSize) override;
+ ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t count, size_t repeat = 1) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+ ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
+ ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+ //! \}
+
+ //! \name Serialization
+ //! \{
+
+  //! Serializes everything to the given emitter `dst`.
+  //!
+  //! Although not explicitly required, the emitter will most probably be of
+  //! Assembler type. The reason is that there is no known use of serializing
+  //! nodes held by Builder/Compiler into another Builder-like emitter.
+ ASMJIT_API Error serializeTo(BaseEmitter* dst);
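+
+  // Illustrative sketch (assuming the upstream x86 backend): encode the
+  // recorded nodes with a second emitter attached to the same CodeHolder,
+  // where `code` and `builder` are placeholders for existing objects:
+  //
+  //   x86::Assembler a(&code);
+  //   Error err = builder.serializeTo(&a);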
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use serializeTo() instead, serialize() is now also an instruction.")
+ inline Error serialize(BaseEmitter* dst) {
+ return serializeTo(dst);
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ ASMJIT_DEPRECATED("Use Formatter::formatNodeList(sb, formatFlags, builder)")
+ inline Error dump(String& sb, uint32_t formatFlags = 0) const noexcept {
+ return Formatter::formatNodeList(sb, formatFlags, this);
+ }
+#endif // !ASMJIT_NO_LOGGING
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::BaseNode]
+// ============================================================================
+
+//! Base node.
+//!
+//! Every node represents a building-block used by \ref BaseBuilder. It can
+//! be instruction, data, label, comment, directive, or any other high-level
+//! representation that can be transformed to the building blocks mentioned.
+//! Every class that inherits \ref BaseBuilder can define its own high-level
+//! nodes that can be later lowered to basic nodes like instructions.
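+//!
+//! A sketch of a user-defined node (illustrative; `MyMarkerNode` is a made-up
+//! name) that builds on `kNodeUser` and the `_newNodeT<T>()` / `addNode()`
+//! API of \ref BaseBuilder:
+//!
+//!   class MyMarkerNode : public BaseNode {
+//!   public:
+//!     inline MyMarkerNode(BaseBuilder* cb) noexcept
+//!       : BaseNode(cb, kNodeUser, kFlagIsInformative) {}
+//!   };
+//!
+//!   MyMarkerNode* marker;
+//!   if (cb._newNodeT<MyMarkerNode>(&marker) == kErrorOk)
+//!     cb.addNode(marker);           // Inserted after the current cursor.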
+class BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(BaseNode)
+
+ union {
+ struct {
+ //! Previous node.
+ BaseNode* _prev;
+ //! Next node.
+ BaseNode* _next;
+ };
+ //! Links (an alternative view to previous and next nodes).
+ BaseNode* _links[2];
+ };
+
+ //! Data shared between all types of nodes.
+ struct AnyData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Not used by BaseNode.
+ uint8_t _reserved0;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+ //! Data used by \ref InstNode.
+ struct InstData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Instruction operands count (used).
+ uint8_t _opCount;
+ //! Instruction operands capacity (allocated).
+ uint8_t _opCapacity;
+ };
+
+ //! Data used by \ref EmbedDataNode.
+ struct EmbedData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Type id, see \ref Type::Id.
+ uint8_t _typeId;
+ //! Size of `_typeId`.
+ uint8_t _typeSize;
+ };
+
+ //! Data used by \ref SentinelNode.
+ struct SentinelData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Sentinel type.
+ uint8_t _sentinelType;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+  //! Data that can have different meaning depending on \ref NodeType.
+ union {
+ //! Data useful by any node type.
+ AnyData _any;
+ //! Data specific to \ref InstNode.
+ InstData _inst;
+ //! Data specific to \ref EmbedDataNode.
+ EmbedData _embed;
+ //! Data specific to \ref SentinelNode.
+ SentinelData _sentinel;
+ };
+
+ //! Node position in code (should be unique).
+ uint32_t _position;
+
+ //! Value reserved for AsmJit users never touched by AsmJit itself.
+ union {
+ //! User data as 64-bit integer.
+ uint64_t _userDataU64;
+ //! User data as pointer.
+ void* _userDataPtr;
+ };
+
+ //! Data used exclusively by the current `Pass`.
+ void* _passData;
+
+ //! Inline comment/annotation or nullptr if not used.
+ const char* _inlineComment;
+
+ //! Type of `BaseNode`.
+ enum NodeType : uint32_t {
+ //! Invalid node (internal, don't use).
+ kNodeNone = 0,
+
+ // [BaseBuilder]
+
+ //! Node is \ref InstNode or \ref InstExNode.
+ kNodeInst = 1,
+ //! Node is \ref SectionNode.
+ kNodeSection = 2,
+ //! Node is \ref LabelNode.
+ kNodeLabel = 3,
+ //! Node is \ref AlignNode.
+ kNodeAlign = 4,
+ //! Node is \ref EmbedDataNode.
+ kNodeEmbedData = 5,
+ //! Node is \ref EmbedLabelNode.
+ kNodeEmbedLabel = 6,
+ //! Node is \ref EmbedLabelDeltaNode.
+ kNodeEmbedLabelDelta = 7,
+ //! Node is \ref ConstPoolNode.
+ kNodeConstPool = 8,
+ //! Node is \ref CommentNode.
+ kNodeComment = 9,
+ //! Node is \ref SentinelNode.
+ kNodeSentinel = 10,
+
+ // [BaseCompiler]
+
+ //! Node is \ref JumpNode (acts as InstNode).
+ kNodeJump = 15,
+ //! Node is \ref FuncNode (acts as LabelNode).
+ kNodeFunc = 16,
+ //! Node is \ref FuncRetNode (acts as InstNode).
+ kNodeFuncRet = 17,
+ //! Node is \ref InvokeNode (acts as InstNode).
+ kNodeInvoke = 18,
+
+ // [UserDefined]
+
+ //! First id of a user-defined node.
+ kNodeUser = 32,
+
+#ifndef ASMJIT_NO_DEPRECATED
+ kNodeFuncCall = kNodeInvoke
+#endif // !ASMJIT_NO_DEPRECATED
+ };
+
+ //! Node flags, specify what the node is and/or does.
+ enum Flags : uint32_t {
+ //! Node is code that can be executed (instruction, label, align, etc...).
+ kFlagIsCode = 0x01u,
+ //! Node is data that cannot be executed (data, const-pool, etc...).
+ kFlagIsData = 0x02u,
+ //! Node is informative, can be removed and ignored.
+ kFlagIsInformative = 0x04u,
+ //! Node can be safely removed if unreachable.
+ kFlagIsRemovable = 0x08u,
+ //! Node does nothing when executed (label, align, explicit nop).
+ kFlagHasNoEffect = 0x10u,
+ //! Node is an instruction or acts as it.
+ kFlagActsAsInst = 0x20u,
+ //! Node is a label or acts as it.
+ kFlagActsAsLabel = 0x40u,
+ //! Node is active (part of the code).
+ kFlagIsActive = 0x80u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseNode` - always use `BaseBuilder` to allocate nodes.
+ ASMJIT_INLINE BaseNode(BaseBuilder* cb, uint32_t type, uint32_t flags = 0) noexcept {
+ _prev = nullptr;
+ _next = nullptr;
+ _any._nodeType = uint8_t(type);
+ _any._nodeFlags = uint8_t(flags | cb->_nodeFlags);
+ _any._reserved0 = 0;
+ _any._reserved1 = 0;
+ _position = 0;
+ _userDataU64 = 0;
+ _passData = nullptr;
+ _inlineComment = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Casts this node to `T*`.
+  template<typename T>
+  inline T* as() noexcept { return static_cast<T*>(this); }
+  //! Casts this node to `const T*`.
+  template<typename T>
+  inline const T* as() const noexcept { return static_cast<const T*>(this); }
+
+ //! Returns previous node or `nullptr` if this node is either first or not
+ //! part of Builder/Compiler node-list.
+ inline BaseNode* prev() const noexcept { return _prev; }
+ //! Returns next node or `nullptr` if this node is either last or not part
+ //! of Builder/Compiler node-list.
+ inline BaseNode* next() const noexcept { return _next; }
+
+ //! Returns the type of the node, see `NodeType`.
+ inline uint32_t type() const noexcept { return _any._nodeType; }
+
+ //! Sets the type of the node, see `NodeType` (internal).
+ //!
+ //! \remarks You should never set a type of a node to anything else than the
+ //! initial value. This function is only provided for users that use custom
+ //! nodes and need to change the type either during construction or later.
+ inline void setType(uint32_t type) noexcept { _any._nodeType = uint8_t(type); }
+
+ //! Tests whether this node is either `InstNode` or extends it.
+ inline bool isInst() const noexcept { return hasFlag(kFlagActsAsInst); }
+ //! Tests whether this node is `SectionNode`.
+ inline bool isSection() const noexcept { return type() == kNodeSection; }
+ //! Tests whether this node is either `LabelNode` or extends it.
+ inline bool isLabel() const noexcept { return hasFlag(kFlagActsAsLabel); }
+ //! Tests whether this node is `AlignNode`.
+ inline bool isAlign() const noexcept { return type() == kNodeAlign; }
+ //! Tests whether this node is `EmbedDataNode`.
+ inline bool isEmbedData() const noexcept { return type() == kNodeEmbedData; }
+ //! Tests whether this node is `EmbedLabelNode`.
+ inline bool isEmbedLabel() const noexcept { return type() == kNodeEmbedLabel; }
+ //! Tests whether this node is `EmbedLabelDeltaNode`.
+ inline bool isEmbedLabelDelta() const noexcept { return type() == kNodeEmbedLabelDelta; }
+ //! Tests whether this node is `ConstPoolNode`.
+ inline bool isConstPool() const noexcept { return type() == kNodeConstPool; }
+ //! Tests whether this node is `CommentNode`.
+ inline bool isComment() const noexcept { return type() == kNodeComment; }
+ //! Tests whether this node is `SentinelNode`.
+ inline bool isSentinel() const noexcept { return type() == kNodeSentinel; }
+
+ //! Tests whether this node is `FuncNode`.
+ inline bool isFunc() const noexcept { return type() == kNodeFunc; }
+ //! Tests whether this node is `FuncRetNode`.
+ inline bool isFuncRet() const noexcept { return type() == kNodeFuncRet; }
+ //! Tests whether this node is `InvokeNode`.
+ inline bool isInvoke() const noexcept { return type() == kNodeInvoke; }
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use isInvoke")
+ inline bool isFuncCall() const noexcept { return isInvoke(); }
+#endif // !ASMJIT_NO_DEPRECATED
+
+ //! Returns the node flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _any._nodeFlags; }
+ //! Tests whether the node has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_any._nodeFlags) & flag) != 0; }
+ //! Replaces node flags with `flags`.
+ inline void setFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(flags); }
+ //! Adds the given `flags` to node flags.
+ inline void addFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags | flags); }
+ //! Clears the given `flags` from node flags.
+ inline void clearFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags & (flags ^ 0xFF)); }
+
+ //! Tests whether the node is code that can be executed.
+ inline bool isCode() const noexcept { return hasFlag(kFlagIsCode); }
+ //! Tests whether the node is data that cannot be executed.
+ inline bool isData() const noexcept { return hasFlag(kFlagIsData); }
+ //! Tests whether the node is informative only (is never encoded like comment, etc...).
+ inline bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }
+ //! Tests whether the node is removable if it's in an unreachable code block.
+ inline bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
+ //! Tests whether the node has no effect when executed (label, .align, nop, ...).
+ inline bool hasNoEffect() const noexcept { return hasFlag(kFlagHasNoEffect); }
+ //! Tests whether the node is part of the code.
+ inline bool isActive() const noexcept { return hasFlag(kFlagIsActive); }
+
+ //! Tests whether the node has a position assigned.
+ //!
+ //! \remarks Returns `true` if node position is non-zero.
+ inline bool hasPosition() const noexcept { return _position != 0; }
+ //! Returns node position.
+ inline uint32_t position() const noexcept { return _position; }
+ //! Sets node position.
+ //!
+ //! Node position is a 32-bit unsigned integer that is used by Compiler to
+ //! track where the node is relatively to the start of the function. It doesn't
+ //! describe a byte position in a binary, instead it's just a pseudo position
+ //! used by liveness analysis and other tools around Compiler.
+ //!
+ //! If you don't use Compiler then you may use `position()` and `setPosition()`
+ //! freely for your own purposes if the 32-bit value limit is okay for you.
+ inline void setPosition(uint32_t position) noexcept { _position = position; }
+
+ //! Returns user data casted to `T*`.
+ //!
+  //! User data is dedicated to be used only by AsmJit users and never touched
+  //! by the library. The data has a pointer size, so you can either store a
+  //! pointer or an integer value through `setUserDataAsInt64()`.
+  template<typename T>
+  inline T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); }
+ //! Returns user data casted to `int64_t`.
+ inline int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); }
+ //! Returns user data casted to `uint64_t`.
+ inline uint64_t userDataAsUInt64() const noexcept { return _userDataU64; }
+
+ //! Sets user data to `data`.
+  template<typename T>
+  inline void setUserDataAsPtr(T* data) noexcept { _userDataPtr = static_cast<void*>(data); }
+  //! Sets user data to the given 64-bit signed `value`.
+  inline void setUserDataAsInt64(int64_t value) noexcept { _userDataU64 = uint64_t(value); }
+  //! Sets user data to the given 64-bit unsigned `value`.
+ inline void setUserDataAsUInt64(uint64_t value) noexcept { _userDataU64 = value; }
+
+ //! Resets user data to zero / nullptr.
+ inline void resetUserData() noexcept { _userDataU64 = 0; }
+
+ //! Tests whether the node has an associated pass data.
+ inline bool hasPassData() const noexcept { return _passData != nullptr; }
+ //! Returns the node pass data - data used during processing & transformations.
+  template<typename T>
+ inline T* passData() const noexcept { return (T*)_passData; }
+ //! Sets the node pass data to `data`.
+  template<typename T>
+ inline void setPassData(T* data) noexcept { _passData = (void*)data; }
+ //! Resets the node pass data to nullptr.
+ inline void resetPassData() noexcept { _passData = nullptr; }
+
+ //! Tests whether the node has an inline comment/annotation.
+ inline bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
+ //! Returns an inline comment/annotation string.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets an inline comment/annotation string to `s`.
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets an inline comment/annotation string to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstNode]
+// ============================================================================
+
+//! Instruction node.
+//!
+//! Wraps an instruction with its options and operands.
+class InstNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(InstNode)
+
+ enum : uint32_t {
+    //! Count of embedded operands per `InstNode` that are always allocated as
+    //! a part of the instruction. The minimum number of embedded operands is 4,
+    //! but in 32-bit mode pointers are smaller and we can embed 5. The rest
+    //! (up to 6 operands) is always stored in `InstExNode` (see the worked size
+    //! example after this enum).
+ kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_))
+ };
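+
+  // Worked example of the capacity above (illustrative; sizes assume a typical
+  // 64-bit build and may differ per target): with sizeof(BaseNode) == 48,
+  // sizeof(BaseInst) == 16 and 16-byte operands, kBaseOpCapacity evaluates to
+  // (128 - 48 - 16) / 16 == 4; on 32-bit targets the node is smaller, which
+  // leaves room for 5 embedded operands.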
+
+ //! Base instruction data.
+ BaseInst _baseInst;
+ //! First 4 or 5 operands (indexed from 0).
+ Operand_ _opArray[kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstNode` instance.
+ ASMJIT_INLINE InstNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCount, uint32_t opCapacity = kBaseOpCapacity) noexcept
+ : BaseNode(cb, kNodeInst, kFlagIsCode | kFlagIsRemovable | kFlagActsAsInst),
+ _baseInst(instId, options) {
+ _inst._opCapacity = uint8_t(opCapacity);
+ _inst._opCount = uint8_t(opCount);
+ }
+
+ //! \cond INTERNAL
+ //! Reset all built-in operands, including `extraReg`.
+ inline void _resetOps() noexcept {
+ _baseInst.resetExtraReg();
+ resetOpRange(0, opCapacity());
+ }
+ //! \endcond
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline BaseInst& baseInst() noexcept { return _baseInst; }
+ inline const BaseInst& baseInst() const noexcept { return _baseInst; }
+
+ //! Returns the instruction id, see `BaseInst::Id`.
+ inline uint32_t id() const noexcept { return _baseInst.id(); }
+ //! Sets the instruction id to `id`, see `BaseInst::Id`.
+ inline void setId(uint32_t id) noexcept { _baseInst.setId(id); }
+
+ //! Returns instruction options.
+ inline uint32_t instOptions() const noexcept { return _baseInst.options(); }
+ //! Sets instruction options.
+ inline void setInstOptions(uint32_t options) noexcept { _baseInst.setOptions(options); }
+ //! Adds instruction options.
+ inline void addInstOptions(uint32_t options) noexcept { _baseInst.addOptions(options); }
+ //! Clears instruction options.
+ inline void clearInstOptions(uint32_t options) noexcept { _baseInst.clearOptions(options); }
+
+ //! Tests whether the node has an extra register operand.
+ inline bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); }
+ //! Returns extra register operand.
+ inline RegOnly& extraReg() noexcept { return _baseInst.extraReg(); }
+ //! \overload
+ inline const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const RegOnly& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Resets extra register operand.
+ inline void resetExtraReg() noexcept { _baseInst.resetExtraReg(); }
+
+ //! Returns operand count.
+ inline uint32_t opCount() const noexcept { return _inst._opCount; }
+ //! Returns operand capacity.
+ inline uint32_t opCapacity() const noexcept { return _inst._opCapacity; }
+
+ //! Sets operand count.
+ inline void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); }
+
+ //! Returns operands array.
+ inline Operand* operands() noexcept { return (Operand*)_opArray; }
+ //! Returns operands array (const).
+ inline const Operand* operands() const noexcept { return (const Operand*)_opArray; }
+
+ //! Returns operand at the given `index`.
+ inline Operand& op(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+    return _opArray[index].as<Operand>();
+ }
+
+ //! Returns operand at the given `index` (const).
+ inline const Operand& op(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+    return _opArray[index].as<Operand>();
+ }
+
+ //! Sets operand at the given `index` to `op`.
+ inline void setOp(uint32_t index, const Operand_& op) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].copyFrom(op);
+ }
+
+ //! Resets operand at the given `index` to none.
+ inline void resetOp(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].reset();
+ }
+
+ //! Resets operands at `[start, end)` range.
+ inline void resetOpRange(uint32_t start, uint32_t end) noexcept {
+ for (uint32_t i = start; i < end; i++)
+ _opArray[i].reset();
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline bool hasOpType(uint32_t opType) const noexcept {
+ for (uint32_t i = 0, count = opCount(); i < count; i++)
+ if (_opArray[i].opType() == opType)
+ return true;
+ return false;
+ }
+
+ inline bool hasRegOp() const noexcept { return hasOpType(Operand::kOpReg); }
+ inline bool hasMemOp() const noexcept { return hasOpType(Operand::kOpMem); }
+ inline bool hasImmOp() const noexcept { return hasOpType(Operand::kOpImm); }
+ inline bool hasLabelOp() const noexcept { return hasOpType(Operand::kOpLabel); }
+
+ inline uint32_t indexOfOpType(uint32_t opType) const noexcept {
+ uint32_t i = 0;
+ uint32_t count = opCount();
+
+ while (i < count) {
+ if (_opArray[i].opType() == opType)
+ break;
+ i++;
+ }
+
+ return i;
+ }
+
+ inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(Operand::kOpMem); }
+ inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(Operand::kOpImm); }
+ inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(Operand::kOpLabel); }
+
+ //! \}
+
+ //! \name Rewriting
+ //! \{
+
+ //! \cond INTERNAL
+ inline uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; }
+ inline const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; }
+
+ ASMJIT_INLINE uint32_t getRewriteIndex(const uint32_t* id) const noexcept {
+ const uint32_t* array = _getRewriteArray();
+ ASMJIT_ASSERT(array <= id);
+
+ size_t index = (size_t)(id - array);
+ ASMJIT_ASSERT(index < 32);
+
+ return uint32_t(index);
+ }
+
+ ASMJIT_INLINE void rewriteIdAtIndex(uint32_t index, uint32_t id) noexcept {
+ uint32_t* array = _getRewriteArray();
+ array[index] = id;
+ }
+ //! \endcond
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ //! \cond INTERNAL
+ static inline uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
+ return opCount <= kBaseOpCapacity ? kBaseOpCapacity : Globals::kMaxOpCount;
+ }
+
+ static inline size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
+ size_t base = sizeof(InstNode) - kBaseOpCapacity * sizeof(Operand);
+ return base + opCapacity * sizeof(Operand);
+ }
+ //! \endcond
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstExNode]
+// ============================================================================
+
+//! Instruction node with maximum number of operands.
+//!
+//! This node is created automatically by Builder/Compiler when the required
+//! number of operands exceeds the default capacity of `InstNode`.
+class InstExNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(InstExNode)
+
+ //! Continued `_opArray[]` to hold up to `kMaxOpCount` operands.
+ Operand_ _opArrayEx[Globals::kMaxOpCount - kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstExNode` instance.
+ inline InstExNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCapacity = Globals::kMaxOpCount) noexcept
+ : InstNode(cb, instId, options, opCapacity) {}
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SectionNode]
+// ============================================================================
+
+//! Section node.
+class SectionNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SectionNode)
+
+ //! Section id.
+ uint32_t _id;
+
+ //! Next section node that follows this section.
+ //!
+ //! This link is only valid when the section is active (is part of the code)
+ //! and when `Builder::hasDirtySectionLinks()` returns `false`. If you intend
+ //! to use this field you should always call `Builder::updateSectionLinks()`
+ //! before you do so.
+ SectionNode* _nextSection;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SectionNode` instance.
+ inline SectionNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : BaseNode(cb, kNodeSection, kFlagHasNoEffect),
+ _id(id),
+ _nextSection(nullptr) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the section id.
+ inline uint32_t id() const noexcept { return _id; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelNode]
+// ============================================================================
+
+//! Label node.
+class LabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(LabelNode)
+
+ //! Label identifier.
+ uint32_t _labelId;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `LabelNode` instance.
+ inline LabelNode(BaseBuilder* cb, uint32_t labelId = 0) noexcept
+ : BaseNode(cb, kNodeLabel, kFlagHasNoEffect | kFlagActsAsLabel),
+ _labelId(labelId) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref Label representation of the \ref LabelNode.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::AlignNode]
+// ============================================================================
+
+//! Align directive (BaseBuilder).
+//!
+//! Wraps `.align` directive.
+class AlignNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(AlignNode)
+
+ //! Align mode, see `AlignMode`.
+ uint32_t _alignMode;
+ //! Alignment (in bytes).
+ uint32_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `AlignNode` instance.
+ inline AlignNode(BaseBuilder* cb, uint32_t alignMode, uint32_t alignment) noexcept
+ : BaseNode(cb, kNodeAlign, kFlagIsCode | kFlagHasNoEffect),
+ _alignMode(alignMode),
+ _alignment(alignment) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns align mode.
+ inline uint32_t alignMode() const noexcept { return _alignMode; }
+ //! Sets align mode to `alignMode`.
+ inline void setAlignMode(uint32_t alignMode) noexcept { _alignMode = alignMode; }
+
+  //! Returns the alignment in bytes.
+  inline uint32_t alignment() const noexcept { return _alignment; }
+  //! Sets the alignment in bytes to `alignment`.
+  inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedDataNode]
+// ============================================================================
+
+//! Embed data node.
+//!
+//! Wraps `.data` directive. The node contains data that will be placed at the
+//! node's position in the assembler stream. The data is considered to be RAW;
+//! neither analysis nor byte-order conversion is performed on it.
+class EmbedDataNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedDataNode)
+
+ enum : uint32_t {
+ kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2)
+ };
+
+ size_t _itemCount;
+ size_t _repeatCount;
+
+ union {
+ uint8_t* _externalData;
+ uint8_t _inlineData[kInlineBufferSize];
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedDataNode` instance.
+ inline EmbedDataNode(BaseBuilder* cb) noexcept
+ : BaseNode(cb, kNodeEmbedData, kFlagIsData),
+ _itemCount(0),
+ _repeatCount(0) {
+    _embed._typeId = uint8_t(Type::kIdU8);
+    _embed._typeSize = uint8_t(1);
+ memset(_inlineData, 0, kInlineBufferSize);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref Type::Id of the data.
+ inline uint32_t typeId() const noexcept { return _embed._typeId; }
+ //! Returns the size of a single data element.
+ inline uint32_t typeSize() const noexcept { return _embed._typeSize; }
+
+ //! Returns a pointer to the data casted to `uint8_t`.
+ inline uint8_t* data() const noexcept {
+    return dataSize() <= kInlineBufferSize ? const_cast<uint8_t*>(_inlineData) : _externalData;
+ }
+
+ //! Returns a pointer to the data casted to `T`.
+  template<typename T>
+  inline T* dataAs() const noexcept { return reinterpret_cast<T*>(data()); }
+
+ //! Returns the number of (typed) items in the array.
+ inline size_t itemCount() const noexcept { return _itemCount; }
+
+ //! Returns how many times the data is repeated (default 1).
+ //!
+ //! Repeated data is useful when defining constants for SIMD, for example.
+ inline size_t repeatCount() const noexcept { return _repeatCount; }
+
+ //! Returns the size of the data, not considering the number of times it repeats.
+ //!
+ //! \note The returned value is the same as `typeSize() * itemCount()`.
+ inline size_t dataSize() const noexcept { return typeSize() * _itemCount; }
+
+ //! \}
+};
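+
+// Hedged usage sketch (illustrative only; assumes an attached builder `cb`).
+// AlignNode and EmbedDataNode are normally created indirectly through the
+// emitter interface rather than constructed by hand:
+//
+//   static const uint32_t kConst[4] = { 1, 2, 3, 4 };
+//   cb.align(kAlignCode, 16);           // inserts an AlignNode
+//   cb.embed(kConst, sizeof(kConst));   // inserts an EmbedDataNode with raw bytes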
+
+// ============================================================================
+// [asmjit::EmbedLabelNode]
+// ============================================================================
+
+//! Label data node.
+class EmbedLabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelNode)
+
+ uint32_t _labelId;
+ uint32_t _dataSize;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelNode` instance.
+ inline EmbedLabelNode(BaseBuilder* cb, uint32_t labelId = 0, uint32_t dataSize = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabel, kFlagIsData),
+ _labelId(labelId),
+ _dataSize(dataSize) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the label to embed as \ref Label operand.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
+ //! Sets the label id (use with caution, improper use can break a lot of things).
+ inline void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
+
+ //! Returns the data size.
+ inline uint32_t dataSize() const noexcept { return _dataSize; }
+ //! Sets the data size.
+ inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::EmbedLabelDeltaNode]
+// ============================================================================
+
+//! Label data node.
+class EmbedLabelDeltaNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelDeltaNode)
+
+ uint32_t _labelId;
+ uint32_t _baseLabelId;
+ uint32_t _dataSize;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelDeltaNode` instance.
+ inline EmbedLabelDeltaNode(BaseBuilder* cb, uint32_t labelId = 0, uint32_t baseLabelId = 0, uint32_t dataSize = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabelDelta, kFlagIsData),
+ _labelId(labelId),
+ _baseLabelId(baseLabelId),
+ _dataSize(dataSize) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the label as `Label` operand.
+ inline Label label() const noexcept { return Label(_labelId); }
+ //! Returns the id of the label.
+ inline uint32_t labelId() const noexcept { return _labelId; }
+
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
+ //! Sets the label id.
+ inline void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
+
+ //! Returns the base label as `Label` operand.
+ inline Label baseLabel() const noexcept { return Label(_baseLabelId); }
+ //! Returns the id of the base label.
+ inline uint32_t baseLabelId() const noexcept { return _baseLabelId; }
+
+ //! Sets the base label id from `label` operand.
+ inline void setBaseLabel(const Label& baseLabel) noexcept { setBaseLabelId(baseLabel.id()); }
+ //! Sets the base label id.
+ inline void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; }
+
+ //! Returns the size of the embedded label address.
+ inline uint32_t dataSize() const noexcept { return _dataSize; }
+ //! Sets the size of the embedded label address.
+ inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use labelId() instead")
+ inline uint32_t id() const noexcept { return labelId(); }
+
+ ASMJIT_DEPRECATED("Use setLabelId() instead")
+ inline void setId(uint32_t id) noexcept { setLabelId(id); }
+
+ ASMJIT_DEPRECATED("Use baseLabelId() instead")
+ inline uint32_t baseId() const noexcept { return baseLabelId(); }
+
+ ASMJIT_DEPRECATED("Use setBaseLabelId() instead")
+ inline void setBaseId(uint32_t id) noexcept { setBaseLabelId(id); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+// ============================================================================
+// [asmjit::ConstPoolNode]
+// ============================================================================
+
+//! A node that wraps `ConstPool`.
+class ConstPoolNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(ConstPoolNode)
+
+ ConstPool _constPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `ConstPoolNode` instance.
+ inline ConstPoolNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : LabelNode(cb, id),
+ _constPool(&cb->_codeZone) {
+
+ setType(kNodeConstPool);
+ addFlags(kFlagIsData);
+ clearFlags(kFlagIsCode | kFlagHasNoEffect);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _constPool.empty(); }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _constPool.size(); }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _constPool.alignment(); }
+
+ //! Returns the wrapped `ConstPool` instance.
+ inline ConstPool& constPool() noexcept { return _constPool; }
+ //! Returns the wrapped `ConstPool` instance (const).
+ inline const ConstPool& constPool() const noexcept { return _constPool; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! See `ConstPool::add()`.
+ inline Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ return _constPool.add(data, size, dstOffset);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::CommentNode]
+// ============================================================================
+
+//! Comment node.
+class CommentNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(CommentNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `CommentNode` instance.
+ inline CommentNode(BaseBuilder* cb, const char* comment) noexcept
+ : BaseNode(cb, kNodeComment, kFlagIsInformative | kFlagHasNoEffect | kFlagIsRemovable) {
+ _inlineComment = comment;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SentinelNode]
+// ============================================================================
+
+//! Sentinel node.
+//!
+//! Sentinel is a marker that is completely ignored by the code builder. It's
+//! used to remember a position in the code, as it never gets removed by any pass.
+class SentinelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SentinelNode)
+
+  //! Type of the sentinel (purely informative).
+ enum SentinelType : uint32_t {
+ //! Type of the sentinel is not known.
+ kSentinelUnknown = 0u,
+ //! This is a sentinel used at the end of \ref FuncNode.
+ kSentinelFuncEnd = 1u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SentinelNode` instance.
+ inline SentinelNode(BaseBuilder* cb, uint32_t sentinelType = kSentinelUnknown) noexcept
+ : BaseNode(cb, kNodeSentinel, kFlagIsInformative | kFlagHasNoEffect) {
+
+ _sentinel._sentinelType = uint8_t(sentinelType);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the type of the sentinel.
+ inline uint32_t sentinelType() const noexcept {
+ return _sentinel._sentinelType;
+ }
+
+ //! Sets the type of the sentinel.
+ inline void setSentinelType(uint32_t type) noexcept {
+ _sentinel._sentinelType = uint8_t(type);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Pass]
+// ============================================================================
+
+//! Pass can be used to implement code transformations, analysis, and lowering.
+class ASMJIT_VIRTAPI Pass {
+public:
+ ASMJIT_BASE_CLASS(Pass)
+ ASMJIT_NONCOPYABLE(Pass)
+
+ //! BaseBuilder this pass is assigned to.
+ BaseBuilder* _cb = nullptr;
+ //! Name of the pass.
+ const char* _name = nullptr;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API Pass(const char* name) noexcept;
+ ASMJIT_API virtual ~Pass() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns \ref BaseBuilder associated with the pass.
+ inline const BaseBuilder* cb() const noexcept { return _cb; }
+ //! Returns the name of the pass.
+ inline const char* name() const noexcept { return _name; }
+
+ //! \}
+
+ //! \name Pass Interface
+ //! \{
+
+ //! Processes the code stored in Builder or Compiler.
+ //!
+ //! This is the only function that is called by the `BaseBuilder` to process
+  //! the code. It passes `zone`, which will be reset after `run()` finishes.
+ virtual Error run(Zone* zone, Logger* logger) = 0;
+
+ //! \}
+};
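+
+// Minimal sketch of a custom pass (illustrative only; `CountNodesPass` is a
+// hypothetical name). A pass walks the builder's node list inside `run()` and
+// is owned and executed by the BaseBuilder it is registered with:
+//
+//   class CountNodesPass : public Pass {
+//   public:
+//     CountNodesPass() noexcept : Pass("CountNodesPass") {}
+//     Error run(Zone* zone, Logger* logger) override {
+//       size_t count = 0;
+//       for (const BaseNode* node = cb()->firstNode(); node; node = node->next())
+//         count++;
+//       DebugUtils::unused(zone, logger, count);
+//       return kErrorOk;
+//     }
+//   };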
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
+#endif // ASMJIT_CORE_BUILDER_H_INCLUDED
diff --git a/Theodosius/asmjit/core/codebuffer.h b/Theodosius/asmjit/core/codebuffer.h
new file mode 100644
index 0000000..76c86b1
--- /dev/null
+++ b/Theodosius/asmjit/core/codebuffer.h
@@ -0,0 +1,126 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CodeBuffer]
+// ============================================================================
+
+//! Code or data buffer.
+struct CodeBuffer {
+ //! The content of the buffer (data).
+ uint8_t* _data;
+ //! Number of bytes of `data` used.
+ size_t _size;
+ //! Buffer capacity (in bytes).
+ size_t _capacity;
+ //! Buffer flags.
+ uint32_t _flags;
+
+ //! Code buffer flags.
+ enum Flags : uint32_t {
+ //! Buffer is external (not allocated by asmjit).
+ kFlagIsExternal = 0x00000001u,
+ //! Buffer is fixed (cannot be reallocated).
+ kFlagIsFixed = 0x00000002u
+ };
+
+ //! \name Overloaded Operators
+ //! \{
+
+  //! Returns a reference to the byte at the given `index`.
+ inline uint8_t& operator[](size_t index) noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+ //! \overload
+ inline const uint8_t& operator[](size_t index) const noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns code buffer flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the code buffer has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ //! Tests whether this code buffer has a fixed size.
+ //!
+ //! Fixed size means that the code buffer is fixed and cannot grow.
+ inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
+
+ //! Tests whether the data in this code buffer is external.
+ //!
+  //! External data can only be provided by users; it's never allocated by AsmJit.
+ inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
+
+ //! Tests whether the data in this code buffer is allocated (non-null).
+ inline bool isAllocated() const noexcept { return _data != nullptr; }
+
+ //! Tests whether the code buffer is empty.
+ inline bool empty() const noexcept { return !_size; }
+
+ //! Returns the size of the data.
+ inline size_t size() const noexcept { return _size; }
+ //! Returns the capacity of the data.
+ inline size_t capacity() const noexcept { return _capacity; }
+
+ //! Returns the pointer to the data the buffer references.
+ inline uint8_t* data() noexcept { return _data; }
+ //! \overload
+ inline const uint8_t* data() const noexcept { return _data; }
+
+ //! \}
+
+ //! \name Iterators
+ //! \{
+
+ inline uint8_t* begin() noexcept { return _data; }
+ inline const uint8_t* begin() const noexcept { return _data; }
+
+ inline uint8_t* end() noexcept { return _data + _size; }
+ inline const uint8_t* end() const noexcept { return _data + _size; }
+
+ //! \}
+};
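+
+// Small usage sketch (illustrative only): after assembling into a CodeHolder
+// named `code`, the emitted bytes of the default section can be inspected
+// directly through its CodeBuffer:
+//
+//   const CodeBuffer& buf = code.textSection()->buffer();
+//   uint32_t checksum = 0;
+//   for (uint8_t byte : buf)
+//     checksum += byte;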
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEBUFFER_H_INCLUDED
+
diff --git a/Theodosius/asmjit/core/codeholder.cpp b/Theodosius/asmjit/core/codeholder.cpp
new file mode 100644
index 0000000..3c4154e
--- /dev/null
+++ b/Theodosius/asmjit/core/codeholder.cpp
@@ -0,0 +1,1150 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/codewriter_p.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+#include <algorithm>
+#include <tuple>
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Globals]
+// ============================================================================
+
+static const char CodeHolder_addrTabName[] = ".addrtab";
+
+//! Encode MOD byte.
+static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
+ return (m << 6) | (o << 3) | rm;
+}
+
+// ============================================================================
+// [asmjit::LabelLinkIterator]
+// ============================================================================
+
+class LabelLinkIterator {
+public:
+ ASMJIT_INLINE LabelLinkIterator(LabelEntry* le) noexcept { reset(le); }
+
+ ASMJIT_INLINE explicit operator bool() const noexcept { return isValid(); }
+ ASMJIT_INLINE bool isValid() const noexcept { return _link != nullptr; }
+
+ ASMJIT_INLINE LabelLink* link() const noexcept { return _link; }
+ ASMJIT_INLINE LabelLink* operator->() const noexcept { return _link; }
+
+ ASMJIT_INLINE void reset(LabelEntry* le) noexcept {
+ _pPrev = &le->_links;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void next() noexcept {
+ _pPrev = &_link->next;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void resolveAndNext(CodeHolder* code) noexcept {
+ LabelLink* linkToDelete = _link;
+
+ _link = _link->next;
+ *_pPrev = _link;
+
+ code->_unresolvedLinkCount--;
+ code->_allocator.release(linkToDelete, sizeof(LabelLink));
+ }
+
+ LabelLink** _pPrev;
+ LabelLink* _link;
+};
+
+// ============================================================================
+// [asmjit::CodeHolder - Utilities]
+// ============================================================================
+
+static void CodeHolder_resetInternal(CodeHolder* self, uint32_t resetPolicy) noexcept {
+ uint32_t i;
+  const ZoneVector<BaseEmitter*>& emitters = self->emitters();
+
+ i = emitters.size();
+ while (i)
+ self->detach(emitters[--i]);
+
+ // Reset everything into its construction state.
+ self->_environment.reset();
+ self->_baseAddress = Globals::kNoBaseAddress;
+ self->_logger = nullptr;
+ self->_errorHandler = nullptr;
+
+ // Reset all sections.
+ uint32_t numSections = self->_sections.size();
+ for (i = 0; i < numSections; i++) {
+ Section* section = self->_sections[i];
+ if (section->_buffer.data() && !section->_buffer.isExternal())
+ ::free(section->_buffer._data);
+ section->_buffer._data = nullptr;
+ section->_buffer._capacity = 0;
+ }
+
+ // Reset zone allocator and all containers using it.
+ ZoneAllocator* allocator = self->allocator();
+
+ self->_emitters.reset();
+ self->_namedLabels.reset();
+ self->_relocations.reset();
+ self->_labelEntries.reset();
+ self->_sections.reset();
+ self->_sectionsByOrder.reset();
+
+ self->_unresolvedLinkCount = 0;
+ self->_addressTableSection = nullptr;
+ self->_addressTableEntries.reset();
+
+ allocator->reset(&self->_zone);
+ self->_zone.reset(resetPolicy);
+}
+
+static void CodeHolder_onSettingsUpdated(CodeHolder* self) noexcept {
+ // Notify all attached emitters about a settings update.
+ for (BaseEmitter* emitter : self->emitters()) {
+ emitter->onSettingsUpdated();
+ }
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Construction / Destruction]
+// ============================================================================
+
+CodeHolder::CodeHolder() noexcept
+ : _environment(),
+ _baseAddress(Globals::kNoBaseAddress),
+ _logger(nullptr),
+ _errorHandler(nullptr),
+ _zone(16384 - Zone::kBlockOverhead),
+ _allocator(&_zone),
+ _unresolvedLinkCount(0),
+ _addressTableSection(nullptr) {}
+
+CodeHolder::~CodeHolder() noexcept {
+ CodeHolder_resetInternal(this, Globals::kResetHard);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Init / Reset]
+// ============================================================================
+
+inline void CodeHolder_setSectionDefaultName(
+ Section* section,
+ char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
+ char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
+
+ section->_name.u32[0] = Support::bytepack32_4x8(uint8_t(c0), uint8_t(c1), uint8_t(c2), uint8_t(c3));
+ section->_name.u32[1] = Support::bytepack32_4x8(uint8_t(c4), uint8_t(c5), uint8_t(c6), uint8_t(c7));
+}
+
+Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noexcept {
+  // Cannot reinitialize if it's locked or if one or more emitters are attached.
+ if (isInitialized())
+ return DebugUtils::errored(kErrorAlreadyInitialized);
+
+ // If we are just initializing there should be no emitters attached.
+ ASMJIT_ASSERT(_emitters.empty());
+
+ // Create a default section and insert it to the `_sections` array.
+ Error err = _sections.willGrow(&_allocator) |
+ _sectionsByOrder.willGrow(&_allocator);
+ if (err == kErrorOk) {
+    Section* section = _allocator.allocZeroedT<Section>();
+ if (ASMJIT_LIKELY(section)) {
+ section->_flags = Section::kFlagExec | Section::kFlagConst;
+ CodeHolder_setSectionDefaultName(section, '.', 't', 'e', 'x', 't');
+ _sections.appendUnsafe(section);
+ _sectionsByOrder.appendUnsafe(section);
+ }
+ else {
+ err = DebugUtils::errored(kErrorOutOfMemory);
+ }
+ }
+
+ if (ASMJIT_UNLIKELY(err)) {
+ _zone.reset();
+ return err;
+ }
+ else {
+ _environment = environment;
+ _baseAddress = baseAddress;
+ return kErrorOk;
+ }
+}
+
+void CodeHolder::reset(uint32_t resetPolicy) noexcept {
+ CodeHolder_resetInternal(this, resetPolicy);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Attach / Detach]
+// ============================================================================
+
+Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
+ // Catch a possible misuse of the API.
+ if (ASMJIT_UNLIKELY(!emitter))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Invalid emitter, this should not be possible.
+ uint32_t type = emitter->emitterType();
+ if (ASMJIT_UNLIKELY(type == BaseEmitter::kTypeNone || type >= BaseEmitter::kTypeCount))
+ return DebugUtils::errored(kErrorInvalidState);
+
+  // This is suspicious, but don't fail if `emitter` is already attached
+  // to this code holder. This is not an error, but it's not recommended.
+ if (emitter->_code != nullptr) {
+ if (emitter->_code == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ // Reserve the space now as we cannot fail after `onAttach()` succeeded.
+ ASMJIT_PROPAGATE(_emitters.willGrow(&_allocator, 1));
+ ASMJIT_PROPAGATE(emitter->onAttach(this));
+
+ // Connect CodeHolder <-> BaseEmitter.
+ ASMJIT_ASSERT(emitter->_code == this);
+ _emitters.appendUnsafe(emitter);
+
+ return kErrorOk;
+}
+
+Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
+ if (ASMJIT_UNLIKELY(!emitter))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(emitter->_code != this))
+ return DebugUtils::errored(kErrorInvalidState);
+
+  // NOTE: We always detach if we were asked to; if an error happens during
+  // `emitter->onDetach()` we just propagate it, but the BaseEmitter will
+  // still be detached.
+ Error err = kErrorOk;
+ if (!emitter->isDestroyed())
+ err = emitter->onDetach(this);
+
+ // Disconnect CodeHolder <-> BaseEmitter.
+ uint32_t index = _emitters.indexOf(emitter);
+ ASMJIT_ASSERT(index != Globals::kNotFound);
+
+ _emitters.removeAt(index);
+ emitter->_code = nullptr;
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Logging]
+// ============================================================================
+
+void CodeHolder::setLogger(Logger* logger) noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ _logger = logger;
+ CodeHolder_onSettingsUpdated(this);
+#else
+ DebugUtils::unused(logger);
+#endif
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Error Handling]
+// ============================================================================
+
+void CodeHolder::setErrorHandler(ErrorHandler* errorHandler) noexcept {
+ _errorHandler = errorHandler;
+ CodeHolder_onSettingsUpdated(this);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Code Buffer]
+// ============================================================================
+
+static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
+ uint8_t* oldData = cb->_data;
+ uint8_t* newData;
+
+ if (oldData && !cb->isExternal())
+    newData = static_cast<uint8_t*>(::realloc(oldData, n));
+  else
+    newData = static_cast<uint8_t*>(::malloc(n));
+
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ cb->_data = newData;
+ cb->_capacity = n;
+
+ // Update pointers used by assemblers, if attached.
+ for (BaseEmitter* emitter : self->emitters()) {
+ if (emitter->isAssembler()) {
+      BaseAssembler* a = static_cast<BaseAssembler*>(emitter);
+ if (&a->_section->_buffer == cb) {
+ size_t offset = a->offset();
+
+ a->_bufferData = newData;
+ a->_bufferEnd = newData + n;
+ a->_bufferPtr = newData + offset;
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
+ // The size of the section must be valid.
+ size_t size = cb->size();
+  if (ASMJIT_UNLIKELY(n > std::numeric_limits<size_t>::max() - size))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // We can now check if growing the buffer is really necessary. It's unlikely
+ // that this function is called while there is still room for `n` bytes.
+ size_t capacity = cb->capacity();
+ size_t required = cb->size() + n;
+ if (ASMJIT_UNLIKELY(required <= capacity))
+ return kErrorOk;
+
+ if (cb->isFixed())
+ return DebugUtils::errored(kErrorTooLarge);
+
+ size_t kInitialCapacity = 8096;
+ if (capacity < kInitialCapacity)
+ capacity = kInitialCapacity;
+ else
+ capacity += Globals::kAllocOverhead;
+
+ do {
+ size_t old = capacity;
+ if (capacity < Globals::kGrowThreshold)
+ capacity *= 2;
+ else
+ capacity += Globals::kGrowThreshold;
+
+ // Overflow.
+ if (ASMJIT_UNLIKELY(old > capacity))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ } while (capacity - Globals::kAllocOverhead < required);
+
+ return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
+}
+
+Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
+ size_t capacity = cb->capacity();
+
+ if (n <= capacity)
+ return kErrorOk;
+
+ if (cb->isFixed())
+ return DebugUtils::errored(kErrorTooLarge);
+
+ return CodeHolder_reserveInternal(this, cb, n);
+}
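+
+// Usage sketch (illustrative only): callers that know the approximate size of
+// the code up front can pre-reserve a section's buffer to avoid repeated
+// reallocation through growBuffer(). Assumes an initialized CodeHolder `code`:
+//
+//   Section* text = code.textSection();
+//   code.reserveBuffer(&text->buffer(), 64 * 1024);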
+
+// ============================================================================
+// [asmjit::CodeHolder - Sections]
+// ============================================================================
+
+Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, uint32_t flags, uint32_t alignment, int32_t order) noexcept {
+ *sectionOut = nullptr;
+
+ if (nameSize == SIZE_MAX)
+ nameSize = strlen(name);
+
+ if (alignment == 0)
+ alignment = 1;
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment)))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize))
+ return DebugUtils::errored(kErrorInvalidSectionName);
+
+ uint32_t sectionId = _sections.size();
+ if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManySections);
+
+ ASMJIT_PROPAGATE(_sections.willGrow(&_allocator));
+ ASMJIT_PROPAGATE(_sectionsByOrder.willGrow(&_allocator));
+
+  Section* section = _allocator.allocZeroedT<Section>();
+ if (ASMJIT_UNLIKELY(!section))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ section->_id = sectionId;
+ section->_flags = flags;
+ section->_alignment = alignment;
+ section->_order = order;
+ memcpy(section->_name.str, name, nameSize);
+
+ Section** insertPosition = std::lower_bound(_sectionsByOrder.begin(), _sectionsByOrder.end(), section, [](const Section* a, const Section* b) {
+ return std::make_tuple(a->order(), a->id()) < std::make_tuple(b->order(), b->id());
+ });
+
+ _sections.appendUnsafe(section);
+ _sectionsByOrder.insertUnsafe((size_t)(insertPosition - _sectionsByOrder.data()), section);
+
+ *sectionOut = section;
+ return kErrorOk;
+}
+
+Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept {
+ if (nameSize == SIZE_MAX)
+ nameSize = strlen(name);
+
+  // This could also be put in a hash table, similar to what we do with labels;
+  // however, it's questionable, as the number of sections should be pretty low
+  // in general. Create an issue if this becomes a problem.
+ if (nameSize <= Globals::kMaxSectionNameSize) {
+ for (Section* section : _sections)
+ if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0')
+ return section;
+ }
+
+ return nullptr;
+}
+
+Section* CodeHolder::ensureAddressTableSection() noexcept {
+ if (_addressTableSection)
+ return _addressTableSection;
+
+  newSection(&_addressTableSection, CodeHolder_addrTabName, sizeof(CodeHolder_addrTabName) - 1, 0, _environment.registerSize(), std::numeric_limits<int32_t>::max());
+ return _addressTableSection;
+}
+
+Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
+ AddressTableEntry* entry = _addressTableEntries.get(address);
+ if (entry)
+ return kErrorOk;
+
+ Section* section = ensureAddressTableSection();
+ if (ASMJIT_UNLIKELY(!section))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+  entry = _zone.newT<AddressTableEntry>(address);
+ if (ASMJIT_UNLIKELY(!entry))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _addressTableEntries.insert(entry);
+ section->_virtualSize += _environment.registerSize();
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Labels / Symbols]
+// ============================================================================
+
+//! Only used to look up a label in `_namedLabels`.
+class LabelByName {
+public:
+ inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept
+ : _key(key),
+ _keySize(uint32_t(keySize)),
+ _hashCode(hashCode),
+ _parentId(parentId) {}
+
+ inline uint32_t hashCode() const noexcept { return _hashCode; }
+
+ inline bool matches(const LabelEntry* entry) const noexcept {
+ return entry->nameSize() == _keySize &&
+ entry->parentId() == _parentId &&
+ ::memcmp(entry->name(), _key, _keySize) == 0;
+ }
+
+ const char* _key;
+ uint32_t _keySize;
+ uint32_t _hashCode;
+ uint32_t _parentId;
+};
+
+// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`.
+static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize) noexcept {
+ uint32_t hashCode = 0;
+ if (nameSize == SIZE_MAX) {
+ size_t i = 0;
+ for (;;) {
+ uint8_t c = uint8_t(name[i]);
+ if (!c) break;
+ hashCode = Support::hashRound(hashCode, c);
+ i++;
+ }
+ nameSize = i;
+ }
+ else {
+ for (size_t i = 0; i < nameSize; i++) {
+ uint8_t c = uint8_t(name[i]);
+ if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
+ hashCode = Support::hashRound(hashCode, c);
+ }
+ }
+ return hashCode;
+}
+
+LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept {
+  LabelLink* link = _allocator.allocT<LabelLink>();
+ if (ASMJIT_UNLIKELY(!link)) return nullptr;
+
+ link->next = le->_links;
+ le->_links = link;
+
+ link->sectionId = sectionId;
+ link->relocId = Globals::kInvalidId;
+ link->offset = offset;
+ link->rel = rel;
+ link->format = format;
+
+ _unresolvedLinkCount++;
+ return link;
+}
+
+Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
+ *entryOut = nullptr;
+
+ uint32_t labelId = _labelEntries.size();
+ if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyLabels);
+
+ ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
+  LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
+
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ le->_setId(labelId);
+ le->_parentId = Globals::kInvalidId;
+ le->_offset = 0;
+ _labelEntries.appendUnsafe(le);
+
+ *entryOut = le;
+ return kErrorOk;
+}
+
+Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId) noexcept {
+ *entryOut = nullptr;
+ uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
+
+ if (ASMJIT_UNLIKELY(nameSize == 0))
+ return DebugUtils::errored(kErrorInvalidLabelName);
+
+ if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
+ return DebugUtils::errored(kErrorLabelNameTooLong);
+
+ switch (type) {
+ case Label::kTypeLocal:
+ if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size()))
+ return DebugUtils::errored(kErrorInvalidParentLabel);
+
+ hashCode ^= parentId;
+ break;
+
+ case Label::kTypeGlobal:
+ case Label::kTypeExternal:
+ if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
+ return DebugUtils::errored(kErrorNonLocalLabelCannotHaveParent);
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+
+  // Don't allow duplicates to be inserted. Local labels with the same name are
+  // allowed as long as they have different parents; this is already handled by
+  // mixing the parent id into the hash, so identical names hash differently
+  // under different parent labels.
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
+ if (ASMJIT_UNLIKELY(le))
+ return DebugUtils::errored(kErrorLabelAlreadyDefined);
+
+ Error err = kErrorOk;
+ uint32_t labelId = _labelEntries.size();
+
+ if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyLabels);
+
+ ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
+  le = _allocator.allocZeroedT<LabelEntry>();
+
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ le->_hashCode = hashCode;
+ le->_setId(labelId);
+ le->_type = uint8_t(type);
+ le->_parentId = parentId;
+ le->_offset = 0;
+ ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize));
+
+ _labelEntries.appendUnsafe(le);
+ _namedLabels.insert(allocator(), le);
+
+ *entryOut = le;
+ return err;
+}
+
+uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
+ uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
+ if (ASMJIT_UNLIKELY(!nameSize))
+ return 0;
+
+ if (parentId != Globals::kInvalidId)
+ hashCode ^= parentId;
+
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
+ return le ? le->id() : uint32_t(Globals::kInvalidId);
+}
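+
+// Usage sketch (illustrative only; assumes an emitter `a` attached to `code`).
+// Named labels reach newNamedLabelEntry() indirectly through the emitter and
+// can later be queried back by name:
+//
+//   Label fn = a.newNamedLabel("my_func", SIZE_MAX, Label::kTypeGlobal);
+//   a.bind(fn);
+//   uint32_t id = code.labelIdByName("my_func");   // == fn.id()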
+
+ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
+ if (!hasUnresolvedLinks())
+ return kErrorOk;
+
+ Error err = kErrorOk;
+ for (LabelEntry* le : labelEntries()) {
+ if (!le->isBound())
+ continue;
+
+ LabelLinkIterator link(le);
+ if (link) {
+ Support::FastUInt8 of = 0;
+ Section* toSection = le->section();
+ uint64_t toOffset = Support::addOverflow(toSection->offset(), le->offset(), &of);
+
+ do {
+ uint32_t linkSectionId = link->sectionId;
+ if (link->relocId == Globals::kInvalidId) {
+ Section* fromSection = sectionById(linkSectionId);
+ size_t linkOffset = link->offset;
+
+ CodeBuffer& buf = _sections[linkSectionId]->buffer();
+ ASMJIT_ASSERT(linkOffset < buf.size());
+
+ // Calculate the offset relative to the start of the virtual base.
+ Support::FastUInt8 localOF = of;
+ uint64_t fromOffset = Support::addOverflow(fromSection->offset(), linkOffset, &localOF);
+ int64_t displacement = int64_t(toOffset - fromOffset + uint64_t(int64_t(link->rel)));
+
+ if (!localOF) {
+ ASMJIT_ASSERT(size_t(linkOffset) < buf.size());
+ ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= link->format.valueSize());
+
+ // Overwrite a real displacement in the CodeBuffer.
+ if (CodeWriterUtils::writeOffset(buf._data + linkOffset, displacement, link->format)) {
+ link.resolveAndNext(this);
+ continue;
+ }
+ }
+
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ // Falls through to `link.next()`.
+ }
+
+ link.next();
+ } while (link);
+ }
+ }
+
+ return err;
+}
+
+ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept {
+ LabelEntry* le = labelEntry(label);
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+  if (ASMJIT_UNLIKELY(toSectionId >= _sections.size()))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ // Label can be bound only once.
+ if (ASMJIT_UNLIKELY(le->isBound()))
+ return DebugUtils::errored(kErrorLabelAlreadyBound);
+
+ // Bind the label.
+ Section* section = _sections[toSectionId];
+ le->_section = section;
+ le->_offset = toOffset;
+
+ Error err = kErrorOk;
+ CodeBuffer& buf = section->buffer();
+
+ // Fix all links to this label we have collected so far if they are within
+ // the same section. We ignore any inter-section links as these have to be
+ // fixed later.
+ LabelLinkIterator link(le);
+ while (link) {
+ uint32_t linkSectionId = link->sectionId;
+ size_t linkOffset = link->offset;
+
+ uint32_t relocId = link->relocId;
+ if (relocId != Globals::kInvalidId) {
+ // Adjust relocation data only.
+ RelocEntry* re = _relocations[relocId];
+ re->_payload += toOffset;
+ re->_targetSectionId = toSectionId;
+ }
+ else {
+ if (linkSectionId != toSectionId) {
+ link.next();
+ continue;
+ }
+
+ ASMJIT_ASSERT(linkOffset < buf.size());
+ int64_t displacement = int64_t(toOffset - uint64_t(linkOffset) + uint64_t(int64_t(link->rel)));
+
+ // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
+ ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= link->format.regionSize());
+
+ // Overwrite a real displacement in the CodeBuffer.
+ if (!CodeWriterUtils::writeOffset(buf._data + linkOffset, displacement, link->format)) {
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ link.next();
+ continue;
+ }
+ }
+
+ link.resolveAndNext(this);
+ }
+
+ return err;
+}
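+
+// Usage sketch (illustrative only): bindLabel() is what emitters end up calling
+// when a label is bound; it can also be used directly to place a label at an
+// explicit section/offset pair. Assumes an emitter `a` attached to `code` and
+// an existing Section* `section`:
+//
+//   Label data = a.newLabel();
+//   code.bindLabel(data, section->id(), 0x40);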
+
+// ============================================================================
+// [asmjit::BaseEmitter - Relocations]
+// ============================================================================
+
+Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType) noexcept {
+ ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));
+
+ uint32_t relocId = _relocations.size();
+ if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyRelocations);
+
+  RelocEntry* re = _allocator.allocZeroedT<RelocEntry>();
+ if (ASMJIT_UNLIKELY(!re))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ re->_id = relocId;
+ re->_relocType = uint8_t(relocType);
+ re->_sourceSectionId = Globals::kInvalidId;
+ re->_targetSectionId = Globals::kInvalidId;
+ _relocations.appendUnsafe(re);
+
+ *dst = re;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Expression Evaluation]
+// ============================================================================
+
+static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, uint64_t* out) noexcept {
+ uint64_t value[2];
+ for (size_t i = 0; i < 2; i++) {
+ uint64_t v;
+ switch (exp->valueType[i]) {
+ case Expression::kValueNone: {
+ v = 0;
+ break;
+ }
+
+ case Expression::kValueConstant: {
+ v = exp->value[i].constant;
+ break;
+ }
+
+ case Expression::kValueLabel: {
+ LabelEntry* le = exp->value[i].label;
+ if (!le->isBound())
+ return DebugUtils::errored(kErrorExpressionLabelNotBound);
+ v = le->section()->offset() + le->offset();
+ break;
+ }
+
+ case Expression::kValueExpression: {
+ Expression* nested = exp->value[i].expression;
+ ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(self, nested, &v));
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ value[i] = v;
+ }
+
+ uint64_t result;
+ uint64_t& a = value[0];
+ uint64_t& b = value[1];
+
+ switch (exp->opType) {
+ case Expression::kOpAdd:
+ result = a + b;
+ break;
+
+ case Expression::kOpSub:
+ result = a - b;
+ break;
+
+ case Expression::kOpMul:
+ result = a * b;
+ break;
+
+ case Expression::kOpSll:
+ result = (b > 63) ? uint64_t(0) : uint64_t(a << b);
+ break;
+
+ case Expression::kOpSrl:
+ result = (b > 63) ? uint64_t(0) : uint64_t(a >> b);
+ break;
+
+ case Expression::kOpSra:
+      result = Support::sar(a, Support::min<uint64_t>(b, 63));
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ *out = result;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Utilities]
+// ============================================================================
+
+Error CodeHolder::flatten() noexcept {
+ uint64_t offset = 0;
+ for (Section* section : _sectionsByOrder) {
+ uint64_t realSize = section->realSize();
+ if (realSize) {
+ uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
+ if (ASMJIT_UNLIKELY(alignedOffset < offset))
+ return DebugUtils::errored(kErrorTooLarge);
+
+ Support::FastUInt8 of = 0;
+ offset = Support::addOverflow(alignedOffset, realSize, &of);
+
+ if (ASMJIT_UNLIKELY(of))
+ return DebugUtils::errored(kErrorTooLarge);
+ }
+ }
+
+ // Now we know that we can assign offsets of all sections properly.
+ Section* prev = nullptr;
+ offset = 0;
+ for (Section* section : _sectionsByOrder) {
+ uint64_t realSize = section->realSize();
+ if (realSize)
+ offset = Support::alignUp(offset, section->alignment());
+ section->_offset = offset;
+
+ // Make sure the previous section extends a bit to cover the alignment.
+ if (prev)
+ prev->_virtualSize = offset - prev->_offset;
+
+ prev = section;
+ offset += realSize;
+ }
+
+ return kErrorOk;
+}
+
+size_t CodeHolder::codeSize() const noexcept {
+ Support::FastUInt8 of = 0;
+ uint64_t offset = 0;
+
+ for (Section* section : _sectionsByOrder) {
+ uint64_t realSize = section->realSize();
+
+ if (realSize) {
+ uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
+ ASMJIT_ASSERT(alignedOffset >= offset);
+ offset = Support::addOverflow(alignedOffset, realSize, &of);
+ }
+ }
+
+ if ((sizeof(uint64_t) > sizeof(size_t) && offset > SIZE_MAX) || of)
+ return SIZE_MAX;
+
+ return size_t(offset);
+}
+
+Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
+ // Base address must be provided.
+ if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ _baseAddress = baseAddress;
+ uint32_t addressSize = _environment.registerSize();
+
+ Section* addressTableSection = _addressTableSection;
+ uint32_t addressTableEntryCount = 0;
+ uint8_t* addressTableEntryData = nullptr;
+
+ if (addressTableSection) {
+ ASMJIT_PROPAGATE(
+ reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
+ addressTableEntryData = addressTableSection->_buffer.data();
+ }
+
+ // Relocate all recorded locations.
+ for (const RelocEntry* re : _relocations) {
+ // Possibly deleted or optimized-out entry.
+ if (re->relocType() == RelocEntry::kTypeNone)
+ continue;
+
+ Section* sourceSection = sectionById(re->sourceSectionId());
+ Section* targetSection = nullptr;
+
+ if (re->targetSectionId() != Globals::kInvalidId)
+ targetSection = sectionById(re->targetSectionId());
+
+ uint64_t value = re->payload();
+ uint64_t sectionOffset = sourceSection->offset();
+ uint64_t sourceOffset = re->sourceOffset();
+
+ // Make sure that the `RelocEntry` doesn't go out of bounds.
+ size_t regionSize = re->format().regionSize();
+ if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() ||
+ sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ uint8_t* buffer = sourceSection->data();
+ size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
+
+ switch (re->relocType()) {
+ case RelocEntry::kTypeExpression: {
+ Expression* expression = (Expression*)(uintptr_t(value));
+ ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(this, expression, &value));
+ break;
+ }
+
+ case RelocEntry::kTypeAbsToAbs: {
+ break;
+ }
+
+ case RelocEntry::kTypeRelToAbs: {
+ // Value is currently a relative offset from the start of its section.
+ // We have to convert it to an absolute offset (including base address).
+ if (ASMJIT_UNLIKELY(!targetSection))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ //value += baseAddress + sectionOffset + sourceOffset + regionSize;
+ value += baseAddress + targetSection->offset();
+ break;
+ }
+
+ case RelocEntry::kTypeAbsToRel: {
+ value -= baseAddress + sectionOffset + sourceOffset + regionSize;
+ if (addressSize > 4 && !Support::isInt32(int64_t(value)))
+ return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
+ break;
+ }
+
+ case RelocEntry::kTypeX64AddressEntry: {
+ if (re->format().valueSize() != 4 || valueOffset < 2)
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ // First try whether a relative 32-bit displacement would work.
+ value -= baseAddress + sectionOffset + sourceOffset + regionSize;
+ if (!Support::isInt32(int64_t(value))) {
+ // Relative 32-bit displacement is not possible, use '.addrtab' section.
+ AddressTableEntry* atEntry = _addressTableEntries.get(re->payload());
+ if (ASMJIT_UNLIKELY(!atEntry))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ // Cannot be null as we have just matched the `AddressTableEntry`.
+ ASMJIT_ASSERT(addressTableSection != nullptr);
+
+ if (!atEntry->hasAssignedSlot())
+ atEntry->_slot = addressTableEntryCount++;
+
+ size_t atEntryIndex = size_t(atEntry->slot()) * addressSize;
+ uint64_t addrSrc = sectionOffset + sourceOffset + regionSize;
+ uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex);
+
+ value = addrDst - addrSrc;
+ if (!Support::isInt32(int64_t(value)))
+ return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
+
+ // Bytes that replace [REX, OPCODE] bytes.
+ uint32_t byte0 = 0xFF;
+ uint32_t byte1 = buffer[valueOffset - 1];
+
+ if (byte1 == 0xE8) {
+ // Patch CALL/MOD byte to FF /2 (-> 0x15).
+ byte1 = x86EncodeMod(0, 2, 5);
+ }
+ else if (byte1 == 0xE9) {
+ // Patch JMP/MOD byte to FF /4 (-> 0x25).
+ byte1 = x86EncodeMod(0, 4, 5);
+ }
+ else {
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+
+ // Patch `jmp/call` instruction.
+ buffer[valueOffset - 2] = uint8_t(byte0);
+ buffer[valueOffset - 1] = uint8_t(byte1);
+
+ Support::writeU64uLE(addressTableEntryData + atEntryIndex, re->payload());
+ }
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+
+ switch (re->format().valueSize()) {
+ case 1:
+ Support::writeU8(buffer + valueOffset, uint32_t(value & 0xFFu));
+ break;
+
+ case 2:
+ Support::writeU16uLE(buffer + valueOffset, uint32_t(value & 0xFFFFu));
+ break;
+
+ case 4:
+ Support::writeU32uLE(buffer + valueOffset, uint32_t(value & 0xFFFFFFFFu));
+ break;
+
+ case 8:
+ Support::writeU64uLE(buffer + valueOffset, value);
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+ }
+
+ // Fixup the virtual size of the address table if it's the last section.
+ if (_sectionsByOrder.last() == addressTableSection) {
+ size_t addressTableSize = addressTableEntryCount * addressSize;
+ addressTableSection->_buffer._size = addressTableSize;
+ addressTableSection->_virtualSize = addressTableSize;
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t copyOptions) noexcept {
+ if (ASMJIT_UNLIKELY(!isSectionValid(sectionId)))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ Section* section = sectionById(sectionId);
+ size_t bufferSize = section->bufferSize();
+
+ if (ASMJIT_UNLIKELY(dstSize < bufferSize))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ memcpy(dst, section->data(), bufferSize);
+
+ if (bufferSize < dstSize && (copyOptions & kCopyPadSectionBuffer)) {
+ size_t paddingSize = dstSize - bufferSize;
+    memset(static_cast<uint8_t*>(dst) + bufferSize, 0, paddingSize);
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOptions) noexcept {
+ size_t end = 0;
+ for (Section* section : _sectionsByOrder) {
+ if (section->offset() > dstSize)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ size_t bufferSize = section->bufferSize();
+ size_t offset = size_t(section->offset());
+
+ if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+    uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset;
+ size_t paddingSize = 0;
+ memcpy(dstTarget, section->data(), bufferSize);
+
+ if ((copyOptions & kCopyPadSectionBuffer) && bufferSize < section->virtualSize()) {
+ paddingSize = Support::min(dstSize - offset, size_t(section->virtualSize())) - bufferSize;
+ memset(dstTarget + bufferSize, 0, paddingSize);
+ }
+
+ end = Support::max(end, offset + bufferSize + paddingSize);
+ }
+
+ if (end < dstSize && (copyOptions & kCopyPadTargetBuffer)) {
+    memset(static_cast<uint8_t*>(dst) + end, 0, dstSize - end);
+ }
+
+ return kErrorOk;
+}
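+
+// Hedged end-to-end sketch (illustrative only) of the manual relocation flow
+// these utilities implement; `allocExecutableMemory` is a hypothetical
+// placeholder for however the caller obtains executable memory. Assumes code
+// was already emitted into `code`:
+//
+//   code.flatten();                               // assign section offsets
+//   code.resolveUnresolvedLinks();                // fix cross-section links
+//   size_t size = code.codeSize();
+//   void* p = allocExecutableMemory(size);        // hypothetical allocator
+//   code.relocateToBase(uint64_t(uintptr_t(p)));
+//   code.copyFlattenedData(p, size, CodeHolder::kCopyPadSectionBuffer);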
+
+// ============================================================================
+// [asmjit::CodeHolder - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(code_holder) {
+ CodeHolder code;
+
+ INFO("Verifying CodeHolder::init()");
+ Environment env;
+ env.init(Environment::kArchX86);
+
+ code.init(env);
+ EXPECT(code.arch() == Environment::kArchX86);
+
+ INFO("Verifying named labels");
+ LabelEntry* le;
+ EXPECT(code.newNamedLabelEntry(&le, "NamedLabel", SIZE_MAX, Label::kTypeGlobal) == kErrorOk);
+ EXPECT(strcmp(le->name(), "NamedLabel") == 0);
+ EXPECT(code.labelIdByName("NamedLabel") == le->id());
+
+ INFO("Verifying section ordering");
+ Section* section1;
+  EXPECT(code.newSection(&section1, "high-priority", SIZE_MAX, 0, 1, -1) == kErrorOk);
+ EXPECT(code.sections()[1] == section1);
+ EXPECT(code.sectionsByOrder()[0] == section1);
+
+ Section* section0;
+  EXPECT(code.newSection(&section0, "higher-priority", SIZE_MAX, 0, 1, -2) == kErrorOk);
+ EXPECT(code.sections()[2] == section0);
+ EXPECT(code.sectionsByOrder()[0] == section0);
+ EXPECT(code.sectionsByOrder()[1] == section1);
+
+ Section* section3;
+  EXPECT(code.newSection(&section3, "low-priority", SIZE_MAX, 0, 1, 2) == kErrorOk);
+ EXPECT(code.sections()[3] == section3);
+ EXPECT(code.sectionsByOrder()[3] == section3);
+
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/codeholder.h b/Theodosius/asmjit/core/codeholder.h
new file mode 100644
index 0000000..06bf3f9
--- /dev/null
+++ b/Theodosius/asmjit/core/codeholder.h
@@ -0,0 +1,1061 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+#define ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+
+#include "../core/archtraits.h"
+#include "../core/codebuffer.h"
+#include "../core/datatypes.h"
+#include "../core/errorhandler.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/target.h"
+#include "../core/zone.h"
+#include "../core/zonehash.h"
+#include "../core/zonestring.h"
+#include "../core/zonetree.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+class CodeHolder;
+class LabelEntry;
+class Logger;
+
+// ============================================================================
+// [asmjit::AlignMode]
+// ============================================================================
+
+//! Align mode.
+enum AlignMode : uint32_t {
+ //! Align executable code.
+ kAlignCode = 0,
+ //! Align non-executable code.
+ kAlignData = 1,
+ //! Align by a sequence of zeros.
+ kAlignZero = 2,
+ //! Count of alignment modes.
+ kAlignCount = 3
+};
+
+// ============================================================================
+// [asmjit::Expression]
+// ============================================================================
+
+//! Expression node that can reference constants, labels, and other expressions.
+struct Expression {
+ //! Operation type.
+ enum OpType : uint8_t {
+ //! Addition.
+ kOpAdd = 0,
+ //! Subtraction.
+ kOpSub = 1,
+ //! Multiplication
+ kOpMul = 2,
+ //! Logical left shift.
+ kOpSll = 3,
+ //! Logical right shift.
+ kOpSrl = 4,
+ //! Arithmetic right shift.
+ kOpSra = 5
+ };
+
+ //! Type of \ref Value.
+ enum ValueType : uint8_t {
+ //! No value or invalid.
+ kValueNone = 0,
+ //! Value is 64-bit unsigned integer (constant).
+ kValueConstant = 1,
+ //! Value is \ref LabelEntry, which references a \ref Label.
+ kValueLabel = 2,
+ //! Value is \ref Expression
+ kValueExpression = 3
+ };
+
+ //! Expression value.
+ union Value {
+ //! Constant.
+ uint64_t constant;
+ //! Pointer to another expression.
+ Expression* expression;
+    //! Pointer to \ref LabelEntry.
+ LabelEntry* label;
+ };
+
+ //! Operation type.
+ uint8_t opType;
+ //! Value types of \ref value.
+ uint8_t valueType[2];
+ //! Reserved for future use, should be initialized to zero.
+ uint8_t reserved[5];
+ //! Expression left and right values.
+ Value value[2];
+
+ //! Resets the whole expression.
+ //!
+ //! Changes both values to \ref kValueNone.
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! Sets the value type at `index` to \ref kValueConstant and its content to `constant`.
+ inline void setValueAsConstant(size_t index, uint64_t constant) noexcept {
+ valueType[index] = kValueConstant;
+ value[index].constant = constant;
+ }
+
+ //! Sets the value type at `index` to \ref kValueLabel and its content to `labelEntry`.
+ inline void setValueAsLabel(size_t index, LabelEntry* labelEntry) noexcept {
+ valueType[index] = kValueLabel;
+ value[index].label = labelEntry;
+ }
+
+ //! Sets the value type at `index` to \ref kValueExpression and its content to `expression`.
+ inline void setValueAsExpression(size_t index, Expression* expression) noexcept {
+    valueType[index] = kValueExpression;
+ value[index].expression = expression;
+ }
+};
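+
+// Minimal sketch (illustrative only) of building an expression that computes
+// `labelOffset + 16`, given an already created `LabelEntry* le`:
+//
+//   Expression exp;
+//   exp.reset();
+//   exp.opType = Expression::kOpAdd;
+//   exp.setValueAsLabel(0, le);        // left operand  <- label offset
+//   exp.setValueAsConstant(1, 16);     // right operand <- constant 16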
+
+// ============================================================================
+// [asmjit::Section]
+// ============================================================================
+
+//! Section entry.
+class Section {
+public:
+ //! Section id.
+ uint32_t _id;
+ //! Section flags.
+ uint32_t _flags;
+ //! Section alignment requirements (0 if no requirements).
+ uint32_t _alignment;
+ //! Order (lower value means higher priority).
+ int32_t _order;
+ //! Offset of this section from base-address.
+ uint64_t _offset;
+ //! Virtual size of the section (zero initialized sections).
+ uint64_t _virtualSize;
+ //! Section name (max 35 characters, PE allows max 8).
+ FixedString<Globals::kMaxSectionNameSize + 1> _name;
+ //! Code or data buffer.
+ CodeBuffer _buffer;
+
+ //! Section flags.
+ enum Flags : uint32_t {
+ //! Executable (.text sections).
+ kFlagExec = 0x00000001u,
+ //! Read-only (.text and .data sections).
+ kFlagConst = 0x00000002u,
+ //! Zero initialized by the loader (BSS).
+ kFlagZero = 0x00000004u,
+ //! Info / comment flag.
+ kFlagInfo = 0x00000008u,
+ //! Section created implicitly and can be deleted by \ref Target.
+ kFlagImplicit = 0x80000000u
+ };
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the section id.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Returns the section name, as a null terminated string.
+ inline const char* name() const noexcept { return _name.str; }
+
+ //! Returns the section data.
+ inline uint8_t* data() noexcept { return _buffer.data(); }
+ //! \overload
+ inline const uint8_t* data() const noexcept { return _buffer.data(); }
+
+ //! Returns the section flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the section has the given `flag`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Adds `flags` to the section flags.
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ //! Removes `flags` from the section flags.
+ inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ //! Returns the minimum section alignment
+ inline uint32_t alignment() const noexcept { return _alignment; }
+ //! Sets the minimum section alignment
+ inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ //! Returns the section order, which has a higher priority than section id.
+ inline int32_t order() const noexcept { return _order; }
+
+ //! Returns the section offset, relative to base.
+ inline uint64_t offset() const noexcept { return _offset; }
+ //! Set the section offset.
+ inline void setOffset(uint64_t offset) noexcept { _offset = offset; }
+
+ //! Returns the virtual size of the section.
+ //!
+ //! Virtual size is initially zero and is never changed by AsmJit. It's normal
+ //! if virtual size is smaller than size returned by `bufferSize()` as the buffer
+ //! stores real data emitted by assemblers or appended by users.
+ //!
+ //! Use `realSize()` to get the real and final size of this section.
+ inline uint64_t virtualSize() const noexcept { return _virtualSize; }
+ //! Sets the virtual size of the section.
+ inline void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; }
+
+ //! Returns the buffer size of the section.
+ inline size_t bufferSize() const noexcept { return _buffer.size(); }
+ //! Returns the real size of the section calculated from virtual and buffer sizes.
+ inline uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
+
+ //! Returns the `CodeBuffer` used by this section.
+ inline CodeBuffer& buffer() noexcept { return _buffer; }
+ //! Returns the `CodeBuffer` used by this section (const).
+ inline const CodeBuffer& buffer() const noexcept { return _buffer; }
+
+ //! \}
+};
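+
+// A minimal usage sketch: creating a read-only data section through
+// CodeHolder::newSection(), declared later in this header. `code` is assumed
+// to be an initialized CodeHolder:
+//
+//   Section* data;
+//   Error err = code.newSection(&data, ".rodata", SIZE_MAX, Section::kFlagConst, 8);
+//   if (err == kErrorOk)
+//     data->setAlignment(16);               // alignment can be raised later too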
+
+// ============================================================================
+// [asmjit::OffsetFormat]
+// ============================================================================
+
+//! Provides information about formatting offsets, absolute addresses, or their
+//! parts. Offset format is used by both \ref RelocEntry and \ref LabelLink.
+//!
+//! The illustration below describes the relation of region size and offset size.
+//! Region size is the size of the whole unit whereas offset size is the size of
+//! the unit that will be patched.
+//!
+//! ```
+//! +-> Code buffer | The subject of the relocation (region) |
+//! | | (Word-Offset) (Word-Size) |
+//! |xxxxxxxxxxxxxxx|................|*PATCHED*|................|xxxxxxxxxxxx->
+//! | |
+//! [Word Offset points here]----+ +--- [WordOffset + WordSize]
+//! ```
+//!
+//! Once the offset word has been located it can be patched like this:
+//!
+//! ```
+//! |ImmDiscardLSB (discard LSB bits).
+//! |..
+//! [0000000000000iiiiiiiiiiiiiiiiiDD] - Offset value (32-bit)
+//! [000000000000000iiiiiiiiiiiiiiiii] - Offset value after discard LSB.
+//! [00000000000iiiiiiiiiiiiiiiii0000] - Offset value shifted by ImmBitShift.
+//! [xxxxxxxxxxxiiiiiiiiiiiiiiiiixxxx] - Patched word (32-bit)
+//! |...............|
+//! (ImmBitCount) +- ImmBitShift
+//! ```
+struct OffsetFormat {
+ //! Type of the displacement.
+ uint8_t _type;
+ //! Encoding flags.
+ uint8_t _flags;
+ //! Size of the region (in bytes) containing the offset value, if the offset
+ //! value is part of an instruction, otherwise it would be the same as
+ //! `_valueSize`.
+ uint8_t _regionSize;
+ //! Size of the offset value, in bytes (1, 2, 4, or 8).
+ uint8_t _valueSize;
+ //! Offset of the offset value, in bytes, relative to the start of the region
+ //! or data. Value offset would be zero if both region size and value size are
+ //! equal.
+ uint8_t _valueOffset;
+ //! Size of the displacement immediate value in bits.
+ uint8_t _immBitCount;
+ //! Shift of the displacement immediate value in bits in the target word.
+ uint8_t _immBitShift;
+ //! Number of least significant bits to discard before writing the immediate
+ //! to the destination. All discarded bits must be zero otherwise the value
+ //! is invalid.
+ uint8_t _immDiscardLsb;
+
+ //! Type of the displacement.
+ enum Type : uint8_t {
+ //! A value having `_immBitCount` bits and shifted by `_immBitShift`.
+ //!
+ //! This displacement type is sufficient for both X86/X64 and many other
+ //! architectures that store displacement as continuous bits within a machine
+ //! word.
+ kTypeCommon = 0,
+ //! AARCH64 ADR format of `[.|immlo:2|.....|immhi:19|.....]`.
+ kTypeAArch64_ADR,
+ //! AARCH64 ADRP format of `[.|immlo:2|.....|immhi:19|.....]` (4kB pages).
+ kTypeAArch64_ADRP,
+
+ //! Count of displacement types.
+ kTypeCount
+ };
+
+ //! Returns the type of the displacement.
+ inline uint32_t type() const noexcept { return _type; }
+
+ //! Returns flags.
+ inline uint32_t flags() const noexcept { return _flags; }
+
+ //! Returns the size of the region/instruction where the displacement is encoded.
+ inline uint32_t regionSize() const noexcept { return _regionSize; }
+
+ //! Returns the offset of the word relative to the start of the region
+ //! where the displacement is.
+ inline uint32_t valueOffset() const noexcept { return _valueOffset; }
+
+ //! Returns the size of the data-type (word) that contains the displacement, in bytes.
+ inline uint32_t valueSize() const noexcept { return _valueSize; }
+ //! Returns the count of bits of the displacement value in the data it's stored in.
+ inline uint32_t immBitCount() const noexcept { return _immBitCount; }
+ //! Returns the bit-shift of the displacement value in the data it's stored in.
+ inline uint32_t immBitShift() const noexcept { return _immBitShift; }
+ //! Returns the number of least significant bits of the displacement value,
+ //! that must be zero and that are not part of the encoded data.
+ inline uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; }
+
+ //! Resets this offset format to a simple data value of `dataSize` bytes.
+ //!
+ //! The region will be the same size as data and immediate bits would correspond
+ //! to `dataSize * 8`. There will be no immediate bit shift or discarded bits.
+ inline void resetToDataValue(size_t dataSize) noexcept {
+ ASMJIT_ASSERT(dataSize <= 8u);
+
+ _type = uint8_t(kTypeCommon);
+ _flags = uint8_t(0);
+ _regionSize = uint8_t(dataSize);
+ _valueSize = uint8_t(dataSize);
+ _valueOffset = uint8_t(0);
+ _immBitCount = uint8_t(dataSize * 8u);
+ _immBitShift = uint8_t(0);
+ _immDiscardLsb = uint8_t(0);
+ }
+
+ inline void resetToImmValue(uint32_t type, size_t valueSize, uint32_t immBitShift, uint32_t immBitCount, uint32_t immDiscardLsb) noexcept {
+ ASMJIT_ASSERT(valueSize <= 8u);
+ ASMJIT_ASSERT(immBitShift < valueSize * 8u);
+ ASMJIT_ASSERT(immBitCount <= 64u);
+ ASMJIT_ASSERT(immDiscardLsb <= 64u);
+
+ _type = uint8_t(type);
+ _flags = uint8_t(0);
+ _regionSize = uint8_t(valueSize);
+ _valueSize = uint8_t(valueSize);
+ _valueOffset = uint8_t(0);
+ _immBitCount = uint8_t(immBitCount);
+ _immBitShift = uint8_t(immBitShift);
+ _immDiscardLsb = uint8_t(immDiscardLsb);
+ }
+
+ inline void setRegion(size_t regionSize, size_t valueOffset) noexcept {
+ _regionSize = uint8_t(regionSize);
+ _valueOffset = uint8_t(valueOffset);
+ }
+
+ inline void setLeadingAndTrailingSize(size_t leadingSize, size_t trailingSize) noexcept {
+ _regionSize = uint8_t(leadingSize + trailingSize + _valueSize);
+ _valueOffset = uint8_t(leadingSize);
+ }
+};
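+
+// A minimal usage sketch of OffsetFormat with values chosen for illustration,
+// following the field documentation above:
+//
+//   OffsetFormat format;
+//   format.resetToDataValue(4);             // plain 32-bit value (e.g. x86 rel32)
+//
+//   format.resetToImmValue(OffsetFormat::kTypeAArch64_ADR, 4, 5, 21, 0);
+//                                           // 21-bit immediate, shifted by 5 bits
+//                                           // within a 32-bit instruction word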
+
+// ============================================================================
+// [asmjit::RelocEntry]
+// ============================================================================
+
+//! Relocation entry.
+struct RelocEntry {
+ //! Relocation id.
+ uint32_t _id;
+ //! Type of the relocation.
+ uint32_t _relocType;
+ //! Format of the relocated value.
+ OffsetFormat _format;
+ //! Source section id.
+ uint32_t _sourceSectionId;
+ //! Target section id.
+ uint32_t _targetSectionId;
+ //! Source offset (relative to start of the section).
+ uint64_t _sourceOffset;
+ //! Payload (target offset, target address, expression, etc).
+ uint64_t _payload;
+
+ //! Relocation type.
+ enum RelocType : uint32_t {
+ //! None/deleted (no relocation).
+ kTypeNone = 0,
+ //! Expression evaluation, `_payload` is pointer to `Expression`.
+ kTypeExpression = 1,
+ //! Relocate absolute to absolute.
+ kTypeAbsToAbs = 2,
+ //! Relocate relative to absolute.
+ kTypeRelToAbs = 3,
+ //! Relocate absolute to relative.
+ kTypeAbsToRel = 4,
+ //! Relocate absolute to relative or use trampoline.
+ kTypeX64AddressEntry = 5
+ };
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t id() const noexcept { return _id; }
+
+ inline uint32_t relocType() const noexcept { return _relocType; }
+ inline const OffsetFormat& format() const noexcept { return _format; }
+
+ inline uint32_t sourceSectionId() const noexcept { return _sourceSectionId; }
+ inline uint32_t targetSectionId() const noexcept { return _targetSectionId; }
+
+ inline uint64_t sourceOffset() const noexcept { return _sourceOffset; }
+ inline uint64_t payload() const noexcept { return _payload; }
+
+ Expression* payloadAsExpression() const noexcept {
+ return reinterpret_cast<Expression*>(uintptr_t(_payload));
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelLink]
+// ============================================================================
+
+//! Data structure used to link either unbound labels or cross-section links.
+struct LabelLink {
+ //! Next link (single-linked list).
+ LabelLink* next;
+ //! Section id where the label is bound.
+ uint32_t sectionId;
+ //! Relocation id or Globals::kInvalidId.
+ uint32_t relocId;
+ //! Label offset relative to the start of the section.
+ size_t offset;
+ //! Inlined rel8/rel32.
+ intptr_t rel;
+ //! Offset format information.
+ OffsetFormat format;
+};
+
+// ============================================================================
+// [asmjit::LabelEntry]
+// ============================================================================
+
+//! Label entry.
+//!
+//! Contains the following properties:
+//! * Label id - This is the only thing that is set to the `Label` operand.
+//! * Label name - Optional, used mostly to create executables and libraries.
+//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
+//! * Label parent id - Derived from many assemblers that allow to define a
+//! local label that falls under a global label. This allows to define
+//! many labels of the same name that have different parent (global) label.
+//! * Offset - offset of the label bound by `Assembler`.
+//! * Links - single-linked list that contains locations of code that has
+//! to be patched when the label gets bound. Every use of unbound label
+//! adds one link to `_links` list.
+//! * HVal - Hash value of label's name and optionally parentId.
+//! * HashNext - Hash-table implementation detail.
+class LabelEntry : public ZoneHashNode {
+public:
+ // Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has
+ // granularity of 32 bytes anyway). This gives `_name` the remaining space,
+ // which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
+ enum : uint32_t {
+ kStaticNameSize =
+ 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*))
+ };
+
+ //! Label type, see `Label::LabelType`.
+ uint8_t _type;
+ //! Must be zero.
+ uint8_t _flags;
+ //! Reserved.
+ uint16_t _reserved16;
+ //! Label parent id or zero.
+ uint32_t _parentId;
+ //! Label offset relative to the start of the `_section`.
+ uint64_t _offset;
+ //! Section where the label was bound.
+ Section* _section;
+ //! Label links.
+ LabelLink* _links;
+ //! Label name.
+ ZoneString _name;
+
+ //! \name Accessors
+ //! \{
+
+ // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
+ // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align
+ // the structure to 64-bits.
+
+ //! Returns label id.
+ inline uint32_t id() const noexcept { return _customData; }
+ //! Sets label id (internal, used only by `CodeHolder`).
+ inline void _setId(uint32_t id) noexcept { _customData = id; }
+
+ //! Returns label type, see `Label::LabelType`.
+ inline uint32_t type() const noexcept { return _type; }
+ //! Returns label flags, returns 0 at the moment.
+ inline uint32_t flags() const noexcept { return _flags; }
+
+ //! Tests whether the label has a parent label.
+ inline bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; }
+ //! Returns label's parent id.
+ inline uint32_t parentId() const noexcept { return _parentId; }
+
+ //! Returns the section where the label was bound.
+ //!
+ //! If the label was not yet bound the return value is `nullptr`.
+ inline Section* section() const noexcept { return _section; }
+
+ //! Tests whether the label has name.
+ inline bool hasName() const noexcept { return !_name.empty(); }
+
+ //! Returns the label's name.
+ //!
+ //! \note Local labels will return their local name without their parent
+ //! part, for example ".L1".
+ inline const char* name() const noexcept { return _name.data(); }
+
+ //! Returns size of label's name.
+ //!
+ //! \note Label name is always null terminated, so you can use `strlen()` to
+ //! get it, however, it's also cached in `LabelEntry` itself, so if you want
+ //! to know the size the fastest way is to call `LabelEntry::nameSize()`.
+ inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+ //! Returns links associated with this label.
+ inline LabelLink* links() const noexcept { return _links; }
+
+ //! Tests whether the label is bound.
+ inline bool isBound() const noexcept { return _section != nullptr; }
+ //! Tests whether the label is bound to the given `section`.
+ inline bool isBoundTo(Section* section) const noexcept { return _section == section; }
+
+ //! Returns the label offset (only useful if the label is bound).
+ inline uint64_t offset() const noexcept { return _offset; }
+
+ //! Returns the hash-value of label's name and its parent label (if any).
+ //!
+ //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
+ //! is implemented in `Support::hashString()` and `Support::hashRound()`.
+ inline uint32_t hashCode() const noexcept { return _hashCode; }
+
+ //! \}
+};
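+
+// A minimal usage sketch: inspecting a label after it has been bound. Here `a`
+// is assumed to be an assembler attached to `code` (a CodeHolder):
+//
+//   Label entry = a.newNamedLabel("entry");
+//   a.bind(entry);
+//   LabelEntry* le = code.labelEntry(entry);
+//   uint64_t offset = (le && le->isBound()) ? le->offset() : uint64_t(0);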
+
+// ============================================================================
+// [asmjit::AddressTableEntry]
+// ============================================================================
+
+//! Entry in an address table.
+class AddressTableEntry : public ZoneTreeNodeT<AddressTableEntry> {
+public:
+ ASMJIT_NONCOPYABLE(AddressTableEntry)
+
+ //! Address.
+ uint64_t _address;
+ //! Slot.
+ uint32_t _slot;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline explicit AddressTableEntry(uint64_t address) noexcept
+ : _address(address),
+ _slot(0xFFFFFFFFu) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint64_t address() const noexcept { return _address; }
+ inline uint32_t slot() const noexcept { return _slot; }
+
+ inline bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; }
+
+ inline bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; }
+ inline bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; }
+
+ inline bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; }
+ inline bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::CodeHolder]
+// ============================================================================
+
+//! Contains basic information about the target architecture and its options.
+//!
+//! In addition, it holds assembled code & data (including sections, labels, and
+//! relocation information). `CodeHolder` can store both binary and intermediate
+//! representation of assembly, which can be generated by \ref BaseAssembler,
+//! \ref BaseBuilder, and \ref BaseCompiler.
+//!
+//! \note `CodeHolder` has an ability to attach an \ref ErrorHandler, however,
+//! the error handler is not triggered by `CodeHolder` itself, it's instead
+//! propagated to all emitters that attach to it.
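+//!
+//! A minimal usage sketch, assuming the X86 backend and JIT support are
+//! compiled in (\ref JitRuntime and x86::Assembler available):
+//!
+//! ```
+//! JitRuntime rt;                   // Provides the host environment and memory.
+//! CodeHolder code;
+//! code.init(rt.environment());     // Must be initialized before attaching emitters.
+//!
+//! x86::Assembler a(&code);         // Attaches the assembler to `code`.
+//! a.mov(x86::eax, 1);
+//! a.ret();
+//!
+//! typedef int (*Func)(void);
+//! Func fn;
+//! rt.add(&fn, &code);              // Copies and relocates the code into executable memory.
+//! int result = fn();               // Returns 1.
+//! rt.release(fn);
+//! ```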
+class CodeHolder {
+public:
+ ASMJIT_NONCOPYABLE(CodeHolder)
+
+ //! Environment information.
+ Environment _environment;
+ //! Base address or \ref Globals::kNoBaseAddress.
+ uint64_t _baseAddress;
+
+ //! Attached `Logger`, used by all consumers.
+ Logger* _logger;
+ //! Attached `ErrorHandler`.
+ ErrorHandler* _errorHandler;
+
+ //! Code zone (used to allocate core structures).
+ Zone _zone;
+ //! Zone allocator, used to manage internal containers.
+ ZoneAllocator _allocator;
+
+ //! Attached emitters.
+ ZoneVector<BaseEmitter*> _emitters;
+ //! Section entries.
+ ZoneVector<Section*> _sections;
+ //! Section entries sorted by section order and then section id.
+ ZoneVector<Section*> _sectionsByOrder;
+ //! Label entries.
+ ZoneVector<LabelEntry*> _labelEntries;
+ //! Relocation entries.
+ ZoneVector<RelocEntry*> _relocations;
+ //! Label name -> LabelEntry (only named labels).
+ ZoneHash<LabelEntry> _namedLabels;
+
+ //! Count of label links, which are not resolved.
+ size_t _unresolvedLinkCount;
+ //! Pointer to an address table section (or null if this section doesn't exist).
+ Section* _addressTableSection;
+ //! Address table entries.
+ ZoneTree<AddressTableEntry> _addressTableEntries;
+
+ //! Options that can be used with \ref copySectionData() and \ref copyFlattenedData().
+ enum CopyOptions : uint32_t {
+ //! If virtual size of a section is greater than the size of its \ref CodeBuffer
+ //! then all bytes between the buffer size and virtual size will be zeroed.
+ //! If this option is not set then those bytes are left as is, which means
+ //! that if the user didn't initialize them they would keep whatever content
+ //! the buffer previously held, which may be unwanted.
+ kCopyPadSectionBuffer = 0x00000001u,
+
+#ifndef ASMJIT_NO_DEPRECATED
+ kCopyWithPadding = kCopyPadSectionBuffer,
+#endif // !ASMJIT_NO_DEPRECATED
+
+ //! Zeroes the target buffer if the flattened data is less than the destination
+ //! size. This option works only with \ref copyFlattenedData() as it processes
+ //! multiple sections. It is ignored by \ref copySectionData().
+ kCopyPadTargetBuffer = 0x00000002u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates an uninitialized CodeHolder (you must init() it before it can be used).
+ ASMJIT_API CodeHolder() noexcept;
+ //! Destroys the CodeHolder.
+ ASMJIT_API ~CodeHolder() noexcept;
+
+ //! Tests whether the `CodeHolder` has been initialized.
+ //!
+ //! Emitters can be only attached to initialized `CodeHolder` instances.
+ inline bool isInitialized() const noexcept { return _environment.isInitialized(); }
+
+ //! Initializes CodeHolder to hold code described by code `info`.
+ ASMJIT_API Error init(const Environment& environment, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept;
+ //! Detaches all code-generators attached and resets the `CodeHolder`.
+ ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Attach & Detach
+ //! \{
+
+ //! Attaches an emitter to this `CodeHolder`.
+ ASMJIT_API Error attach(BaseEmitter* emitter) noexcept;
+ //! Detaches an emitter from this `CodeHolder`.
+ ASMJIT_API Error detach(BaseEmitter* emitter) noexcept;
+
+ //! \}
+
+ //! \name Allocators
+ //! \{
+
+ //! Returns the allocator that the `CodeHolder` uses.
+ //!
+ //! \note This should be only used for AsmJit's purposes. Code holder uses
+ //! arena allocator to allocate everything, so anything allocated through
+ //! this allocator will be invalidated by \ref CodeHolder::reset() or by
+ //! CodeHolder's destructor.
+ inline ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
+
+ //! \}
+
+ //! \name Code & Architecture
+ //! \{
+
+ //! Returns the target environment information, see \ref Environment.
+ inline const Environment& environment() const noexcept { return _environment; }
+
+ //! Returns the target architecture.
+ inline uint32_t arch() const noexcept { return environment().arch(); }
+ //! Returns the target sub-architecture.
+ inline uint32_t subArch() const noexcept { return environment().subArch(); }
+
+ //! Tests whether a static base-address is set.
+ inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
+ //! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set.
+ inline uint64_t baseAddress() const noexcept { return _baseAddress; }
+
+ //! \}
+
+ //! \name Emitters
+ //! \{
+
+ //! Returns a vector of attached emitters.
+ inline const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; }
+
+ //! \}
+
+ //! \name Logging
+ //! \{
+
+ //! Returns the attached logger, see \ref Logger.
+ inline Logger* logger() const noexcept { return _logger; }
+ //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters.
+ ASMJIT_API void setLogger(Logger* logger) noexcept;
+ //! Resets the logger to none.
+ inline void resetLogger() noexcept { setLogger(nullptr); }
+
+ //! \}
+
+ //! \name Error Handling
+ //! \{
+
+ //! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler.
+ inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+ //! Returns the attached error handler.
+ inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+ //! Attach an error handler to this `CodeHolder`.
+ ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
+ //! Resets the error handler to none.
+ inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+ //! \}
+
+ //! \name Code Buffer
+ //! \{
+
+ //! Makes sure that at least `n` bytes can be added to CodeHolder's buffer `cb`.
+ //!
+ //! \note The buffer `cb` must be managed by `CodeHolder` - otherwise the
+ //! behavior of the function is undefined.
+ ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+ //! Reserves the size of `cb` to at least `n` bytes.
+ //!
+ //! \note The buffer `cb` must be managed by `CodeHolder` - otherwise the
+ //! behavior of the function is undefined.
+ ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+ //! \}
+
+ //! \name Sections
+ //! \{
+
+ //! Returns an array of `Section*` records.
+ inline const ZoneVector<Section*>& sections() const noexcept { return _sections; }
+ //! Returns an array of `Section*` records sorted according to section order first, then section id.
+ inline const ZoneVector<Section*>& sectionsByOrder() const noexcept { return _sectionsByOrder; }
+ //! Returns the number of sections.
+ inline uint32_t sectionCount() const noexcept { return _sections.size(); }
+
+ //! Tests whether the given `sectionId` is valid.
+ inline bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); }
+
+ //! Creates a new section and returns its pointer in `sectionOut`.
+ //!
+ //! Returns `Error`, does not report a possible error to `ErrorHandler`.
+ ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, uint32_t flags = 0, uint32_t alignment = 1, int32_t order = 0) noexcept;
+
+ //! Returns a section entry of the given index.
+ inline Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; }
+
+ //! Returns the section that matches the given `name`.
+ //!
+ //! If there is no such section, `nullptr` is returned.
+ ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept;
+
+ //! Returns '.text' section (section that commonly represents code).
+ //!
+ //! \note Text section is always the first section in \ref CodeHolder::sections() array.
+ inline Section* textSection() const noexcept { return _sections[0]; }
+
+ //! Tests whether '.addrtab' section exists.
+ inline bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; }
+
+ //! Returns '.addrtab' section.
+ //!
+ //! This section is used exclusively by AsmJit to store absolute 64-bit
+ //! addresses that cannot be encoded in instructions like 'jmp' or 'call'.
+ //!
+ //! \note This section is created on demand, the returned pointer can be null.
+ inline Section* addressTableSection() const noexcept { return _addressTableSection; }
+
+ //! Ensures that '.addrtab' section exists (creates it if it doesn't) and
+ //! returns it. Can return `nullptr` on out of memory condition.
+ ASMJIT_API Section* ensureAddressTableSection() noexcept;
+
+ //! Used to add an address to an address table.
+ //!
+ //! This implicitly calls `ensureAddressTableSection()` and then creates
+ //! `AddressTableEntry` that is inserted to `_addressTableEntries`. If the
+ //! address already exists this operation does nothing as the same addresses
+ //! use the same slot.
+ //!
+ //! This function should be considered internal as it's used by assemblers to
+ //! insert an absolute address into the address table. Inserting an address into
+ //! the address table without creating a particular relocation entry makes no sense.
+ ASMJIT_API Error addAddressToAddressTable(uint64_t address) noexcept;
+
+ //! \}
+
+ //! \name Labels & Symbols
+ //! \{
+
+ //! Returns array of `LabelEntry*` records.
+ inline const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; }
+
+ //! Returns number of labels created.
+ inline uint32_t labelCount() const noexcept { return _labelEntries.size(); }
+
+ //! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`).
+ inline bool isLabelValid(uint32_t labelId) const noexcept {
+ return labelId < _labelEntries.size();
+ }
+
+ //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`).
+ inline bool isLabelValid(const Label& label) const noexcept {
+ return label.id() < _labelEntries.size();
+ }
+
+ //! \overload
+ inline bool isLabelBound(uint32_t labelId) const noexcept {
+ return isLabelValid(labelId) && _labelEntries[labelId]->isBound();
+ }
+
+ //! Tests whether the `label` is already bound.
+ //!
+ //! Returns `false` if the `label` is not valid.
+ inline bool isLabelBound(const Label& label) const noexcept {
+ return isLabelBound(label.id());
+ }
+
+ //! Returns LabelEntry of the given label `id`.
+ inline LabelEntry* labelEntry(uint32_t labelId) const noexcept {
+ return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr);
+ }
+
+ //! Returns LabelEntry of the given `label`.
+ inline LabelEntry* labelEntry(const Label& label) const noexcept {
+ return labelEntry(label.id());
+ }
+
+ //! Returns offset of a `Label` by its `labelId`.
+ //!
+ //! The offset returned is relative to the start of the section. Zero offset
+ //! is returned for unbound labels, which is their initial offset value.
+ inline uint64_t labelOffset(uint32_t labelId) const noexcept {
+ ASMJIT_ASSERT(isLabelValid(labelId));
+ return _labelEntries[labelId]->offset();
+ }
+
+ //! \overload
+ inline uint64_t labelOffset(const Label& label) const noexcept {
+ return labelOffset(label.id());
+ }
+
+ //! Returns offset of a label by its `labelId` relative to the base offset.
+ //!
+ //! \remarks The offset of the section where the label is bound must be valid
+ //! in order to use this function, otherwise the value returned will not be
+ //! reliable.
+ inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept {
+ ASMJIT_ASSERT(isLabelValid(labelId));
+ const LabelEntry* le = _labelEntries[labelId];
+ return (le->isBound() ? le->section()->offset() : uint64_t(0)) + le->offset();
+ }
+
+ //! \overload
+ inline uint64_t labelOffsetFromBase(const Label& label) const noexcept {
+ return labelOffsetFromBase(label.id());
+ }
+
+ //! Creates a new anonymous label and returns its entry in `entryOut`.
+ //!
+ //! Returns `Error`, does not report error to `ErrorHandler`.
+ ASMJIT_API Error newLabelEntry(LabelEntry** entryOut) noexcept;
+
+ //! Creates a new named \ref LabelEntry of the given label `type`.
+ //!
+ //! \param entryOut Where to store the created \ref LabelEntry.
+ //! \param name The name of the label.
+ //! \param nameSize The length of `name` argument, or `SIZE_MAX` if `name` is
+ //! a null terminated string, which means that the `CodeHolder` will
+ //! use `strlen()` to determine the length.
+ //! \param type The type of the label to create, see \ref Label::LabelType.
+ //! \param parentId Parent id of a local label, otherwise it must be
+ //! \ref Globals::kInvalidId.
+ //!
+ //! \retval Always returns \ref Error, does not report a possible error to
+ //! the attached \ref ErrorHandler.
+ //!
+ //! AsmJit has support for local labels (\ref Label::kTypeLocal) which
+ //! require a parent label id (parentId). The names of local labels can
+ //! conflict with names of other local labels that have a different parent.
+ ASMJIT_API Error newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ //! Returns a label by name.
+ //!
+ //! If the named label doesn't exist, a default constructed \ref Label is
+ //! returned, which has its id set to \ref Globals::kInvalidId.
+ inline Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept {
+ return Label(labelIdByName(name, nameSize, parentId));
+ }
+
+ //! Returns a label id by name.
+ //!
+ //! If the named label doesn't exist \ref Globals::kInvalidId is returned.
+ ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ //! Tests whether there are any unresolved label links.
+ inline bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; }
+ //! Returns the number of label links, which are unresolved.
+ inline size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; }
+
+ //! Creates a new label-link used to store information about yet unbound labels.
+ //!
+ //! Returns `null` if the allocation failed.
+ ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept;
+
+ //! Resolves cross-section links (`LabelLink`) associated with each label that
+ //! was used as a destination in code of a different section. It's only useful
+ //! to people that use multiple sections as it will do nothing if the code only
+ //! contains a single section in which cross-section links are not possible.
+ ASMJIT_API Error resolveUnresolvedLinks() noexcept;
+
+ //! Binds a label to a given `sectionId` and `offset` (relative to start of the section).
+ //!
+ //! This function is generally used by `BaseAssembler::bind()` to do the heavy lifting.
+ ASMJIT_API Error bindLabel(const Label& label, uint32_t sectionId, uint64_t offset) noexcept;
+
+ //! \}
+
+ //! \name Relocations
+ //! \{
+
+ //! Tests whether the code contains relocation entries.
+ inline bool hasRelocEntries() const noexcept { return !_relocations.empty(); }
+ //! Returns array of `RelocEntry*` records.
+ inline const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; }
+
+ //! Returns a RelocEntry of the given `id`.
+ inline RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; }
+
+ //! Creates a new relocation entry of type `relocType`.
+ //!
+ //! Additional fields can be set after the relocation entry was created.
+ ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t relocType) noexcept;
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Flattens all sections by recalculating their offsets, starting at 0.
+ //!
+ //! \note This should never be called more than once.
+ ASMJIT_API Error flatten() noexcept;
+
+ //! Returns the computed size of code & data of all sections.
+ //!
+ //! \note All sections will be iterated over and the code size returned
+ //! would represent the minimum code size of all combined sections after
+ //! applying minimum alignment. Code size may decrease after calling
+ //! `flatten()` and `relocateToBase()`.
+ ASMJIT_API size_t codeSize() const noexcept;
+
+ //! Relocates the code to the given `baseAddress`.
+ //!
+ //! \param baseAddress Absolute base address where the code will be relocated
+ //! to. Please note that nothing is copied to such base address, it's just an
+ //! absolute value used by the relocator to resolve all stored relocations.
+ //!
+ //! \note This should never be called more than once.
+ ASMJIT_API Error relocateToBase(uint64_t baseAddress) noexcept;
+
+ //! Copies a single section into `dst`.
+ ASMJIT_API Error copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t copyOptions = 0) noexcept;
+
+ //! Copies all sections into `dst`.
+ //!
+ //! This should only be used if the data was flattened and there are no gaps
+ //! between the sections. The `dstSize` is always checked and the copy will
+ //! never write anything outside the provided buffer.
+ ASMJIT_API Error copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOptions = 0) noexcept;
+
+ //! \}
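+
+ // A minimal usage sketch of the utilities above for manually relocated code,
+ // where `code` is an initialized CodeHolder that an attached assembler has
+ // emitted into and `alloc` stands for any allocator returning memory at a
+ // known address (both names assumed for illustration):
+ //
+ //   code.flatten();                               // assign final section offsets
+ //   size_t size = code.codeSize();                // combined size of all sections
+ //   void* p = alloc(size);
+ //   code.relocateToBase(uint64_t(uintptr_t(p)));  // resolve relocations against `p`
+ //   code.copyFlattenedData(p, size, CodeHolder::kCopyPadSectionBuffer);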
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use 'CodeHolder::init(const Environment& environment, uint64_t baseAddress)' instead")
+ inline Error init(const CodeInfo& codeInfo) noexcept { return init(codeInfo._environment, codeInfo._baseAddress); }
+
+ ASMJIT_DEPRECATED("Use nevironment() instead")
+ inline CodeInfo codeInfo() const noexcept { return CodeInfo(_environment, _baseAddress); }
+
+ ASMJIT_DEPRECATED("Use BaseEmitter::encodingOptions() - this function always returns zero")
+ inline uint32_t emitterOptions() const noexcept { return 0; }
+
+ ASMJIT_DEPRECATED("Use BaseEmitter::addEncodingOptions() - this function does nothing")
+ inline void addEmitterOptions(uint32_t options) noexcept { DebugUtils::unused(options); }
+
+ ASMJIT_DEPRECATED("Use BaseEmitter::clearEncodingOptions() - this function does nothing")
+ inline void clearEmitterOptions(uint32_t options) noexcept { DebugUtils::unused(options); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEHOLDER_H_INCLUDED
diff --git a/Theodosius/asmjit/core/codewriter.cpp b/Theodosius/asmjit/core/codewriter.cpp
new file mode 100644
index 0000000..6097c0e
--- /dev/null
+++ b/Theodosius/asmjit/core/codewriter.cpp
@@ -0,0 +1,151 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/codeholder.h"
+#include "../core/codewriter_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
+ uint32_t bitCount = format.immBitCount();
+ uint32_t bitShift = format.immBitShift();
+ uint32_t discardLsb = format.immDiscardLsb();
+
+ if (!bitCount || bitCount > format.valueSize() * 8u)
+ return false;
+
+ if (discardLsb) {
+ ASMJIT_ASSERT(discardLsb <= 32);
+ if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
+ return false;
+ offset64 >>= discardLsb;
+ }
+
+ if (!Support::isInt32(offset64))
+ return false;
+
+ int32_t offset32 = int32_t(offset64);
+ if (!Support::isEncodableOffset32(offset32, bitCount))
+ return false;
+
+ switch (format.type()) {
+ case OffsetFormat::kTypeCommon: {
+ *dst = (uint32_t(offset32) & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
+ return true;
+ }
+
+ case OffsetFormat::kTypeAArch64_ADR:
+ case OffsetFormat::kTypeAArch64_ADRP: {
+ // Sanity checks.
+ if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
+ return false;
+
+ uint32_t immLo = uint32_t(offset32) & 0x3u;
+ uint32_t immHi = uint32_t(offset32 >> 2) & Support::lsbMask<uint32_t>(19);
+
+ *dst = (immLo << 29) | (immHi << 5);
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
+ uint32_t bitCount = format.immBitCount();
+ uint32_t discardLsb = format.immDiscardLsb();
+
+ if (!bitCount || bitCount > format.valueSize() * 8u)
+ return false;
+
+ if (discardLsb) {
+ ASMJIT_ASSERT(discardLsb <= 32);
+ if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0)
+ return false;
+ offset64 >>= discardLsb;
+ }
+
+ if (!Support::isEncodableOffset64(offset64, bitCount))
+ return false;
+
+ switch (format.type()) {
+ case OffsetFormat::kTypeCommon: {
+ *dst = (uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
+ // Offset the destination by ValueOffset so the `dst` points to the
+ // patched word instead of the beginning of the patched region.
+ dst = static_cast<char*>(dst) + format.valueOffset();
+
+ switch (format.valueSize()) {
+ case 1: {
+ uint32_t mask;
+ if (!encodeOffset32(&mask, offset64, format))
+ return false;
+
+ Support::writeU8(dst, Support::readU8(dst) | mask);
+ return true;
+ }
+
+ case 2: {
+ uint32_t mask;
+ if (!encodeOffset32(&mask, offset64, format))
+ return false;
+
+ Support::writeU16uLE(dst, Support::readU16uLE(dst) | mask);
+ return true;
+ }
+
+ case 4: {
+ uint32_t mask;
+ if (!encodeOffset32(&mask, offset64, format))
+ return false;
+
+ Support::writeU32uLE(dst, Support::readU32uLE(dst) | mask);
+ return true;
+ }
+
+ case 8: {
+ uint64_t mask;
+ if (!encodeOffset64(&mask, offset64, format))
+ return false;
+
+ Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
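+
+// A minimal usage sketch of the helpers above, with values chosen for
+// illustration: patching the rel32 of a 5-byte x86 call whose displacement
+// bytes start at `buf + 1`. `buf` (pointer to the call opcode) and `delta`
+// (the signed 32-bit displacement) are assumed names. Note that writeOffset()
+// ORs the encoded value into the existing bytes, so the four displacement
+// bytes are expected to be zero beforehand:
+//
+//   OffsetFormat format;
+//   format.resetToDataValue(4);      // a 32-bit value patched as-is
+//   format.setRegion(5, 1);          // 5-byte region, value at offset 1
+//   CodeWriterUtils::writeOffset(buf, delta, format);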
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/codewriter_p.h b/Theodosius/asmjit/core/codewriter_p.h
new file mode 100644
index 0000000..61c9101
--- /dev/null
+++ b/Theodosius/asmjit/core/codewriter_p.h
@@ -0,0 +1,208 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+
+#include "../core/assembler.h"
+#include "../core/codebuffer.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_assembler
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+struct OffsetFormat;
+
+// ============================================================================
+// [asmjit::CodeWriter]
+// ============================================================================
+
+//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
+class CodeWriter {
+public:
+ uint8_t* _cursor;
+
+ ASMJIT_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
+ : _cursor(a->_bufferPtr) {}
+
+ ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
+ size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
+ if (ASMJIT_UNLIKELY(remainingSpace < n)) {
+ CodeBuffer& buffer = a->_section->_buffer;
+ Error err = a->_code->growBuffer(&buffer, n);
+ if (ASMJIT_UNLIKELY(err))
+ return a->reportError(err);
+ _cursor = a->_bufferPtr;
+ }
+ return kErrorOk;
+ }
+
+ ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
+ ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
+ ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
+
+ ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
+ ASMJIT_ASSERT(_cursor >= from);
+ return (size_t)(_cursor - from);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit8(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor++;
+ }
+
+ template<typename T, typename Y>
+ ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size_t(cond) <= 1u);
+
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor += size_t(cond);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memcpy(_cursor, data, size);
+ _cursor += size;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size <= sizeof(T));
+
+ U v = U(value);
+ for (uint32_t i = 0; i < size; i++) {
+ _cursor[i] = uint8_t(v & 0xFFu);
+ v >>= 8;
+ }
+ _cursor += size;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size <= sizeof(T));
+
+ U v = U(value);
+ for (uint32_t i = 0; i < size; i++) {
+ _cursor[i] = uint8_t(v >> (sizeof(T) * 8 - 8));
+ v <<= 8;
+ }
+ _cursor += size;
+ }
+
+ ASMJIT_INLINE void emitZeros(size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memset(_cursor, 0, size);
+ _cursor += size;
+ }
+
+ ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
+ ASMJIT_ASSERT(where < _cursor);
+
+ uint8_t* p = where;
+ while (++p != _cursor)
+ p[-1] = p[0];
+ _cursor--;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
+ uint8_t* p = _cursor;
+
+ while (p != where) {
+ p[0] = p[-1];
+ p--;
+ }
+
+ *p = uint8_t(val & 0xFF);
+ _cursor++;
+ }
+
+ ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
+ CodeBuffer& buffer = a->_section->_buffer;
+ size_t newSize = (size_t)(_cursor - a->_bufferData);
+ ASMJIT_ASSERT(newSize <= buffer.capacity());
+
+ a->_bufferPtr = _cursor;
+ buffer._size = Support::max(buffer._size, newSize);
+ }
+};
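+
+// A minimal usage sketch of the writer above, mirroring how assemblers use it
+// internally. `a` is assumed to be a BaseAssembler attached to a CodeHolder,
+// inside a function that returns Error:
+//
+//   CodeWriter writer(a);
+//   ASMJIT_PROPAGATE(writer.ensureSpace(a, 2));   // grow the buffer if needed
+//   writer.emit8(0x66);                           // first byte
+//   writer.emit8(0x90);                           // second byte (encodes `66 90`)
+//   writer.done(a);                               // publish cursor and buffer size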
+
+// ============================================================================
+// [asmjit::CodeWriterUtils]
+// ============================================================================
+
+namespace CodeWriterUtils {
+
+bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
+bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
+
+bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
+
+} // {CodeWriterUtils}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
diff --git a/Theodosius/asmjit/core/compiler.cpp b/Theodosius/asmjit/core/compiler.cpp
new file mode 100644
index 0000000..4d7baab
--- /dev/null
+++ b/Theodosius/asmjit/core/compiler.cpp
@@ -0,0 +1,628 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/compiler.h"
+#include "../core/cpuinfo.h"
+#include "../core/logger.h"
+#include "../core/rapass_p.h"
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::GlobalConstPoolPass]
+// ============================================================================
+
+class GlobalConstPoolPass : public Pass {
+ typedef Pass Base;
+ ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
+
+ GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
+
+ Error run(Zone* zone, Logger* logger) override {
+ DebugUtils::unused(zone, logger);
+
+ // Flush the global constant pool.
+ BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
+ if (compiler->_globalConstPool) {
+ compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
+ compiler->_globalConstPool = nullptr;
+ }
+
+ return kErrorOk;
+ }
+};
+
+// ============================================================================
+// [asmjit::BaseCompiler - Construction / Destruction]
+// ============================================================================
+
+BaseCompiler::BaseCompiler() noexcept
+ : BaseBuilder(),
+ _func(nullptr),
+ _vRegZone(4096 - Zone::kBlockOverhead),
+ _vRegArray(),
+ _localConstPool(nullptr),
+ _globalConstPool(nullptr) {
+
+ _emitterType = uint8_t(kTypeCompiler);
+ _validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
+}
+BaseCompiler::~BaseCompiler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Function Management]
+// ============================================================================
+
+Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
+ *out = nullptr;
+
+ // Create FuncNode together with all the required surrounding nodes.
+ FuncNode* funcNode;
+ ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
+ ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
+ ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));
+
+ // Initialize the function's detail info.
+ Error err = funcNode->detail().init(signature, environment());
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ // If the Target guarantees greater stack alignment than required by the
+ // calling convention then override it as we can prevent having to perform
+ // dynamic stack alignment
+ uint32_t environmentStackAlignment = _environment.stackAlignment();
+
+ if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
+ funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
+
+ // Initialize the function frame.
+ err = funcNode->_frame.init(funcNode->_funcDetail);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ // Allocate space for function arguments.
+ funcNode->_args = nullptr;
+ if (funcNode->argCount() != 0) {
+ funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
+ if (ASMJIT_UNLIKELY(!funcNode->_args))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
+ }
+
+ ASMJIT_PROPAGATE(registerLabelNode(funcNode));
+
+ *out = funcNode;
+ return kErrorOk;
+}
+
+Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
+ ASMJIT_PROPAGATE(_newFuncNode(out, signature));
+ addFunc(*out);
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
+ uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
+ FuncRetNode* node;
+
+ ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
+ node->setOpCount(opCount);
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->resetOpRange(2, node->opCapacity());
+
+ *out = node;
+ return kErrorOk;
+}
+
+Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
+ ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
+ addNode(*out);
+ return kErrorOk;
+}
+
+FuncNode* BaseCompiler::addFunc(FuncNode* func) {
+ ASMJIT_ASSERT(_func == nullptr);
+ _func = func;
+
+ addNode(func); // Function node.
+ BaseNode* prev = cursor(); // {CURSOR}.
+ addNode(func->exitNode()); // Function exit label.
+ addNode(func->endNode()); // Function end sentinel.
+
+ _setCursor(prev);
+ return func;
+}
+
+Error BaseCompiler::endFunc() {
+ FuncNode* func = _func;
+
+ if (ASMJIT_UNLIKELY(!func))
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+
+ // Add the local constant pool at the end of the function (if exists).
+ if (_localConstPool) {
+ setCursor(func->endNode()->prev());
+ addNode(_localConstPool);
+ _localConstPool = nullptr;
+ }
+
+ // Mark as finished.
+ _func = nullptr;
+
+ SentinelNode* end = func->endNode();
+ setCursor(end);
+
+ return kErrorOk;
+}
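+
+// A minimal usage sketch of the function management above, assuming the X86
+// backend, where `cc` is an x86::Compiler attached to a CodeHolder:
+//
+//   cc.addFunc(FuncSignatureT<int, int, int>(CallConv::kIdHost));
+//   x86::Gp lhs = cc.newInt32("lhs");
+//   x86::Gp rhs = cc.newInt32("rhs");
+//   cc.setArg(0, lhs);
+//   cc.setArg(1, rhs);
+//   cc.add(lhs, rhs);
+//   cc.ret(lhs);
+//   cc.endFunc();
+//   cc.finalize();                   // serializes nodes to the attached code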
+
+Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
+ FuncNode* func = _func;
+
+ if (ASMJIT_UNLIKELY(!func))
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+
+ if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
+ return reportError(DebugUtils::errored(kErrorInvalidVirtId));
+
+ VirtReg* vReg = virtRegByReg(r);
+ func->setArg(argIndex, valueIndex, vReg);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Function Invocation]
+// ============================================================================
+
+Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ InvokeNode* node;
+ ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));
+
+ node->setOpCount(1);
+ node->setOp(0, o0);
+ node->resetOpRange(1, node->opCapacity());
+
+ Error err = node->detail().init(signature, environment());
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ // Skip the allocation if there are no arguments.
+ uint32_t argCount = signature.argCount();
+ if (argCount) {
+ node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
+ if (!node->_args)
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
+ }
+
+ *out = node;
+ return kErrorOk;
+}
+
+Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
+ addNode(*out);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Virtual Registers]
+// ============================================================================
+
+static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
+ uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
+
+ char buf[64];
+ int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
+
+ ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
+ vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
+}
+
+Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
+ *out = nullptr;
+ uint32_t index = _vRegArray.size();
+
+ if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
+ return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
+
+ if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
+ if (ASMJIT_UNLIKELY(!vReg))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ uint32_t size = Type::sizeOf(typeId);
+ uint32_t alignment = Support::min<uint32_t>(size, 64);
+
+ vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (name && name[0] != '\0')
+ vReg->_name.setData(&_dataZone, name, SIZE_MAX);
+ else
+ BaseCompiler_assignGenericName(this, vReg);
+#else
+ DebugUtils::unused(name);
+#endif
+
+ _vRegArray.appendUnsafe(vReg);
+ *out = vReg;
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
+ RegInfo regInfo;
+ out->reset();
+
+ Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ VirtReg* vReg;
+ ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
+
+ out->_initReg(regInfo.signature(), vReg->id());
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
+ va_list ap;
+ StringTmp<256> sb;
+
+ va_start(ap, fmt);
+ sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ return _newReg(out, typeId, sb.data());
+}
+
+Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
+ out->reset();
+
+ RegInfo regInfo;
+ uint32_t typeId;
+
+ if (isVirtRegValid(ref)) {
+ VirtReg* vRef = virtRegByReg(ref);
+ typeId = vRef->typeId();
+
+ // NOTE: It's possible to cast one register type to another if it's the
+ // same register group. However, VirtReg always contains the TypeId that
+ // was used to create the register. This means that in some cases we may
+ // end up having a different size of `ref` and `vRef`. In such a case we
+ // adjust the TypeId to match the `ref` register type instead of the
+ // original register type, which should be the expected behavior.
+ uint32_t typeSize = Type::sizeOf(typeId);
+ uint32_t refSize = ref.size();
+
+ if (typeSize != refSize) {
+ if (Type::isInt(typeId)) {
+ // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
+ switch (refSize) {
+ case 1: typeId = Type::kIdI8 | (typeId & 1); break;
+ case 2: typeId = Type::kIdI16 | (typeId & 1); break;
+ case 4: typeId = Type::kIdI32 | (typeId & 1); break;
+ case 8: typeId = Type::kIdI64 | (typeId & 1); break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+ else if (Type::isMmx(typeId)) {
+ // MMX register - always use 64-bit.
+ typeId = Type::kIdMmx64;
+ }
+ else if (Type::isMask(typeId)) {
+ // Mask register - change TypeId to match `ref` size.
+ switch (refSize) {
+ case 1: typeId = Type::kIdMask8; break;
+ case 2: typeId = Type::kIdMask16; break;
+ case 4: typeId = Type::kIdMask32; break;
+ case 8: typeId = Type::kIdMask64; break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+ else {
+ // VEC register - change TypeId to match `ref` size, keep vector metadata.
+ uint32_t elementTypeId = Type::baseOf(typeId);
+
+ switch (refSize) {
+ case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
+ case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
+ case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+
+ if (typeId == Type::kIdVoid)
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+ }
+ }
+ else {
+ typeId = ref.type();
+ }
+
+ Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ VirtReg* vReg;
+ ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
+
+ out->_initReg(regInfo.signature(), vReg->id());
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
+ va_list ap;
+ StringTmp<256> sb;
+
+ va_start(ap, fmt);
+ sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ return _newReg(out, ref, sb.data());
+}
+
+Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
+ out->reset();
+
+ if (size == 0)
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment == 0)
+ alignment = 1;
+
+ if (!Support::isPowerOf2(alignment))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment > 64)
+ alignment = 64;
+
+ VirtReg* vReg;
+ ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));
+
+ vReg->_virtSize = size;
+ vReg->_isStack = true;
+ vReg->_alignment = uint8_t(alignment);
+
+ // Set the memory operand to GPD/GPQ and its id to VirtReg.
+ *out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
+ return kErrorOk;
+}
+
+Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
+ if (!isVirtIdValid(virtId))
+ return DebugUtils::errored(kErrorInvalidVirtId);
+
+ if (newAlignment && !Support::isPowerOf2(newAlignment))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (newAlignment > 64)
+ newAlignment = 64;
+
+ VirtReg* vReg = virtRegById(virtId);
+ if (newSize)
+ vReg->_virtSize = newSize;
+
+ if (newAlignment)
+ vReg->_alignment = uint8_t(newAlignment);
+
+ // This is required if the RAPass is already running. There is a chance that
+ // a stack slot has already been allocated, and in that case it has to be
+ // updated as well; otherwise we would allocate the wrong amount of memory.
+ RAWorkReg* workReg = vReg->_workReg;
+ if (workReg && workReg->_stackSlot) {
+ workReg->_stackSlot->_size = vReg->_virtSize;
+ workReg->_stackSlot->_alignment = vReg->_alignment;
+ }
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
+ out->reset();
+ ConstPoolNode** pPool;
+
+ if (scope == ConstPool::kScopeLocal)
+ pPool = &_localConstPool;
+ else if (scope == ConstPool::kScopeGlobal)
+ pPool = &_globalConstPool;
+ else
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (!*pPool)
+ ASMJIT_PROPAGATE(_newConstPoolNode(pPool));
+
+ ConstPoolNode* pool = *pPool;
+ size_t off;
+ Error err = pool->add(data, size, off);
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ *out = BaseMem(BaseMem::Decomposed {
+ Label::kLabelTag, // Base type.
+ pool->labelId(), // Base id.
+ 0, // Index type.
+ 0, // Index id.
+ int32_t(off), // Offset.
+ uint32_t(size), // Size.
+ 0 // Flags.
+ });
+
+ return kErrorOk;
+}
+
+void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
+ if (!reg.isVirtReg()) return;
+
+ VirtReg* vReg = virtRegById(reg.id());
+ if (!vReg) return;
+
+ if (fmt && fmt[0] != '\0') {
+ char buf[128];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+ va_end(ap);
+
+ vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
+ }
+ else {
+ BaseCompiler_assignGenericName(this, vReg);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Jump Annotations]
+// ============================================================================
+
+Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
+ JumpNode* node = _allocator.allocT<JumpNode>();
+ uint32_t opCount = 1;
+
+ *out = node;
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
+ node->setOp(0, o0);
+ node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
+ uint32_t options = instOptions() | forcedInstOptions();
+ RegOnly extra = extraReg();
+ const char* comment = inlineComment();
+
+ resetInstOptions();
+ resetInlineComment();
+ resetExtraReg();
+
+ JumpNode* node;
+ ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));
+
+ node->setExtraReg(extra);
+ if (comment)
+ node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+JumpAnnotation* BaseCompiler::newJumpAnnotation() {
+ if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ uint32_t id = _jumpAnnotations.size();
+ JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
+
+ if (!jumpAnnotation) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ _jumpAnnotations.appendUnsafe(jumpAnnotation);
+ return jumpAnnotation;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Events]
+// ============================================================================
+
+Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
+ uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
+ _gpRegInfo.setSignature(archTraits.regTypeToSignature(nativeRegType));
+
+ Error err = addPassT<GlobalConstPoolPass>();
+ if (ASMJIT_UNLIKELY(err)) {
+ onDetach(code);
+ return err;
+ }
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
+ _func = nullptr;
+ _localConstPool = nullptr;
+ _globalConstPool = nullptr;
+
+ _vRegArray.reset();
+ _vRegZone.reset();
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::FuncPass - Construction / Destruction]
+// ============================================================================
+
+FuncPass::FuncPass(const char* name) noexcept
+ : Pass(name) {}
+
+// ============================================================================
+// [asmjit::FuncPass - Run]
+// ============================================================================
+
+Error FuncPass::run(Zone* zone, Logger* logger) {
+ BaseNode* node = cb()->firstNode();
+ if (!node) return kErrorOk;
+
+ do {
+ if (node->type() == BaseNode::kNodeFunc) {
+ FuncNode* func = node->as<FuncNode>();
+ node = func->endNode();
+ ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
+ }
+
+ // Find a function by skipping all nodes that are not `kNodeFunc`.
+ do {
+ node = node->next();
+ } while (node && node->type() != BaseNode::kNodeFunc);
+ } while (node);
+
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/Theodosius/asmjit/core/compiler.h b/Theodosius/asmjit/core/compiler.h
new file mode 100644
index 0000000..eb2a5aa
--- /dev/null
+++ b/Theodosius/asmjit/core/compiler.h
@@ -0,0 +1,763 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
+#define ASMJIT_CORE_COMPILER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/builder.h"
+#include "../core/constpool.h"
+#include "../core/compilerdefs.h"
+#include "../core/func.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class JumpAnnotation;
+class JumpNode;
+class FuncNode;
+class FuncRetNode;
+class InvokeNode;
+
+//! \addtogroup asmjit_compiler
+//! \{
+
+// ============================================================================
+// [asmjit::BaseCompiler]
+// ============================================================================
+
+//! Code emitter that uses virtual registers and performs register allocation.
+//!
+//! Compiler is a high-level code-generation tool that provides register
+//! allocation and automatic handling of function calling conventions. It was
+//! primarily designed for merging multiple parts of code into a function
+//! without worrying about registers and function calling conventions.
+//!
+//! BaseCompiler can be used, with a minimum effort, to handle 32-bit and
+//! 64-bit code generation within a single code base.
+//!
+//! BaseCompiler is based on BaseBuilder and contains all the features it
+//! provides. It means that the code it stores can be modified (removed, added,
+//! injected) and analyzed. When the code is finalized the compiler can emit
+//! the code into an Assembler to translate the abstract representation into a
+//! machine code.
+//!
+//! Check out architecture specific compilers for more details and examples:
+//!
+//! - \ref x86::Compiler - X86/X64 compiler implementation.
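+//!
+//! A minimal end-to-end sketch of how the emitter is typically driven (assuming
+//! the X86 backend and the `JitRuntime` / `FuncSignatureT` helpers from the
+//! public asmjit API):
+//!
+//! \code
+//! #include <asmjit/x86.h>
+//!
+//! using namespace asmjit;
+//!
+//! typedef int (*Func)(void);
+//!
+//! int main() {
+//!   JitRuntime rt;                        // JIT runtime that owns the generated code.
+//!   CodeHolder code;                      // Holds code and relocation information.
+//!   code.init(rt.environment());          // Match the runtime's environment.
+//!
+//!   x86::Compiler cc(&code);              // Architecture-specific BaseCompiler.
+//!   cc.addFunc(FuncSignatureT<int>());    // Begin a function returning `int`.
+//!
+//!   x86::Gp result = cc.newGpd("result"); // Virtual register, no physical register yet.
+//!   cc.mov(result, 42);                   // Use it like a normal register.
+//!   cc.ret(result);                       // Return it from the function.
+//!   cc.endFunc();                         // End of the function body.
+//!
+//!   cc.finalize();                        // Run passes (register allocation) and emit.
+//!
+//!   Func fn;
+//!   if (rt.add(&fn, &code) != kErrorOk)   // Add the emitted code to the runtime.
+//!     return 1;
+//!   return fn() == 42 ? 0 : 1;            // Execute the generated function.
+//! }
+//! \endcode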
+class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
+public:
+ ASMJIT_NONCOPYABLE(BaseCompiler)
+ typedef BaseBuilder Base;
+
+ //! Current function.
+ FuncNode* _func;
+ //! Allocates `VirtReg` objects.
+ Zone _vRegZone;
+ //! Stores array of `VirtReg` pointers.
+ ZoneVector<VirtReg*> _vRegArray;
+ //! Stores jump annotations.
+ ZoneVector<JumpAnnotation*> _jumpAnnotations;
+
+ //! Local constant pool, flushed at the end of each function.
+ ConstPoolNode* _localConstPool;
+ //! Global constant pool, flushed by `finalize()`.
+ ConstPoolNode* _globalConstPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseCompiler` instance.
+ ASMJIT_API BaseCompiler() noexcept;
+ //! Destroys the `BaseCompiler` instance.
+ ASMJIT_API virtual ~BaseCompiler() noexcept;
+
+ //! \}
+
+ //! \name Function Management
+ //! \{
+
+ //! Returns the current function.
+ inline FuncNode* func() const noexcept { return _func; }
+
+ //! Creates a new \ref FuncNode.
+ ASMJIT_API Error _newFuncNode(FuncNode** out, const FuncSignature& signature);
+ //! Creates a new \ref FuncNode and adds it to the compiler.
+ ASMJIT_API Error _addFuncNode(FuncNode** out, const FuncSignature& signature);
+
+ //! Creates a new \ref FuncRetNode.
+ ASMJIT_API Error _newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
+ //! Creates a new \ref FuncRetNode and adds it to the compiler.
+ ASMJIT_API Error _addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
+
+ //! Creates a new \ref FuncNode with the given `signature` and returns it.
+ inline FuncNode* newFunc(const FuncSignature& signature) {
+ FuncNode* node;
+ _newFuncNode(&node, signature);
+ return node;
+ }
+
+ //! Creates a new \ref FuncNode with the given `signature`, adds it to the
+ //! compiler by using the \ref addFunc(FuncNode*) overload, and returns it.
+ inline FuncNode* addFunc(const FuncSignature& signature) {
+ FuncNode* node;
+ _addFuncNode(&node, signature);
+ return node;
+ }
+
+ //! Adds a function `node` to the instruction stream.
+ ASMJIT_API FuncNode* addFunc(FuncNode* func);
+ //! Emits a sentinel that marks the end of the current function.
+ ASMJIT_API Error endFunc();
+
+ ASMJIT_API Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
+
+ //! Sets a function argument at `argIndex` to `reg`.
+ inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
+ //! Sets a function argument at `argIndex` at `valueIndex` to `reg`.
+ inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }
+
+ inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
+ FuncRetNode* node;
+ _newRetNode(&node, o0, o1);
+ return node;
+ }
+
+ inline FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) {
+ FuncRetNode* node;
+ _addRetNode(&node, o0, o1);
+ return node;
+ }
+
+ //! \}
+
+ //! \name Function Invocation
+ //! \{
+
+ //! Creates a new \ref InvokeNode.
+ ASMJIT_API Error _newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
+ //! Creates a new \ref InvokeNode and adds it to Compiler.
+ ASMJIT_API Error _addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
+
+ //! Creates a new `InvokeNode`.
+ inline InvokeNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ InvokeNode* node;
+ _newInvokeNode(&node, instId, o0, signature);
+ return node;
+ }
+
+ //! Adds a new `InvokeNode`.
+ inline InvokeNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
+ InvokeNode* node;
+ _addInvokeNode(&node, instId, o0, signature);
+ return node;
+ }
+
+ //! \}
+
+ //! \name Virtual Registers
+ //! \{
+
+ //! Creates a new virtual register representing the given `typeId` and `signature`.
+ //!
+ //! \note This function is public, but it's not generally recommended for
+ //! AsmJit users; use the architecture-specific `newReg()` functionality
+ //! instead, or functions like \ref _newReg() and \ref _newRegFmt().
+ ASMJIT_API Error newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name);
+
+ //! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
+ ASMJIT_API Error _newReg(BaseReg* out, uint32_t typeId, const char* name = nullptr);
+
+ //! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
+ //!
+ //! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
+ ASMJIT_API Error _newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...);
+
+ //! Creates a new virtual register compatible with the provided reference register `ref`.
+ ASMJIT_API Error _newReg(BaseReg* out, const BaseReg& ref, const char* name = nullptr);
+
+ //! Creates a new virtual register compatible with the provided reference register `ref`.
+ //!
+ //! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
+ ASMJIT_API Error _newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...);
+
+ //! Tests whether the given `id` is a valid virtual register id.
+ inline bool isVirtIdValid(uint32_t id) const noexcept {
+ uint32_t index = Operand::virtIdToIndex(id);
+ return index < _vRegArray.size();
+ }
+ //! Tests whether the given `reg` is a virtual register having a valid id.
+ inline bool isVirtRegValid(const BaseReg& reg) const noexcept {
+ return isVirtIdValid(reg.id());
+ }
+
+ //! Returns \ref VirtReg associated with the given `id`.
+ inline VirtReg* virtRegById(uint32_t id) const noexcept {
+ ASMJIT_ASSERT(isVirtIdValid(id));
+ return _vRegArray[Operand::virtIdToIndex(id)];
+ }
+
+ //! Returns \ref VirtReg associated with the given `reg`.
+ inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
+
+ //! Returns \ref VirtReg associated with the given virtual register `index`.
+ //!
+ //! \note This is not the same as virtual register id. The conversion between
+ //! id and its index is implemented by \ref Operand_::virtIdToIndex() and \ref
+ //! Operand_::indexToVirtId() functions.
+ inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
+
+ //! Returns an array of all virtual registers managed by the Compiler.
+ inline const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
+
+ //! \}
+
+ //! \name Stack
+ //! \{
+
+ //! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
+ //!
+ //! \note `name` can be used to give the stack a name, for debugging purposes.
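+ //!
+ //! A minimal sketch of the call (assuming `cc` is an attached compiler
+ //! instance; the name and sizes below are only illustrative):
+ //!
+ //! \code
+ //! BaseMem stackMem;
+ //! // Reserve a 256-byte, 16-byte aligned slot in the function's frame.
+ //! cc._newStack(&stackMem, 256, 16, "tmp_buffer");
+ //! \endcode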
+ ASMJIT_API Error _newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name = nullptr);
+
+ //! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
+ ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
+
+ //! Updates the stack size of a stack created by `_newStack()`.
+ inline Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
+ return setStackSize(mem.id(), newSize, newAlignment);
+ }
+
+ //! \}
+
+ //! \name Constants
+ //! \{
+
+ //! Creates a new constant of the given `scope` (see \ref ConstPool::Scope).
+ //!
+ //! This function adds a constant of the given `size` to the built-in \ref
+ //! ConstPool and stores the reference to that constant to the `out` operand.
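+ //!
+ //! For illustration only (assuming `cc` is an attached compiler instance;
+ //! the architecture-specific emitters typically provide more convenient
+ //! wrappers around this function):
+ //!
+ //! \code
+ //! uint64_t value = 0x0101010101010101u;
+ //! BaseMem constMem;
+ //! // The returned memory operand refers to the constant inside the pool.
+ //! cc._newConst(&constMem, ConstPool::kScopeLocal, &value, sizeof(value));
+ //! \endcode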
+ ASMJIT_API Error _newConst(BaseMem* out, uint32_t scope, const void* data, size_t size);
+
+ //! \}
+
+ //! \name Miscellaneous
+ //! \{
+
+ //! Rename the given virtual register `reg` to a formatted string `fmt`.
+ ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
+
+ //! \}
+
+ //! \name Jump Annotations
+ //! \{
+
+ inline const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
+ return _jumpAnnotations;
+ }
+
+ ASMJIT_API Error newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation);
+ ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
+
+ //! Returns a new `JumpAnnotation` instance, which can be used to aggregate
+ //! possible targets of a jump where the target is not a label, for example
+ //! to implement jump tables.
+ ASMJIT_API JumpAnnotation* newJumpAnnotation();
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("alloc() has no effect, it will be removed in the future")
+ inline void alloc(BaseReg&) {}
+ ASMJIT_DEPRECATED("spill() has no effect, it will be removed in the future")
+ inline void spill(BaseReg&) {}
+#endif // !ASMJIT_NO_DEPRECATED
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::JumpAnnotation]
+// ============================================================================
+
+//! Jump annotation used to annotate jumps.
+//!
+//! \ref BaseCompiler allows emitting jumps where the target is either a register
+//! or a memory operand. Such jumps cannot be trivially inspected, so instead of
+//! relying on heuristics AsmJit allows such jumps to be annotated with their
+//! possible targets. The register allocator then uses the annotation to construct
+//! the control flow, which is then used by liveness analysis and other tools to
+//! prepare the ground for register allocation.
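+//!
+//! A sketch of the intended flow (here `cc` is an attached compiler, `caseA`
+//! and `caseB` are labels created by it, `target` is a register operand holding
+//! the jump destination, and `jmpInstId` stands for the architecture-specific
+//! jump instruction id, e.g. `x86::Inst::kIdJmp`):
+//!
+//! \code
+//! JumpAnnotation* annotation = cc.newJumpAnnotation();
+//! annotation->addLabel(caseA);   // List every label the indirect jump may reach.
+//! annotation->addLabel(caseB);
+//!
+//! // Emit the indirect jump together with its annotation so the register
+//! // allocator can construct control flow across it.
+//! cc.emitAnnotatedJump(jmpInstId, target, annotation);
+//! \endcode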
+class JumpAnnotation {
+public:
+ ASMJIT_NONCOPYABLE(JumpAnnotation)
+
+ //! Compiler that owns this JumpAnnotation.
+ BaseCompiler* _compiler;
+ //! Annotation identifier.
+ uint32_t _annotationId;
+ //! Vector of label identifiers, see \ref labelIds().
+ ZoneVector<uint32_t> _labelIds;
+
+ inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
+ : _compiler(compiler),
+ _annotationId(annotationId) {}
+
+ //! Returns the compiler that owns this JumpAnnotation.
+ inline BaseCompiler* compiler() const noexcept { return _compiler; }
+ //! Returns the annotation id.
+ inline uint32_t annotationId() const noexcept { return _annotationId; }
+ //! Returns a vector of label identifiers that lists all targets of the jump.
+ const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
+
+ //! Tests whether the given `label` is a target of this JumpAnnotation.
+ inline bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
+ //! Tests whether the given `labelId` is a target of this JumpAnnotation.
+ inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
+
+ //! Adds the `label` to the list of targets of this JumpAnnotation.
+ inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
+ //! Adds the `labelId` to the list of targets of this JumpAnnotation.
+ inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
+};
+
+// ============================================================================
+// [asmjit::JumpNode]
+// ============================================================================
+
+//! Jump instruction with \ref JumpAnnotation.
+//!
+//! \note This node should only be used to represent a jump whose target cannot
+//! be deduced by examining the instruction operands, for example when the jump
+//! target is a register or a memory location. This pattern is often used to
+//! perform indirect jumps through a jump table, e.g. to implement a `switch`
+//! statement.
+class JumpNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(JumpNode)
+
+ JumpAnnotation* _annotation;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
+ : InstNode(cc, instId, options, opCount, kBaseOpCapacity),
+ _annotation(annotation) {
+ setType(kNodeJump);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether this JumpNode has associated a \ref JumpAnnotation.
+ inline bool hasAnnotation() const noexcept { return _annotation != nullptr; }
+ //! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
+ inline JumpAnnotation* annotation() const noexcept { return _annotation; }
+ //! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
+ inline void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncNode]
+// ============================================================================
+
+//! Function node represents a function used by \ref BaseCompiler.
+//!
+//! A function is composed of the following:
+//!
+//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit.
+//! To get the entry, simply use \ref FuncNode::label(), which is the same
+//! as \ref LabelNode::label().
+//!
+//! - Function exit, which is represented by \ref FuncNode::exitNode(). A
+//! helper function \ref FuncNode::exitLabel() exists and returns an exit
+//! label instead of node.
+//!
+//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of
+//! a function - there should be no code that belongs to the function after
+//! this node, but the Compiler doesn't enforce that at the moment.
+//!
+//! - Function detail, see \ref FuncNode::detail().
+//!
+//! - Function frame, see \ref FuncNode::frame().
+//!
+//! - Function arguments mapped to virtual registers, see \ref FuncNode::args().
+//!
+//! In a node list, the function and its body looks like the following:
+//!
+//! \code{.unparsed}
+//! [...] - Anything before the function.
+//!
+//! [FuncNode] - Entry point of the function, acts as a label as well.
+//! - Prolog inserted by the register allocator.
+//! {...} - Function body - user code basically.
+//! [ExitLabel] - Exit label
+//! - Epilog inserted by the register allocator.
+//! - Return inserted by the register allocator.
+//! {...} - Can contain data or user code (error handling, special cases, ...).
+//! [FuncEnd] - End sentinel
+//!
+//! [...] - Anything after the function.
+//! \endcode
+//!
+//! When a function is added to the compiler by \ref BaseCompiler::addFunc() it
+//! actually inserts 3 nodes (FuncNode, ExitLabel, and FuncEnd) and sets the
+//! current cursor to the FuncNode. When \ref BaseCompiler::endFunc() is called
+//! the cursor is set to FuncEnd. This guarantees that the user can use the
+//! ExitLabel as a marker after which additional code or data can be placed,
+//! which is a common practice.
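+//!
+//! For example, adding a two-argument function and mapping its incoming
+//! arguments to virtual registers looks roughly like this (X86 backend assumed;
+//! `newGpd` and `FuncSignatureT` come from the public asmjit API):
+//!
+//! \code
+//! FuncNode* funcNode = cc.addFunc(FuncSignatureT<int, int, int>());
+//! // funcNode->label() is the function's entry point, funcNode->exitLabel()
+//! // its exit, and funcNode->endNode() its end sentinel.
+//!
+//! x86::Gp a = cc.newGpd("a");
+//! x86::Gp b = cc.newGpd("b");
+//!
+//! cc.setArg(0, a);               // Bind the first argument to `a`.
+//! cc.setArg(1, b);               // Bind the second argument to `b`.
+//!
+//! cc.add(a, b);
+//! cc.ret(a);
+//! cc.endFunc();
+//! \endcode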
+class FuncNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncNode)
+
+ //! Arguments pack.
+ struct ArgPack {
+ VirtReg* _data[Globals::kMaxValuePack];
+
+ inline void reset() noexcept {
+ for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
+ _data[valueIndex] = nullptr;
+ }
+
+ inline VirtReg*& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
+ inline VirtReg* const& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
+ };
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+ //! Function frame.
+ FuncFrame _frame;
+ //! Function exit label.
+ LabelNode* _exitNode;
+ //! Function end (sentinel).
+ SentinelNode* _end;
+
+ //! Argument packs.
+ ArgPack* _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncNode` instance.
+ //!
+ //! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
+ ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
+ : LabelNode(cb),
+ _funcDetail(),
+ _frame(),
+ _exitNode(nullptr),
+ _end(nullptr),
+ _args(nullptr) {
+ setType(kNodeFunc);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns function exit `LabelNode`.
+ inline LabelNode* exitNode() const noexcept { return _exitNode; }
+ //! Returns function exit label.
+ inline Label exitLabel() const noexcept { return _exitNode->label(); }
+
+ //! Returns "End of Func" sentinel.
+ inline SentinelNode* endNode() const noexcept { return _end; }
+
+ //! Returns function declaration.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns function declaration.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns function frame.
+ inline FuncFrame& frame() noexcept { return _frame; }
+ //! Returns function frame.
+ inline const FuncFrame& frame() const noexcept { return _frame; }
+
+ //! Tests whether the function has a return value.
+ inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
+ //! Returns arguments count.
+ inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+
+ //! Returns argument packs.
+ inline ArgPack* argPacks() const noexcept { return _args; }
+
+ //! Returns argument pack at `argIndex`.
+ inline ArgPack& argPack(size_t argIndex) const noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ return _args[argIndex];
+ }
+
+ //! Sets argument at `argIndex`.
+ inline void setArg(size_t argIndex, VirtReg* vReg) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ _args[argIndex][0] = vReg;
+ }
+
+ //! Sets argument at `argIndex` and `valueIndex`.
+ inline void setArg(size_t argIndex, size_t valueIndex, VirtReg* vReg) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ _args[argIndex][valueIndex] = vReg;
+ }
+
+ //! Resets argument pack at `argIndex`.
+ inline void resetArg(size_t argIndex) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ _args[argIndex].reset();
+ }
+
+ //! Resets argument pack at `argIndex`.
+ inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ _args[argIndex][valueIndex] = nullptr;
+ }
+
+ //! Returns function attributes.
+ inline uint32_t attributes() const noexcept { return _frame.attributes(); }
+ //! Adds `attrs` to the function attributes.
+ inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncRetNode]
+// ============================================================================
+
+//! Function return, used by \ref BaseCompiler.
+class FuncRetNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncRetNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncRetNode` instance.
+ inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
+ _any._nodeType = kNodeFuncRet;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InvokeNode]
+// ============================================================================
+
+//! Function invocation, used by \ref BaseCompiler.
+class InvokeNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(InvokeNode)
+
+ //! Operand pack provides multiple operands that can be associated with a
+ //! single return value or function argument. Sometimes this is necessary to
+ //! express an argument or return value that requires multiple registers, for
+ //! example a 64-bit value in 32-bit mode, or passing / returning homogeneous
+ //! data structures.
+ struct OperandPack {
+ //! Operands.
+ Operand_ _data[Globals::kMaxValuePack];
+
+ //! Reset the pack by resetting all operands in the pack.
+ inline void reset() noexcept {
+ for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
+ _data[valueIndex].reset();
+ }
+
+ //! Returns an operand at the given `valueIndex`.
+ inline Operand& operator[](size_t valueIndex) noexcept {
+ ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
+ return _data[valueIndex].as<Operand>();
+ }
+
+ //! Returns an operand at the given `valueIndex` (const).
+ const inline Operand& operator[](size_t valueIndex) const noexcept {
+ ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
+ return _data[valueIndex].as<Operand>();
+ }
+ };
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+ //! Function return value(s).
+ OperandPack _rets;
+ //! Function arguments.
+ OperandPack* _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InvokeNode` instance.
+ inline InvokeNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
+ : InstNode(cb, instId, options, kBaseOpCapacity),
+ _funcDetail(),
+ _args(nullptr) {
+ setType(kNodeInvoke);
+ _resetOps();
+ _rets.reset();
+ addFlags(kFlagIsRemovable);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets the function signature.
+ inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
+ return _funcDetail.init(signature, environment);
+ }
+
+ //! Returns the function detail.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns the function detail.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns the target operand.
+ inline Operand& target() noexcept { return _opArray[0].as<Operand>(); }
+ //! \overload
+ inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
+
+ //! Tests whether the function has a return value.
+ inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
+ //! Returns the number of function arguments.
+ inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+
+ //! Returns operand pack representing function return value(s).
+ inline OperandPack& retPack() noexcept { return _rets; }
+ //! Returns operand pack representing function return value(s).
+ inline const OperandPack& retPack() const noexcept { return _rets; }
+
+ //! Returns the return value at the given `valueIndex`.
+ inline Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
+ //! \overload
+ inline const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
+
+ //! Returns an operand pack representing the function argument at `argIndex`.
+ inline OperandPack& argPack(size_t argIndex) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ return _args[argIndex];
+ }
+ //! \overload
+ inline const OperandPack& argPack(size_t argIndex) const noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ return _args[argIndex];
+ }
+
+ //! Returns a function argument at the given `argIndex`.
+ inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ return _args[argIndex][valueIndex];
+ }
+ //! \overload
+ inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ return _args[argIndex][valueIndex];
+ }
+
+ //! Sets the function return value at `valueIndex` to `op`.
+ inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
+ //! Sets the function argument at `argIndex` and `valueIndex` to `op`.
+ inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
+ ASMJIT_ASSERT(argIndex < argCount());
+ _args[argIndex][valueIndex] = op;
+ }
+
+ //! Sets the function return value at `valueIndex` to `reg`.
+ inline void setRet(size_t valueIndex, const BaseReg& reg) noexcept { _setRet(valueIndex, reg); }
+
+ //! Sets the first function argument in a value-pack at `argIndex` to `reg`.
+ inline void setArg(size_t argIndex, const BaseReg& reg) noexcept { _setArg(argIndex, 0, reg); }
+ //! Sets the first function argument in a value-pack at `argIndex` to `imm`.
+ inline void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
+
+ //! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
+ inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
+ //! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
+ inline void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncPass]
+// ============================================================================
+
+//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
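+//!
+//! A do-nothing pass, as a sketch of the expected subclassing pattern (the
+//! class name here is made up for the example):
+//!
+//! \code
+//! class MyFuncPass : public FuncPass {
+//! public:
+//!   MyFuncPass() noexcept : FuncPass("MyFuncPass") {}
+//!
+//!   Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override {
+//!     DebugUtils::unused(zone, logger);
+//!     // Walk the nodes between `func` and `func->endNode()` here.
+//!     return kErrorOk;
+//!   }
+//! };
+//! \endcode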
+class ASMJIT_VIRTAPI FuncPass : public Pass {
+public:
+ ASMJIT_NONCOPYABLE(FuncPass)
+ typedef Pass Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API FuncPass(const char* name) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated `BaseCompiler`.
+ inline BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
+
+ //! \}
+
+ //! \name Run
+ //! \{
+
+ //! Calls `runOnFunction()` on each `FuncNode` node found.
+ ASMJIT_API Error run(Zone* zone, Logger* logger) override;
+
+ //! Called once per `FuncNode`.
+ virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) = 0;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_COMPILER_H_INCLUDED
diff --git a/Theodosius/asmjit/core/compilerdefs.h b/Theodosius/asmjit/core/compilerdefs.h
new file mode 100644
index 0000000..32f0757
--- /dev/null
+++ b/Theodosius/asmjit/core/compilerdefs.h
@@ -0,0 +1,170 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
+#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
+
+#include "../core/api-config.h"
+#include "../core/operand.h"
+#include "../core/zonestring.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class RAWorkReg;
+
+//! \addtogroup asmjit_compiler
+//! \{
+
+// ============================================================================
+// [asmjit::VirtReg]
+// ============================================================================
+
+//! Virtual register data, managed by \ref BaseCompiler.
+class VirtReg {
+public:
+ ASMJIT_NONCOPYABLE(VirtReg)
+
+ //! Virtual register id.
+ uint32_t _id = 0;
+ //! Virtual register info (signature).
+ RegInfo _info = {};
+ //! Virtual register size (can be smaller than `regInfo._size`).
+ uint32_t _virtSize = 0;
+ //! Virtual register alignment (for spilling).
+ uint8_t _alignment = 0;
+ //! Type-id.
+ uint8_t _typeId = 0;
+ //! Virtual register weight for alloc/spill decisions.
+ uint8_t _weight = 1;
+ //! True if this is a fixed register, never reallocated.
+ uint8_t _isFixed : 1;
+ //! True if the virtual register is only used as a stack (never accessed as register).
+ uint8_t _isStack : 1;
+ uint8_t _reserved : 6;
+
+ //! Virtual register name (user provided or automatically generated).
+ ZoneString<16> _name {};
+
+ // -------------------------------------------------------------------------
+ // The following members are used exclusively by RAPass. They are initialized
+ // when the VirtReg is created to NULL pointers and then changed during RAPass
+ // execution. RAPass sets them back to NULL before it returns.
+ // -------------------------------------------------------------------------
+
+ //! Reference to `RAWorkReg`, used during register allocation.
+ RAWorkReg* _workReg = nullptr;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
+ : _id(id),
+ _info { signature },
+ _virtSize(virtSize),
+ _alignment(uint8_t(alignment)),
+ _typeId(uint8_t(typeId)),
+ _isFixed(false),
+ _isStack(false),
+ _reserved(0) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the virtual register id.
+ inline uint32_t id() const noexcept { return _id; }
+
+ //! Returns the virtual register name.
+ inline const char* name() const noexcept { return _name.data(); }
+ //! Returns the size of the virtual register name.
+ inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+ //! Returns a register information that wraps the register signature.
+ inline const RegInfo& info() const noexcept { return _info; }
+ //! Returns a virtual register type (maps to the physical register type as well).
+ inline uint32_t type() const noexcept { return _info.type(); }
+ //! Returns a virtual register group (maps to the physical register group as well).
+ inline uint32_t group() const noexcept { return _info.group(); }
+
+ //! Returns a real size of the register this virtual register maps to.
+ //!
+ //! For example if this is a 128-bit SIMD register used for a scalar single
+ //! precision floating point value then its virtSize would be 4, however, the
+ //! `regSize` would still say 16 (128-bits), because it's the smallest size
+ //! of that register type.
+ inline uint32_t regSize() const noexcept { return _info.size(); }
+
+ //! Returns a register signature of this virtual register.
+ inline uint32_t signature() const noexcept { return _info.signature(); }
+
+ //! Returns the virtual register size.
+ //!
+ //! The virtual register size describes how many bytes the virtual register
+ //! needs to store its content. It can be smaller than the physical register
+ //! size, see `regSize()`.
+ inline uint32_t virtSize() const noexcept { return _virtSize; }
+
+ //! Returns the virtual register alignment.
+ inline uint32_t alignment() const noexcept { return _alignment; }
+
+ //! Returns the virtual register type id, see `Type::Id`.
+ inline uint32_t typeId() const noexcept { return _typeId; }
+
+ //! Returns the virtual register weight - the register allocator can use it
+ //! as explicit hint for alloc/spill decisions.
+ inline uint32_t weight() const noexcept { return _weight; }
+ //! Sets the virtual register weight (0 to 255) - the register allocator can
+ //! use it as explicit hint for alloc/spill decisions and initial bin-packing.
+ inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
+
+ //! Returns whether the virtual register is always allocated to a fixed
+ //! physical register (and never reallocated).
+ //!
+ //! \note This is only used for special purposes and it's mostly internal.
+ inline bool isFixed() const noexcept { return bool(_isFixed); }
+
+ //! Returns whether the virtual register is indeed a stack that only uses
+ //! the virtual register id for making it accessible.
+ //!
+ //! \note It's an error if a stack is accessed as a register.
+ inline bool isStack() const noexcept { return bool(_isStack); }
+
+ inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
+ inline RAWorkReg* workReg() const noexcept { return _workReg; }
+ inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
+ inline void resetWorkReg() noexcept { _workReg = nullptr; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
+
diff --git a/Theodosius/asmjit/core/constpool.cpp b/Theodosius/asmjit/core/constpool.cpp
new file mode 100644
index 0000000..65c995b
--- /dev/null
+++ b/Theodosius/asmjit/core/constpool.cpp
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/constpool.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ConstPool - Construction / Destruction]
+// ============================================================================
+
+ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
+ConstPool::~ConstPool() noexcept {}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+void ConstPool::reset(Zone* zone) noexcept {
+ _zone = zone;
+
+ size_t dataSize = 1;
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].reset();
+ _tree[i].setDataSize(dataSize);
+ _gaps[i] = nullptr;
+ dataSize <<= 1;
+ }
+
+ _gapPool = nullptr;
+ _size = 0;
+ _alignment = 0;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Ops]
+// ============================================================================
+
+static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
+ ConstPool::Gap* gap = self->_gapPool;
+ if (!gap)
+ return self->_zone->allocT<ConstPool::Gap>();
+
+ self->_gapPool = gap->_next;
+ return gap;
+}
+
+static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
+ gap->_next = self->_gapPool;
+ self->_gapPool = gap;
+}
+
+static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
+ ASMJIT_ASSERT(size > 0);
+
+ while (size > 0) {
+ size_t gapIndex;
+ size_t gapSize;
+
+ if (size >= 16 && Support::isAligned(offset, 16)) {
+ gapIndex = ConstPool::kIndex16;
+ gapSize = 16;
+ }
+ else if (size >= 8 && Support::isAligned(offset, 8)) {
+ gapIndex = ConstPool::kIndex8;
+ gapSize = 8;
+ }
+ else if (size >= 4 && Support::isAligned(offset, 4)) {
+ gapIndex = ConstPool::kIndex4;
+ gapSize = 4;
+ }
+ else if (size >= 2 && Support::isAligned(offset, 2)) {
+ gapIndex = ConstPool::kIndex2;
+ gapSize = 2;
+ }
+ else {
+ gapIndex = ConstPool::kIndex1;
+ gapSize = 1;
+ }
+
+ // We don't have to check for errors here; if this failed nothing really
+ // happened (just the gap won't be visible) and it will fail again at the
+ // place where the same check would generate a `kErrorOutOfMemory` error.
+ ConstPool::Gap* gap = ConstPool_allocGap(self);
+ if (!gap)
+ return;
+
+ gap->_next = self->_gaps[gapIndex];
+ self->_gaps[gapIndex] = gap;
+
+ gap->_offset = offset;
+ gap->_size = gapSize;
+
+ offset += gapSize;
+ size -= gapSize;
+ }
+}
+
+Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ size_t treeIndex;
+
+ if (size == 32)
+ treeIndex = kIndex32;
+ else if (size == 16)
+ treeIndex = kIndex16;
+ else if (size == 8)
+ treeIndex = kIndex8;
+ else if (size == 4)
+ treeIndex = kIndex4;
+ else if (size == 2)
+ treeIndex = kIndex2;
+ else if (size == 1)
+ treeIndex = kIndex1;
+ else
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ConstPool::Node* node = _tree[treeIndex].get(data);
+ if (node) {
+ dstOffset = node->_offset;
+ return kErrorOk;
+ }
+
+ // Before incrementing the current offset, check whether there is a gap
+ // that can be used for the requested data.
+ size_t offset = ~size_t(0);
+ size_t gapIndex = treeIndex;
+
+ while (gapIndex != kIndexCount - 1) {
+ ConstPool::Gap* gap = _gaps[treeIndex];
+
+ // Check if there is a gap.
+ if (gap) {
+ size_t gapOffset = gap->_offset;
+ size_t gapSize = gap->_size;
+
+ // Destroy the gap for now.
+ _gaps[treeIndex] = gap->_next;
+ ConstPool_freeGap(this, gap);
+
+ offset = gapOffset;
+ ASMJIT_ASSERT(Support::isAligned(offset, size));
+
+ gapSize -= size;
+ if (gapSize > 0)
+ ConstPool_addGap(this, gapOffset, gapSize);
+ }
+
+ gapIndex++;
+ }
+
+ if (offset == ~size_t(0)) {
+ // Get how many bytes have to be skipped so the address is aligned
+ // according to `size`.
+ size_t diff = Support::alignUpDiff(_size, size);
+
+ if (diff != 0) {
+ ConstPool_addGap(this, _size, diff);
+ _size += diff;
+ }
+
+ offset = _size;
+ _size += size;
+ }
+
+ // Add the initial node to the right index.
+ node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
+ if (!node) return DebugUtils::errored(kErrorOutOfMemory);
+
+ _tree[treeIndex].insert(node);
+ _alignment = Support::max(_alignment, size);
+
+ dstOffset = offset;
+
+ // Now create a bunch of shared constants that are based on the data pattern.
+ // We stop at size 4, it probably doesn't make sense to split constants down
+ // to 1 byte.
+ size_t pCount = 1;
+ while (size > 4) {
+ size >>= 1;
+ pCount <<= 1;
+
+ ASMJIT_ASSERT(treeIndex != 0);
+ treeIndex--;
+
+ const uint8_t* pData = static_cast<const uint8_t*>(data);
+ for (size_t i = 0; i < pCount; i++, pData += size) {
+ node = _tree[treeIndex].get(pData);
+ if (node) continue;
+
+ node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
+ _tree[treeIndex].insert(node);
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+struct ConstPoolFill {
+ inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
+ _dst(dst),
+ _dataSize(dataSize) {}
+
+ inline void operator()(const ConstPool::Node* node) noexcept {
+ if (!node->_shared)
+ memcpy(_dst + node->_offset, node->data(), _dataSize);
+ }
+
+ uint8_t* _dst;
+ size_t _dataSize;
+};
+
+void ConstPool::fill(void* dst) const noexcept {
+ // Clears possible gaps, asmjit should never emit garbage to the output.
+ memset(dst, 0, _size);
+
+ ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].forEach(filler);
+ filler._dataSize <<= 1;
+ }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(const_pool) {
+ Zone zone(32384 - Zone::kBlockOverhead);
+ ConstPool pool(&zone);
+
+ uint32_t i;
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
+
+ INFO("Adding %u constants to the pool", kCount);
+ {
+ size_t prevOffset;
+ size_t curOffset;
+ uint64_t c = 0x0101010101010101u;
+
+ EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
+ EXPECT(prevOffset == 0);
+
+ for (i = 1; i < kCount; i++) {
+ c++;
+ EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
+ EXPECT(prevOffset + 8 == curOffset);
+ EXPECT(pool.size() == (i + 1) * 8);
+ prevOffset = curOffset;
+ }
+
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Retrieving %u constants from the pool", kCount);
+ {
+ uint64_t c = 0x0101010101010101u;
+
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Checking if the constants were split into 4-byte patterns");
+ {
+ uint32_t c = 0x01010101;
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 4, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Adding 2 byte constant to misalign the current offset");
+ {
+ uint16_t c = 0xFFFF;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Adding 8 byte constant to check if pool gets aligned again");
+ {
+ uint64_t c = 0xFFFFFFFFFFFFFFFFu;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 8);
+ }
+
+ INFO("Adding 2 byte constant to verify the gap is filled");
+ {
+ uint16_t c = 0xFFFE;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 2);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Checking reset functionality");
+ {
+ pool.reset(&zone);
+ zone.reset();
+
+ EXPECT(pool.size() == 0);
+ EXPECT(pool.alignment() == 0);
+ }
+
+ INFO("Checking pool alignment when combined constants are added");
+ {
+ uint8_t bytes[32] = { 0 };
+ size_t offset;
+
+ pool.add(bytes, 1, offset);
+ EXPECT(pool.size() == 1);
+ EXPECT(pool.alignment() == 1);
+ EXPECT(offset == 0);
+
+ pool.add(bytes, 2, offset);
+ EXPECT(pool.size() == 4);
+ EXPECT(pool.alignment() == 2);
+ EXPECT(offset == 2);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 32, offset);
+ EXPECT(pool.size() == 64);
+ EXPECT(pool.alignment() == 32);
+ EXPECT(offset == 32);
+ }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/constpool.h b/Theodosius/asmjit/core/constpool.h
new file mode 100644
index 0000000..d9ac589
--- /dev/null
+++ b/Theodosius/asmjit/core/constpool.h
@@ -0,0 +1,262 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_utilities
+//! \{
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+//! Constant pool.
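+//!
+//! Standalone use is straightforward (a sketch; inside the compiler the pool
+//! is owned by a \ref ConstPoolNode and flushed automatically):
+//!
+//! \code
+//! Zone zone(4096);
+//! ConstPool pool(&zone);
+//!
+//! uint64_t c = 0x0102030405060708u;
+//! size_t offset;
+//! // Constants are deduplicated; `offset` receives the constant's position
+//! // within the pool.
+//! pool.add(&c, sizeof(c), offset);
+//! \endcode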
+class ConstPool {
+public:
+ ASMJIT_NONCOPYABLE(ConstPool)
+
+ //! Constant pool scope.
+ enum Scope : uint32_t {
+ //! Local constant, always embedded right after the current function.
+ kScopeLocal = 0,
+ //! Global constant, embedded at the end of the currently compiled code.
+ kScopeGlobal = 1
+ };
+
+ //! \cond INTERNAL
+
+ //! Index of a given size in const-pool table.
+ enum Index : uint32_t {
+ kIndex1 = 0,
+ kIndex2 = 1,
+ kIndex4 = 2,
+ kIndex8 = 3,
+ kIndex16 = 4,
+ kIndex32 = 5,
+ kIndexCount = 6
+ };
+
+ //! Zone-allocated const-pool gap created by two differently aligned constants.
+ struct Gap {
+ //! Pointer to the next gap
+ Gap* _next;
+ //! Offset of the gap.
+ size_t _offset;
+ //! Remaining bytes of the gap (basically a gap size).
+ size_t _size;
+ };
+
+ //! Zone-allocated const-pool node.
+  class Node : public ZoneTreeNodeT<Node> {
+ public:
+ ASMJIT_NONCOPYABLE(Node)
+
+ //! If this constant is shared with another.
+ uint32_t _shared : 1;
+ //! Data offset from the beginning of the pool.
+ uint32_t _offset;
+
+ inline Node(size_t offset, bool shared) noexcept
+      : ZoneTreeNodeT<Node>(),
+ _shared(shared),
+ _offset(uint32_t(offset)) {}
+
+ inline void* data() const noexcept {
+      return static_cast<void*>(const_cast<Node*>(this) + 1);
+ }
+ };
+
+ //! Data comparer used internally.
+ class Compare {
+ public:
+ size_t _dataSize;
+
+ inline Compare(size_t dataSize) noexcept
+ : _dataSize(dataSize) {}
+
+ inline int operator()(const Node& a, const Node& b) const noexcept {
+ return ::memcmp(a.data(), b.data(), _dataSize);
+ }
+
+ inline int operator()(const Node& a, const void* data) const noexcept {
+ return ::memcmp(a.data(), data, _dataSize);
+ }
+ };
+
+ //! Zone-allocated const-pool tree.
+ struct Tree {
+ //! RB tree.
+    ZoneTree<Node> _tree;
+ //! Size of the tree (number of nodes).
+ size_t _size;
+ //! Size of the data.
+ size_t _dataSize;
+
+ inline explicit Tree(size_t dataSize = 0) noexcept
+ : _tree(),
+ _size(0),
+ _dataSize(dataSize) {}
+
+ inline void reset() noexcept {
+ _tree.reset();
+ _size = 0;
+ }
+
+ inline bool empty() const noexcept { return _size == 0; }
+ inline size_t size() const noexcept { return _size; }
+
+ inline void setDataSize(size_t dataSize) noexcept {
+ ASMJIT_ASSERT(empty());
+ _dataSize = dataSize;
+ }
+
+ inline Node* get(const void* data) noexcept {
+ Compare cmp(_dataSize);
+ return _tree.get(data, cmp);
+ }
+
+ inline void insert(Node* node) noexcept {
+ Compare cmp(_dataSize);
+ _tree.insert(node, cmp);
+ _size++;
+ }
+
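+    //! Visits all nodes in key order. This is an iterative in-order traversal
+    //! that uses an explicit stack (bounded by `Globals::kMaxTreeHeight`)
+    //! instead of recursion.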
+    template<typename Visitor>
+ inline void forEach(Visitor& visitor) const noexcept {
+ Node* node = _tree.root();
+ if (!node) return;
+
+ Node* stack[Globals::kMaxTreeHeight];
+ size_t top = 0;
+
+ for (;;) {
+ Node* left = node->left();
+ if (left != nullptr) {
+ ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
+ stack[top++] = node;
+
+ node = left;
+ continue;
+ }
+
+ for (;;) {
+ visitor(node);
+ node = node->right();
+
+ if (node != nullptr)
+ break;
+
+ if (top == 0)
+ return;
+
+ node = stack[--top];
+ }
+ }
+ }
+
+ static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
+      Node* node = zone->allocT<Node>(sizeof(Node) + size);
+ if (ASMJIT_UNLIKELY(!node)) return nullptr;
+
+ node = new(node) Node(offset, shared);
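+      // The constant's payload is stored right after the `Node` header, which
+      // is what `Node::data()` relies on (it returns `this + 1`).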
+ memcpy(node->data(), data, size);
+ return node;
+ }
+ };
+
+ //! \endcond
+
+ //! Zone allocator.
+ Zone* _zone;
+ //! Tree per size.
+ Tree _tree[kIndexCount];
+ //! Gaps per size.
+ Gap* _gaps[kIndexCount];
+ //! Gaps pool
+ Gap* _gapPool;
+
+ //! Size of the pool (in bytes).
+ size_t _size;
+ //! Required pool alignment.
+ size_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API ConstPool(Zone* zone) noexcept;
+ ASMJIT_API ~ConstPool() noexcept;
+
+ ASMJIT_API void reset(Zone* zone) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _size == 0; }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _size; }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _alignment; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Adds a constant to the constant pool.
+ //!
+  //! The constant must have a known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+  //! The constant is added to the pool only if it doesn't already exist,
+  //! otherwise the offset of the cached value is returned.
+  //!
+  //! AsmJit is able to subdivide added constants, so for example if you add
+  //! an 8-byte constant 0x1122334455667788 it will create the following slots:
+  //!
+  //!   8-byte: 0x1122334455667788
+  //!   4-byte: 0x11223344, 0x55667788
+  //!
+  //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+  //! frequently. However, AsmJit is not able to reallocate a constant that has
+  //! already been added. For example, if you try to add a 4-byte constant and
+  //! then an 8-byte constant having the same 4-byte pattern as the previous
+  //! one, two independent slots will be generated by the pool.
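+  //!
+  //! A minimal usage sketch (assuming a live `Zone` named `zone`):
+  //!
+  //!   ConstPool pool(&zone);
+  //!   uint64_t c = 0x0123456789ABCDEFu;
+  //!   size_t offset;
+  //!   if (pool.add(&c, 8, offset) == kErrorOk) {
+  //!     // `offset` is the constant's position within the pool; the pool can
+  //!     // later be materialized into a buffer via `fill()`.
+  //!   }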
+ ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
+
+ //! Fills the destination with the content of this constant pool.
+ ASMJIT_API void fill(void* dst) const noexcept;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED
diff --git a/Theodosius/asmjit/core/cpuinfo.cpp b/Theodosius/asmjit/core/cpuinfo.cpp
new file mode 100644
index 0000000..edc7d17
--- /dev/null
+++ b/Theodosius/asmjit/core/cpuinfo.cpp
@@ -0,0 +1,97 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/cpuinfo.h"
+
+#if !defined(_WIN32)
+  #include <errno.h>
+  #include <sys/utsname.h>
+  #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU NumThreads]
+// ============================================================================
+
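+// Fallback chain: Win32 GetSystemInfo() -> POSIX sysconf(_SC_NPROCESSORS_ONLN)
+// -> a conservative default of 1 when neither is available.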
+#if defined(_WIN32)
+static inline uint32_t detectHWThreadCount() noexcept {
+ SYSTEM_INFO info;
+ ::GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+#elif defined(_SC_NPROCESSORS_ONLN)
+static inline uint32_t detectHWThreadCount() noexcept {
+ long res = ::sysconf(_SC_NPROCESSORS_ONLN);
+ return res <= 0 ? uint32_t(1) : uint32_t(res);
+}
+#else
+static inline uint32_t detectHWThreadCount() noexcept {
+ return 1;
+}
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU Features]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - Static Initializer]
+// ============================================================================
+
+static uint32_t cpuInfoInitialized;
+static CpuInfo cpuInfoGlobal(Globals::NoInit);
+
+const CpuInfo& CpuInfo::host() noexcept {
+ // This should never cause a problem as the resulting information should
+ // always be the same.
+ if (!cpuInfoInitialized) {
+ CpuInfo cpuInfoLocal;
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+ x86::detectCpu(cpuInfoLocal);
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+ arm::detectCpu(cpuInfoLocal);
+#endif
+
+ cpuInfoLocal._hwThreadCount = detectHWThreadCount();
+ cpuInfoGlobal = cpuInfoLocal;
+ cpuInfoInitialized = 1;
+ }
+
+ return cpuInfoGlobal;
+}
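+
+// Example (sketch) of querying the detected host information:
+//
+//   const CpuInfo& cpu = CpuInfo::host();
+//   printf("%s (%u HW threads)\n", cpu.brand(), cpu.hwThreadCount());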
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/cpuinfo.h b/Theodosius/asmjit/core/cpuinfo.h
new file mode 100644
index 0000000..83bb8c1
--- /dev/null
+++ b/Theodosius/asmjit/core/cpuinfo.h
@@ -0,0 +1,154 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
+#define ASMJIT_CORE_CPUINFO_H_INCLUDED
+
+#include "../core/archtraits.h"
+#include "../core/features.h"
+#include "../core/globals.h"
+#include "../core/string.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CpuInfo]
+// ============================================================================
+
+//! CPU information.
+class CpuInfo {
+public:
+ //! Architecture.
+ uint8_t _arch;
+ //! Sub-architecture.
+ uint8_t _subArch;
+ //! Reserved for future use.
+ uint16_t _reserved;
+ //! CPU family ID.
+ uint32_t _familyId;
+ //! CPU model ID.
+ uint32_t _modelId;
+ //! CPU brand ID.
+ uint32_t _brandId;
+ //! CPU stepping.
+ uint32_t _stepping;
+ //! Processor type.
+ uint32_t _processorType;
+ //! Maximum number of addressable IDs for logical processors.
+ uint32_t _maxLogicalProcessors;
+ //! Cache line size (in bytes).
+ uint32_t _cacheLineSize;
+ //! Number of hardware threads.
+ uint32_t _hwThreadCount;
+
+ //! CPU vendor string.
+ FixedString<16> _vendor;
+ //! CPU brand string.
+ FixedString<64> _brand;
+ //! CPU features.
+ BaseFeatures _features;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline CpuInfo() noexcept { reset(); }
+ inline CpuInfo(const CpuInfo& other) noexcept = default;
+
+ inline explicit CpuInfo(Globals::NoInit_) noexcept
+ : _features(Globals::NoInit) {};
+
+ //! Returns the host CPU information.
+ ASMJIT_API static const CpuInfo& host() noexcept;
+
+ //! Initializes CpuInfo to the given architecture, see \ref Environment.
+ inline void initArch(uint32_t arch, uint32_t subArch = 0u) noexcept {
+ _arch = uint8_t(arch);
+ _subArch = uint8_t(subArch);
+ }
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the CPU architecture id, see \ref Environment::Arch.
+ inline uint32_t arch() const noexcept { return _arch; }
+ //! Returns the CPU architecture sub-id, see \ref Environment::SubArch.
+ inline uint32_t subArch() const noexcept { return _subArch; }
+
+ //! Returns the CPU family ID.
+ inline uint32_t familyId() const noexcept { return _familyId; }
+ //! Returns the CPU model ID.
+ inline uint32_t modelId() const noexcept { return _modelId; }
+ //! Returns the CPU brand id.
+ inline uint32_t brandId() const noexcept { return _brandId; }
+ //! Returns the CPU stepping.
+ inline uint32_t stepping() const noexcept { return _stepping; }
+ //! Returns the processor type.
+ inline uint32_t processorType() const noexcept { return _processorType; }
+ //! Returns the number of maximum logical processors.
+ inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
+
+ //! Returns the size of a cache line flush.
+ inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
+ //! Returns number of hardware threads available.
+ inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
+
+ //! Returns the CPU vendor.
+ inline const char* vendor() const noexcept { return _vendor.str; }
+ //! Tests whether the CPU vendor is equal to `s`.
+ inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
+
+ //! Returns the CPU brand string.
+ inline const char* brand() const noexcept { return _brand.str; }
+
+ //! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
+ //! if needed.
+  template<typename T>
+  inline const T& features() const noexcept { return _features.as<T>(); }
+
+ //! Tests whether the CPU has the given `feature`.
+ inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
+ //! Adds the given CPU `feature` to the list of this CpuInfo features.
+ inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CPUINFO_H_INCLUDED
diff --git a/Theodosius/asmjit/core/datatypes.h b/Theodosius/asmjit/core/datatypes.h
new file mode 100644
index 0000000..2f6cc1e
--- /dev/null
+++ b/Theodosius/asmjit/core/datatypes.h
@@ -0,0 +1,1071 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_DATATYPES_H_INCLUDED
+#define ASMJIT_CORE_DATATYPES_H_INCLUDED
+
+#include "../core/globals.h"
+
+#ifndef ASMJIT_NO_DEPRECATED
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Data64]
+// ============================================================================
+
+//! 64-bit data useful for creating SIMD constants.
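+//!
+//! A minimal sketch of the (deprecated) API:
+//!
+//!   Data64 ones = Data64::fromU8(0xFF);        // all eight bytes set to 0xFF
+//!   Data64 pair = Data64::fromF32(0.5f, 1.5f); // two packed SP-FP values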
+union ASMJIT_DEPRECATED_STRUCT("Data64 is deprecated and will be removed in the future") Data64 {
+ //! Array of eight 8-bit signed integers.
+ int8_t sb[8];
+ //! Array of eight 8-bit unsigned integers.
+ uint8_t ub[8];
+ //! Array of four 16-bit signed integers.
+ int16_t sw[4];
+ //! Array of four 16-bit unsigned integers.
+ uint16_t uw[4];
+ //! Array of two 32-bit signed integers.
+ int32_t sd[2];
+ //! Array of two 32-bit unsigned integers.
+ uint32_t ud[2];
+ //! Array of one 64-bit signed integer.
+ int64_t sq[1];
+ //! Array of one 64-bit unsigned integer.
+ uint64_t uq[1];
+
+ //! Array of two SP-FP values.
+ float sf[2];
+ //! Array of one DP-FP value.
+ double df[1];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(int8_t x0) noexcept {
+ Data64 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(uint8_t x0) noexcept {
+ Data64 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ Data64 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ Data64 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0) noexcept {
+ Data64 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0) noexcept {
+ Data64 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ Data64 self;
+ self.setI16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ Data64 self;
+ self.setU16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0) noexcept {
+ Data64 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0) noexcept {
+ Data64 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0, int32_t x1) noexcept {
+ Data64 self;
+ self.setI32(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0, uint32_t x1) noexcept {
+ Data64 self;
+ self.setU32(x0, x1);
+ return self;
+ }
+
+ //! Sets 64-bit signed integer.
+ static inline Data64 fromI64(int64_t x0) noexcept {
+ Data64 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ static inline Data64 fromU64(uint64_t x0) noexcept {
+ Data64 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0) noexcept {
+ Data64 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0, float x1) noexcept {
+ Data64 self;
+ self.setF32(x0, x1);
+ return self;
+ }
+
+  //! Sets the DP-FP value.
+ static inline Data64 fromF64(double x0) noexcept {
+ Data64 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3;
+ sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3;
+ ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ sd[0] = x0; sd[1] = x0;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ ud[0] = x0; ud[1] = x0;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1) noexcept {
+ sd[0] = x0; sd[1] = x1;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1) noexcept {
+ ud[0] = x0; ud[1] = x1;
+ }
+
+ //! Sets 64-bit signed integer.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0, float x1) noexcept {
+ sf[0] = x0; sf[1] = x1;
+ }
+
+  //! Sets the DP-FP value.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0;
+ }
+};
+
+// ============================================================================
+// [asmjit::Data128]
+// ============================================================================
+
+//! 128-bit data useful for creating SIMD constants.
+union ASMJIT_DEPRECATED_STRUCT("Data128 is deprecated and will be removed in the future") Data128 {
+ //! Array of sixteen 8-bit signed integers.
+ int8_t sb[16];
+ //! Array of sixteen 8-bit unsigned integers.
+ uint8_t ub[16];
+ //! Array of eight 16-bit signed integers.
+ int16_t sw[8];
+ //! Array of eight 16-bit unsigned integers.
+ uint16_t uw[8];
+ //! Array of four 32-bit signed integers.
+ int32_t sd[4];
+ //! Array of four 32-bit unsigned integers.
+ uint32_t ud[4];
+ //! Array of two 64-bit signed integers.
+ int64_t sq[2];
+ //! Array of two 64-bit unsigned integers.
+ uint64_t uq[2];
+
+ //! Array of four 32-bit single precision floating points.
+ float sf[4];
+ //! Array of two 64-bit double precision floating points.
+ double df[2];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(int8_t x0) noexcept {
+ Data128 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(uint8_t x0) noexcept {
+ Data128 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ Data128 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ Data128 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(int16_t x0) noexcept {
+ Data128 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(uint16_t x0) noexcept {
+ Data128 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ Data128 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ Data128 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0) noexcept {
+ Data128 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0) noexcept {
+ Data128 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ Data128 self;
+ self.setI32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ Data128 self;
+ self.setU32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0) noexcept {
+ Data128 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0) noexcept {
+ Data128 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0, int64_t x1) noexcept {
+ Data128 self;
+ self.setI64(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0, uint64_t x1) noexcept {
+ Data128 self;
+ self.setU64(x0, x1);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0) noexcept {
+ Data128 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0, float x1, float x2, float x3) noexcept {
+ Data128 self;
+ self.setF32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0) noexcept {
+ Data128 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0, double x1) noexcept {
+ Data128 self;
+ self.setF64(x0, x1);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t t = (uint64_t(x0) << 32) + x0;
+ uq[0] = t;
+ uq[1] = t;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ }
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1) noexcept {
+ sq[0] = x0; sq[1] = x1;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1) noexcept {
+ uq[0] = x0; uq[1] = x1;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0, float x1, float x2, float x3) noexcept {
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0, double x1) noexcept {
+ df[0] = x0; df[1] = x1;
+ }
+};
+
+// ============================================================================
+// [asmjit::Data256]
+// ============================================================================
+
+//! 256-bit data useful for creating SIMD constants.
+union ASMJIT_DEPRECATED_STRUCT("Data256 is deprecated and will be removed in the future") Data256 {
+ //! Array of thirty two 8-bit signed integers.
+ int8_t sb[32];
+ //! Array of thirty two 8-bit unsigned integers.
+ uint8_t ub[32];
+ //! Array of sixteen 16-bit signed integers.
+ int16_t sw[16];
+ //! Array of sixteen 16-bit unsigned integers.
+ uint16_t uw[16];
+ //! Array of eight 32-bit signed integers.
+ int32_t sd[8];
+ //! Array of eight 32-bit unsigned integers.
+ uint32_t ud[8];
+ //! Array of four 64-bit signed integers.
+ int64_t sq[4];
+ //! Array of four 64-bit unsigned integers.
+ uint64_t uq[4];
+
+ //! Array of eight 32-bit single precision floating points.
+ float sf[8];
+ //! Array of four 64-bit double precision floating points.
+ double df[4];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(int8_t x0) noexcept {
+ Data256 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(uint8_t x0) noexcept {
+ Data256 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ Data256 self;
+ self.setI8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ Data256 self;
+ self.setU8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(int16_t x0) noexcept {
+ Data256 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(uint16_t x0) noexcept {
+ Data256 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 ,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ Data256 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 ,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ Data256 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(int32_t x0) noexcept {
+ Data256 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(uint32_t x0) noexcept {
+ Data256 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ Data256 self;
+ self.setI32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ Data256 self;
+ self.setU32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0) noexcept {
+ Data256 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0) noexcept {
+ Data256 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ Data256 self;
+ self.setI64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ Data256 self;
+ self.setU64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(float x0) noexcept {
+ Data256 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ Data256 self;
+ self.setF32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0) noexcept {
+ Data256 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0, double x1, double x2, double x3) noexcept {
+ Data256 self;
+ self.setF64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ ud[4] = xd;
+ ud[5] = xd;
+ ud[6] = xd;
+ ud[7] = xd;
+ }
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ sb[16] = x16; sb[17] = x17; sb[18] = x18; sb[19] = x19;
+ sb[20] = x20; sb[21] = x21; sb[22] = x22; sb[23] = x23;
+ sb[24] = x24; sb[25] = x25; sb[26] = x26; sb[27] = x27;
+ sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ ub[16] = x16; ub[17] = x17; ub[18] = x18; ub[19] = x19;
+ ub[20] = x20; ub[21] = x21; ub[22] = x22; ub[23] = x23;
+ ub[24] = x24; ub[25] = x25; ub[26] = x26; ub[27] = x27;
+ ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+  //! Sets all sixteen 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ ud[4] = xd;
+ ud[5] = xd;
+ ud[6] = xd;
+ ud[7] = xd;
+ }
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ;
+ sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ;
+ sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11;
+ sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ;
+ uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ;
+ uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11;
+ uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = (uint64_t(x0) << 32) + x0;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ ud[4] = x0;
+ ud[5] = x0;
+ ud[6] = x0;
+ ud[7] = x0;
+ }
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0, double x1, double x2, double x3) noexcept {
+ df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3;
+ }
+
+ //! \}
+};
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_DEPRECATED
+#endif // ASMJIT_CORE_DATATYPES_H_INCLUDED
diff --git a/Theodosius/asmjit/core/emithelper.cpp b/Theodosius/asmjit/core/emithelper.cpp
new file mode 100644
index 0000000..a77211e
--- /dev/null
+++ b/Theodosius/asmjit/core/emithelper.cpp
@@ -0,0 +1,351 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/archtraits.h"
+#include "../core/emithelper_p.h"
+#include "../core/formatter.h"
+#include "../core/funcargscontext_p.h"
+#include "../core/radefs_p.h"
+
+// Can be used for debugging...
+// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseEmitHelper - Formatting]
+// ============================================================================
+
+#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
+static void dumpFuncValue(String& sb, uint32_t arch, const FuncValue& value) noexcept {
+ Formatter::formatTypeId(sb, value.typeId());
+ sb.append('@');
+
+ if (value.isIndirect())
+ sb.append('[');
+
+ if (value.isReg())
+ Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
+ else if (value.isStack())
+ sb.appendFormat("[%d]", value.stackOffset());
+ else
+ sb.append("");
+
+ if (value.isIndirect())
+ sb.append(']');
+}
+
+static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
+ typedef FuncArgsContext::Var Var;
+
+ uint32_t arch = ctx.arch();
+ uint32_t varCount = ctx.varCount();
+
+ for (uint32_t i = 0; i < varCount; i++) {
+ const Var& var = ctx.var(i);
+ const FuncValue& dst = var.out;
+ const FuncValue& cur = var.cur;
+
+ sb.appendFormat("Var%u: ", i);
+ dumpFuncValue(sb, arch, dst);
+ sb.append(" <- ");
+ dumpFuncValue(sb, arch, cur);
+
+ if (var.isDone())
+ sb.append(" {Done}");
+
+ sb.append('\n');
+ }
+}
+#endif
+
+// ============================================================================
+// [asmjit::BaseEmitHelper - EmitArgsAssignment]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
+ typedef FuncArgsContext::Var Var;
+ typedef FuncArgsContext::WorkData WorkData;
+
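+  // Flags tracked across passes of the register shuffle loop below:
+  //   kWorkDidSome   - at least one variable was moved or swapped this pass.
+  //   kWorkPending   - some variables still need to be processed.
+  //   kWorkPostponed - the previous pass made no progress.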
+ enum WorkFlags : uint32_t {
+ kWorkNone = 0x00,
+ kWorkDidSome = 0x01,
+ kWorkPending = 0x02,
+ kWorkPostponed = 0x04
+ };
+
+ uint32_t arch = frame.arch();
+ const ArchTraits& archTraits = ArchTraits::byArch(arch);
+
+ RAConstraints constraints;
+ FuncArgsContext ctx;
+
+ ASMJIT_PROPAGATE(constraints.init(arch));
+ ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
+
+#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
+ {
+ String sb;
+ dumpAssignment(sb, ctx);
+ printf("%s\n", sb.data());
+ }
+#endif
+
+ uint32_t varCount = ctx._varCount;
+ WorkData* workData = ctx._workData;
+
+ uint32_t saVarId = ctx._saVarId;
+ BaseReg sp = BaseReg::fromSignatureAndId(_emitter->_gpRegInfo.signature(), archTraits.spRegId());
+ BaseReg sa = sp;
+
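+  // `sa` is the base register used to address stack arguments. With dynamic
+  // stack alignment the original stack address is no longer in `sp`, so it is
+  // taken either from the preserved frame pointer or from the register that
+  // holds the saved stack address (saRegId).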
+ if (frame.hasDynamicAlignment()) {
+ if (frame.hasPreservedFP())
+ sa.setId(archTraits.fpRegId());
+ else
+ sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
+ }
+
+ // --------------------------------------------------------------------------
+ // Register to stack and stack to stack moves must be first as now we have
+ // the biggest chance of having as many as possible unassigned registers.
+ // --------------------------------------------------------------------------
+
+ if (ctx._stackDstMask) {
+ // Base address of all arguments passed by stack.
+ BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
+ BaseMem baseStackPtr(sp, 0);
+
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+
+ if (!var.out.isStack())
+ continue;
+
+ FuncValue& cur = var.cur;
+ FuncValue& out = var.out;
+
+ ASMJIT_ASSERT(cur.isReg() || cur.isStack());
+ BaseReg reg;
+
+ BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
+ BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
+
+ if (cur.isIndirect()) {
+ if (cur.isStack()) {
+ // TODO: Indirect stack.
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ else {
+ srcStackPtr.setBaseId(cur.regId());
+ }
+ }
+
+ if (cur.isReg() && !cur.isIndirect()) {
+ WorkData& wd = workData[archTraits.regTypeToGroup(cur.regType())];
+ uint32_t rId = cur.regId();
+
+ reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), rId);
+ wd.unassign(varId, rId);
+ }
+ else {
+        // Stack to stack move - since the value is moved through a temporary
+        // register we can decide which register to use. In general we follow
+        // the rule that IntToInt moves use GP regs (with the possibility to
+        // sign or zero extend) and all other moves use either GP or VEC regs
+        // depending on the size of the move.
+ RegInfo rInfo = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
+ if (ASMJIT_UNLIKELY(!rInfo.isValid()))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ WorkData& wd = workData[rInfo.group()];
+ uint32_t availableRegs = wd.availableRegs();
+ if (ASMJIT_UNLIKELY(!availableRegs))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t rId = Support::ctz(availableRegs);
+ reg.setSignatureAndId(rInfo.signature(), rId);
+
+ ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
+ }
+
+ if (cur.isIndirect() && cur.isReg())
+ workData[BaseReg::kGroupGp].unassign(varId, cur.regId());
+
+ // Register to stack move.
+ ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
+ var.markDone();
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // Shuffle all registers that are currently assigned accordingly to target
+ // assignment.
+ // --------------------------------------------------------------------------
+
+ uint32_t workFlags = kWorkNone;
+ for (;;) {
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+ if (var.isDone() || !var.cur.isReg())
+ continue;
+
+ FuncValue& cur = var.cur;
+ FuncValue& out = var.out;
+
+ uint32_t curGroup = archTraits.regTypeToGroup(cur.regType());
+ uint32_t outGroup = archTraits.regTypeToGroup(out.regType());
+
+ uint32_t curId = cur.regId();
+ uint32_t outId = out.regId();
+
+ if (curGroup != outGroup) {
+ // TODO: Conversion is not supported.
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ else {
+ WorkData& wd = workData[outGroup];
+ if (!wd.isAssigned(outId)) {
+EmitMove:
+ ASMJIT_PROPAGATE(
+ emitArgMove(
+ BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
+ BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
+
+ wd.reassign(varId, outId, curId);
+ cur.initReg(out.regType(), outId, out.typeId());
+
+ if (outId == out.regId())
+ var.markDone();
+ workFlags |= kWorkDidSome | kWorkPending;
+ }
+ else {
+ uint32_t altId = wd._physToVarId[outId];
+ Var& altVar = ctx._vars[altId];
+
+ if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
+ // Only few architectures provide swap operations, and only for few register groups.
+ if (archTraits.hasSwap(curGroup)) {
+ uint32_t highestType = Support::max(cur.regType(), altVar.cur.regType());
+ if (Support::isBetween(highestType, BaseReg::kTypeGp8Lo, BaseReg::kTypeGp16))
+ highestType = BaseReg::kTypeGp32;
+
+ uint32_t signature = archTraits.regTypeToSignature(highestType);
+ ASMJIT_PROPAGATE(
+ emitRegSwap(BaseReg::fromSignatureAndId(signature, outId),
+ BaseReg::fromSignatureAndId(signature, curId)));
+ wd.swap(varId, curId, altId, outId);
+ cur.setRegId(outId);
+ var.markDone();
+ altVar.cur.setRegId(curId);
+
+ if (altVar.out.isInitialized())
+ altVar.markDone();
+ workFlags |= kWorkDidSome;
+ }
+ else {
+ // If there is a scratch register it can be used to perform the swap.
+ uint32_t availableRegs = wd.availableRegs();
+ if (availableRegs) {
+ uint32_t inOutRegs = wd.dstRegs();
+ if (availableRegs & ~inOutRegs)
+ availableRegs &= ~inOutRegs;
+ outId = Support::ctz(availableRegs);
+ goto EmitMove;
+ }
+ else {
+ workFlags |= kWorkPending;
+ }
+ }
+ }
+ else {
+ workFlags |= kWorkPending;
+ }
+ }
+ }
+ }
+
+ if (!(workFlags & kWorkPending))
+ break;
+
+ // If we did nothing twice it means that something is really broken.
+ if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
+ }
+
+ // --------------------------------------------------------------------------
+ // Load arguments passed by stack into registers. This is pretty simple and
+ // it never requires multiple iterations like the previous phase.
+ // --------------------------------------------------------------------------
+
+ if (ctx._hasStackSrc) {
+ uint32_t iterCount = 1;
+ if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
+ sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
+
+ // Base address of all arguments passed by stack.
+ BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
+
+ for (uint32_t iter = 0; iter < iterCount; iter++) {
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+ if (var.isDone())
+ continue;
+
+ if (var.cur.isStack()) {
+ ASMJIT_ASSERT(var.out.isReg());
+
+ uint32_t outId = var.out.regId();
+ uint32_t outType = var.out.regType();
+
+ uint32_t group = archTraits.regTypeToGroup(outType);
+ WorkData& wd = ctx._workData[group];
+
+ if (outId == sa.id() && group == BaseReg::kGroupGp) {
+ // This register will be processed last as we still need `saRegId`.
+ if (iterCount == 1) {
+ iterCount++;
+ continue;
+ }
+ wd.unassign(wd._physToVarId[outId], outId);
+ }
+
+ BaseReg dstReg = BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(outType), outId);
+ BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
+
+ ASMJIT_PROPAGATE(emitArgMove(
+ dstReg, var.out.typeId(),
+ srcMem, var.cur.typeId()));
+
+ wd.assign(varId, outId);
+ var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/emithelper_p.h b/Theodosius/asmjit/core/emithelper_p.h
new file mode 100644
index 0000000..cb8ddf0
--- /dev/null
+++ b/Theodosius/asmjit/core/emithelper_p.h
@@ -0,0 +1,83 @@
+
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
+#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
+
+#include "../core/emitter.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::BaseEmitHelper]
+// ============================================================================
+
+//! Helper class that provides utilities for each supported architecture.
+class BaseEmitHelper {
+public:
+ BaseEmitter* _emitter;
+
+ inline explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
+ : _emitter(emitter) {}
+
+ inline BaseEmitter* emitter() const noexcept { return _emitter; }
+ inline void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
+
+  //! Emits a pure move operation between two registers of the same type or
+ //! between a register and its home slot. This function does not handle
+ //! register conversion.
+ virtual Error emitRegMove(
+ const Operand_& dst_,
+ const Operand_& src_, uint32_t typeId, const char* comment = nullptr) = 0;
+
+ //! Emits swap between two registers.
+ virtual Error emitRegSwap(
+ const BaseReg& a,
+ const BaseReg& b, const char* comment = nullptr) = 0;
+
+ //! Emits move from a function argument (either register or stack) to a register.
+ //!
+  //! This function can handle the necessary conversion from one argument to
+  //! another, and from one register type to another, if it's possible. Any
+  //! attempt at a conversion that requires a third register of a different
+  //! group (for example a conversion from K to MMX on X86/X64) will fail.
+ virtual Error emitArgMove(
+ const BaseReg& dst_, uint32_t dstTypeId,
+ const Operand_& src_, uint32_t srcTypeId, const char* comment = nullptr) = 0;
+
+ Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
+};
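+
+// The architecture-specific helpers (e.g. x86::EmitHelper, a64::EmitHelper)
+// derive from BaseEmitHelper and are instantiated on demand by
+// BaseEmitter::emitProlog() / emitEpilog() / emitArgsAssignment() in
+// emitter.cpp. A minimal dispatch sketch, assuming an X86 environment and an
+// already prepared FuncFrame `frame` and FuncArgsAssignment `args`:
+//
+//   x86::EmitHelper helper(emitter, frame.isAvxEnabled());
+//   Error err = helper.emitArgsAssignment(frame, args);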
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
diff --git a/Theodosius/asmjit/core/emitter.cpp b/Theodosius/asmjit/core/emitter.cpp
new file mode 100644
index 0000000..f684140
--- /dev/null
+++ b/Theodosius/asmjit/core/emitter.cpp
@@ -0,0 +1,416 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/emitterutils_p.h"
+#include "../core/errorhandler.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86emithelper_p.h"
+ #include "../x86/x86instdb_p.h"
+#endif // ASMJIT_BUILD_X86
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/a64emithelper_p.h"
+ #include "../arm/a64instdb.h"
+#endif // ASMJIT_BUILD_ARM
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseEmitter - Construction / Destruction]
+// ============================================================================
+
+BaseEmitter::BaseEmitter(uint32_t emitterType) noexcept
+ : _emitterType(uint8_t(emitterType)) {}
+
+BaseEmitter::~BaseEmitter() noexcept {
+ if (_code) {
+ _addEmitterFlags(kFlagDestroyed);
+ _code->detach(this);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Finalize]
+// ============================================================================
+
+Error BaseEmitter::finalize() {
+ // Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Internals]
+// ============================================================================
+
+static constexpr uint32_t kEmitterPreservedFlags = BaseEmitter::kFlagOwnLogger | BaseEmitter::kFlagOwnErrorHandler;
+
+static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
+ bool emitComments = false;
+ bool hasValidationOptions = false;
+
+ if (self->emitterType() == BaseEmitter::kTypeAssembler) {
+ // Assembler: Don't emit comments if logger is not attached.
+ emitComments = self->_code != nullptr && self->_logger != nullptr;
+ hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionAssembler);
+ }
+ else {
+ // Builder/Compiler: Always emit comments, we cannot assume they won't be used.
+ emitComments = self->_code != nullptr;
+ hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionIntermediate);
+ }
+
+ if (emitComments)
+ self->_addEmitterFlags(BaseEmitter::kFlagLogComments);
+ else
+ self->_clearEmitterFlags(BaseEmitter::kFlagLogComments);
+
+  // The reserved option tells the emitter (Assembler/Builder/Compiler) that
+  // there may be either an edge case (CodeHolder not attached, for example) or
+  // that logging or validation is required.
+ if (self->_code == nullptr || self->_logger || hasValidationOptions)
+ self->_forcedInstOptions |= BaseInst::kOptionReserved;
+ else
+ self->_forcedInstOptions &= ~BaseInst::kOptionReserved;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Validation Options]
+// ============================================================================
+
+void BaseEmitter::addValidationOptions(uint32_t options) noexcept {
+ _validationOptions = uint8_t(_validationOptions | options);
+ BaseEmitter_updateForcedOptions(this);
+}
+
+void BaseEmitter::clearValidationOptions(uint32_t options) noexcept {
+  _validationOptions = uint8_t(_validationOptions & ~options);
+ BaseEmitter_updateForcedOptions(this);
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Logging]
+// ============================================================================
+
+void BaseEmitter::setLogger(Logger* logger) noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ if (logger) {
+ _logger = logger;
+ _addEmitterFlags(kFlagOwnLogger);
+ }
+ else {
+ _logger = nullptr;
+ _clearEmitterFlags(kFlagOwnLogger);
+ if (_code)
+ _logger = _code->logger();
+ }
+ BaseEmitter_updateForcedOptions(this);
+#else
+ DebugUtils::unused(logger);
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Error Handling]
+// ============================================================================
+
+void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
+ if (errorHandler) {
+ _errorHandler = errorHandler;
+ _addEmitterFlags(kFlagOwnErrorHandler);
+ }
+ else {
+ _errorHandler = nullptr;
+ _clearEmitterFlags(kFlagOwnErrorHandler);
+ if (_code)
+ _errorHandler = _code->errorHandler();
+ }
+}
+
+Error BaseEmitter::reportError(Error err, const char* message) {
+ ErrorHandler* eh = _errorHandler;
+ if (eh) {
+ if (!message)
+ message = DebugUtils::errorAsString(err);
+ eh->handleError(err, message, this);
+ }
+ return err;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Labels]
+// ============================================================================
+
+Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
+ return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
+}
+
+bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
+ return _code && labelId < _code->labelCount();
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Emit (Low-Level)]
+// ============================================================================
+
+using EmitterUtils::noExt;
+
+Error BaseEmitter::_emitI(uint32_t instId) {
+ return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0) {
+ return _emit(instId, o0, noExt[1], noExt[2], noExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1) {
+ return _emit(instId, o0, o1, noExt[2], noExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
+ return _emit(instId, o0, o1, o2, noExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+ Operand_ opExt[3] = { o3 };
+ return _emit(instId, o0, o1, o2, opExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
+ Operand_ opExt[3] = { o3, o4 };
+ return _emit(instId, o0, o1, o2, opExt);
+}
+
+Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
+ Operand_ opExt[3] = { o3, o4, o5 };
+ return _emit(instId, o0, o1, o2, opExt);
+}
+
+Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
+ const Operand_* op = operands;
+
+ Operand_ opExt[3];
+
+ switch (opCount) {
+ case 0:
+ return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
+
+ case 1:
+ return _emit(instId, op[0], noExt[1], noExt[2], noExt);
+
+ case 2:
+ return _emit(instId, op[0], op[1], noExt[2], noExt);
+
+ case 3:
+ return _emit(instId, op[0], op[1], op[2], noExt);
+
+ case 4:
+ opExt[0] = op[3];
+ opExt[1].reset();
+ opExt[2].reset();
+ return _emit(instId, op[0], op[1], op[2], opExt);
+
+ case 5:
+ opExt[0] = op[3];
+ opExt[1] = op[4];
+ opExt[2].reset();
+ return _emit(instId, op[0], op[1], op[2], opExt);
+
+ case 6:
+ return _emit(instId, op[0], op[1], op[2], op + 3);
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Emit (High-Level)]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (environment().isFamilyX86()) {
+ x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
+ return emitHelper.emitProlog(frame);
+ }
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (environment().isArchAArch64()) {
+ a64::EmitHelper emitHelper(this);
+ return emitHelper.emitProlog(frame);
+ }
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (environment().isFamilyX86()) {
+ x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
+ return emitHelper.emitEpilog(frame);
+ }
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (environment().isArchAArch64()) {
+ a64::EmitHelper emitHelper(this);
+ return emitHelper.emitEpilog(frame);
+ }
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (environment().isFamilyX86()) {
+ x86::EmitHelper emitHelper(this, frame.isAvxEnabled());
+ return emitHelper.emitArgsAssignment(frame, args);
+ }
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (environment().isArchAArch64()) {
+ a64::EmitHelper emitHelper(this);
+ return emitHelper.emitArgsAssignment(frame, args);
+ }
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
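+
+// A sketch of how the three helpers above are typically combined when emitting
+// a function manually. `a` is assumed to be an x86::Assembler attached to a
+// CodeHolder `code`; FuncDetail, FuncFrame and FuncArgsAssignment are assumed
+// to be prepared as documented in func.h (exact signatures may differ slightly):
+//
+//   FuncDetail func;
+//   func.init(FuncSignatureT<void, void*>(CallConv::kIdHost), code.environment());
+//
+//   FuncFrame frame;
+//   frame.init(func);
+//
+//   FuncArgsAssignment args(&func);
+//   args.assignAll(x86::rdi);
+//   args.updateFuncFrame(frame);
+//   frame.finalize();
+//
+//   a.emitProlog(frame);
+//   a.emitArgsAssignment(frame, args);
+//   // ... function body ...
+//   a.emitEpilog(frame);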
+
+// ============================================================================
+// [asmjit::BaseEmitter - Comment]
+// ============================================================================
+
+Error BaseEmitter::commentf(const char* fmt, ...) {
+ if (!hasEmitterFlag(kFlagLogComments)) {
+ if (!hasEmitterFlag(kFlagAttached))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+ return kErrorOk;
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ StringTmp<1024> sb;
+
+ va_list ap;
+ va_start(ap, fmt);
+ Error err = sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ ASMJIT_PROPAGATE(err);
+ return comment(sb.data(), sb.size());
+#else
+ DebugUtils::unused(fmt);
+ return kErrorOk;
+#endif
+}
+
+Error BaseEmitter::commentv(const char* fmt, va_list ap) {
+ if (!hasEmitterFlag(kFlagLogComments)) {
+ if (!hasEmitterFlag(kFlagAttached))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+ return kErrorOk;
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ StringTmp<1024> sb;
+ Error err = sb.appendVFormat(fmt, ap);
+
+ ASMJIT_PROPAGATE(err);
+ return comment(sb.data(), sb.size());
+#else
+ DebugUtils::unused(fmt, ap);
+ return kErrorOk;
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Events]
+// ============================================================================
+
+Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
+ _code = code;
+ _environment = code->environment();
+ _addEmitterFlags(kFlagAttached);
+
+ const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
+ uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
+ _gpRegInfo.setSignature(archTraits._regInfo[nativeRegType].signature());
+
+ onSettingsUpdated();
+ return kErrorOk;
+}
+
+Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
+ DebugUtils::unused(code);
+
+ if (!hasOwnLogger())
+ _logger = nullptr;
+
+ if (!hasOwnErrorHandler())
+ _errorHandler = nullptr;
+
+ _clearEmitterFlags(~kEmitterPreservedFlags);
+ _forcedInstOptions = BaseInst::kOptionReserved;
+ _privateData = 0;
+
+ _environment.reset();
+ _gpRegInfo.reset();
+
+ _instOptions = 0;
+ _extraReg.reset();
+ _inlineComment = nullptr;
+
+ return kErrorOk;
+}
+
+void BaseEmitter::onSettingsUpdated() noexcept {
+ // Only called when attached to CodeHolder by CodeHolder.
+ ASMJIT_ASSERT(_code != nullptr);
+
+ if (!hasOwnLogger())
+ _logger = _code->logger();
+
+ if (!hasOwnErrorHandler())
+ _errorHandler = _code->errorHandler();
+
+ BaseEmitter_updateForcedOptions(this);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/emitter.h b/Theodosius/asmjit/core/emitter.h
new file mode 100644
index 0000000..fcb9bb5
--- /dev/null
+++ b/Theodosius/asmjit/core/emitter.h
@@ -0,0 +1,723 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
+#define ASMJIT_CORE_EMITTER_H_INCLUDED
+
+#include "../core/archtraits.h"
+#include "../core/codeholder.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class ConstPool;
+class FuncFrame;
+class FuncArgsAssignment;
+
+// ============================================================================
+// [asmjit::BaseEmitter]
+// ============================================================================
+
+//! Provides a base foundation to emit code - specialized by `Assembler` and
+//! `BaseBuilder`.
+class ASMJIT_VIRTAPI BaseEmitter {
+public:
+ ASMJIT_BASE_CLASS(BaseEmitter)
+
+ //! See \ref EmitterType.
+ uint8_t _emitterType = 0;
+ //! See \ref BaseEmitter::EmitterFlags.
+ uint8_t _emitterFlags = 0;
+ //! Validation flags in case validation is used, see \ref InstAPI::ValidationFlags.
+ //!
+  //! \note Validation flags are specific to the emitter and they are set up at
+ //! construction time and then never changed.
+ uint8_t _validationFlags = 0;
+ //! Validation options, see \ref ValidationOptions.
+ uint8_t _validationOptions = 0;
+
+ //! Encoding options, see \ref EncodingOptions.
+ uint32_t _encodingOptions = 0;
+
+ //! Forced instruction options, combined with \ref _instOptions by \ref emit().
+ uint32_t _forcedInstOptions = BaseInst::kOptionReserved;
+ //! Internal private data used freely by any emitter.
+ uint32_t _privateData = 0;
+
+ //! CodeHolder the emitter is attached to.
+ CodeHolder* _code = nullptr;
+ //! Attached \ref Logger.
+ Logger* _logger = nullptr;
+ //! Attached \ref ErrorHandler.
+ ErrorHandler* _errorHandler = nullptr;
+
+ //! Describes the target environment, matches \ref CodeHolder::environment().
+ Environment _environment {};
+ //! Native GP register signature and signature related information.
+ RegInfo _gpRegInfo {};
+
+ //! Next instruction options (affects the next instruction).
+ uint32_t _instOptions = 0;
+ //! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
+ RegOnly _extraReg {};
+ //! Inline comment of the next instruction (affects the next instruction).
+ const char* _inlineComment = nullptr;
+
+ //! Emitter type.
+ enum EmitterType : uint32_t {
+ //! Unknown or uninitialized.
+ kTypeNone = 0,
+ //! Emitter inherits from \ref BaseAssembler.
+ kTypeAssembler = 1,
+ //! Emitter inherits from \ref BaseBuilder.
+ kTypeBuilder = 2,
+ //! Emitter inherits from \ref BaseCompiler.
+ kTypeCompiler = 3,
+
+ //! Count of emitter types.
+ kTypeCount = 4
+ };
+
+ //! Emitter flags.
+ enum EmitterFlags : uint32_t {
+ //! Emitter is attached to CodeHolder.
+ kFlagAttached = 0x01u,
+ //! The emitter must emit comments.
+ kFlagLogComments = 0x08u,
+ //! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
+ kFlagOwnLogger = 0x10u,
+ //! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
+ kFlagOwnErrorHandler = 0x20u,
+ //! The emitter was finalized.
+ kFlagFinalized = 0x40u,
+ //! The emitter was destroyed.
+ kFlagDestroyed = 0x80u
+ };
+
+ //! Encoding options.
+ enum EncodingOptions : uint32_t {
+ //! Emit instructions that are optimized for size, if possible.
+ //!
+ //! Default: false.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+  //! When this option is set, the assembler will try to rewrite instructions,
+  //! where possible, into operation-equivalent instructions that take fewer
+  //! bytes by taking advantage of implicit zero extension. For example,
+  //! instructions like `mov r64, imm` and `and r64, imm` can be translated to
+  //! `mov r32, imm` and `and r32, imm` when the immediate constant is less
+  //! than `2^31`.
+ kEncodingOptionOptimizeForSize = 0x00000001u,
+
+ //! Emit optimized code-alignment sequences.
+ //!
+ //! Default: false.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+  //! The default align sequence used by the X86 architecture is the one-byte
+  //! (0x90) opcode that disassemblers usually show as NOP. However, there are
+  //! more optimized align sequences for 2-11 bytes that may execute faster
+  //! on certain CPUs. If this feature is enabled, AsmJit will generate
+  //! specialized sequences for alignment of 2 to 11 bytes.
+ kEncodingOptionOptimizedAlign = 0x00000002u,
+
+ //! Emit jump-prediction hints.
+ //!
+ //! Default: false.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+  //! Jump prediction is usually based on the direction of the jump. If the
+  //! jump is backward it is usually predicted as taken, and if the jump is
+  //! forward it is usually predicted as not-taken. The reason is that loops
+  //! generally use backward jumps while conditions usually use forward jumps.
+  //! However, this behavior can be overridden by using instruction prefixes.
+  //! If this option is enabled, these hints will be emitted.
+  //!
+  //! This feature is disabled by default, because the only processor that
+  //! used to take prediction hints into consideration was the Pentium 4. Newer
+  //! processors implement heuristics for branch prediction and ignore static
+  //! hints. This means that this feature can only be used for annotation purposes.
+ kEncodingOptionPredictedJumps = 0x00000010u
+ };
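+
+  // A minimal usage sketch, assuming an x86::Assembler `a` attached to a
+  // CodeHolder (the options are plain bit flags):
+  //
+  //   a.addEncodingOptions(BaseEmitter::kEncodingOptionOptimizeForSize |
+  //                        BaseEmitter::kEncodingOptionOptimizedAlign);
+  //   a.mov(x86::rax, 1);   // may now be encoded as the shorter `mov eax, 1`.
+  //   a.clearEncodingOptions(BaseEmitter::kEncodingOptionOptimizedAlign);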
+
+#ifndef ASMJIT_NO_DEPRECATED
+ enum EmitterOptions : uint32_t {
+ kOptionOptimizedForSize = kEncodingOptionOptimizeForSize,
+ kOptionOptimizedAlign = kEncodingOptionOptimizedAlign,
+ kOptionPredictedJumps = kEncodingOptionPredictedJumps
+ };
+#endif
+
+ //! Validation options are used to tell emitters to perform strict validation
+ //! of instructions passed to \ref emit().
+ //!
+  //! \ref BaseAssembler implementations perform by default only the basic checks
+  //! that are necessary to identify all variations of an instruction so the
+  //! correct encoding can be selected. This is fine for production-ready code
+  //! as the assembler doesn't have to perform checks that would slow it down.
+  //! However, sometimes these checks are beneficial, especially when the project
+  //! that uses AsmJit is in a development phase, in which mistakes happen often.
+  //! To make the experience of using AsmJit seamless, it offers validation
+  //! features that can be controlled by `ValidationOptions`.
+ enum ValidationOptions : uint32_t {
+ //! Perform strict validation in \ref BaseAssembler::emit() implementations.
+ //!
+  //! This flag ensures that each instruction is checked before it's encoded
+  //! into a binary representation. This flag is only relevant for \ref
+  //! BaseAssembler implementations, but it can be set in any other emitter type;
+  //! in that case, if that emitter needs to create an assembler on its own for
+  //! the purpose of \ref finalize(), it will propagate this flag to that
+  //! assembler so all instructions passed to it are explicitly validated.
+ //!
+ //! Default: false.
+ kValidationOptionAssembler = 0x00000001u,
+
+ //! Perform strict validation in \ref BaseBuilder::emit() and \ref
+ //! BaseCompiler::emit() implementations.
+ //!
+ //! This flag ensures that each instruction is checked before an \ref
+ //! InstNode representing the instruction is created by Builder or Compiler.
+ //!
+ //! Default: false.
+ kValidationOptionIntermediate = 0x00000002u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit BaseEmitter(uint32_t emitterType) noexcept;
+ ASMJIT_API virtual ~BaseEmitter() noexcept;
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+  template<typename T>
+  inline T* as() noexcept { return reinterpret_cast<T*>(this); }
+
+  template<typename T>
+  inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
+
+ //! \}
+
+ //! \name Emitter Type & Flags
+ //! \{
+
+ //! Returns the type of this emitter, see `EmitterType`.
+ inline uint32_t emitterType() const noexcept { return _emitterType; }
+  //! Returns emitter flags, see `EmitterFlags`.
+ inline uint32_t emitterFlags() const noexcept { return _emitterFlags; }
+
+ //! Tests whether the emitter inherits from `BaseAssembler`.
+ inline bool isAssembler() const noexcept { return _emitterType == kTypeAssembler; }
+ //! Tests whether the emitter inherits from `BaseBuilder`.
+ //!
+ //! \note Both Builder and Compiler emitters would return `true`.
+ inline bool isBuilder() const noexcept { return _emitterType >= kTypeBuilder; }
+ //! Tests whether the emitter inherits from `BaseCompiler`.
+ inline bool isCompiler() const noexcept { return _emitterType == kTypeCompiler; }
+
+ //! Tests whether the emitter has the given `flag` enabled.
+ inline bool hasEmitterFlag(uint32_t flag) const noexcept { return (_emitterFlags & flag) != 0; }
+ //! Tests whether the emitter is finalized.
+ inline bool isFinalized() const noexcept { return hasEmitterFlag(kFlagFinalized); }
+ //! Tests whether the emitter is destroyed (only used during destruction).
+ inline bool isDestroyed() const noexcept { return hasEmitterFlag(kFlagDestroyed); }
+
+ inline void _addEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags | flags); }
+ inline void _clearEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags & ~flags); }
+
+ //! \}
+
+ //! \name Target Information
+ //! \{
+
+ //! Returns the CodeHolder this emitter is attached to.
+ inline CodeHolder* code() const noexcept { return _code; }
+
+ //! Returns the target environment, see \ref Environment.
+ //!
+ //! The returned \ref Environment reference matches \ref CodeHolder::environment().
+ inline const Environment& environment() const noexcept { return _environment; }
+
+ //! Tests whether the target architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return environment().is32Bit(); }
+ //! Tests whether the target architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return environment().is64Bit(); }
+
+ //! Returns the target architecture type.
+ inline uint32_t arch() const noexcept { return environment().arch(); }
+ //! Returns the target architecture sub-type.
+ inline uint32_t subArch() const noexcept { return environment().subArch(); }
+
+ //! Returns the target architecture's GP register size (4 or 8 bytes).
+ inline uint32_t registerSize() const noexcept { return environment().registerSize(); }
+
+ //! \}
+
+ //! \name Initialization & Finalization
+ //! \{
+
+ //! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
+ inline bool isInitialized() const noexcept { return _code != nullptr; }
+
+ //! Finalizes this emitter.
+ //!
+ //! Materializes the content of the emitter by serializing it to the attached
+ //! \ref CodeHolder through an architecture specific \ref BaseAssembler. This
+ //! function won't do anything if the emitter inherits from \ref BaseAssembler
+ //! as assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder.
+ //! However, if this is an emitter that inherits from \ref BaseBuilder or \ref
+ //! BaseCompiler then these emitters need the materialization phase as they
+ //! store their content in a representation not visible to \ref CodeHolder.
+ ASMJIT_API virtual Error finalize();
+
+ //! \}
+
+ //! \name Logging
+ //! \{
+
+ //! Tests whether the emitter has a logger.
+ inline bool hasLogger() const noexcept { return _logger != nullptr; }
+
+ //! Tests whether the emitter has its own logger.
+ //!
+ //! Own logger means that it overrides the possible logger that may be used
+ //! by \ref CodeHolder this emitter is attached to.
+ inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(kFlagOwnLogger); }
+
+ //! Returns the logger this emitter uses.
+ //!
+ //! The returned logger is either the emitter's own logger or it's logger
+ //! used by \ref CodeHolder this emitter is attached to.
+ inline Logger* logger() const noexcept { return _logger; }
+
+ //! Sets or resets the logger of the emitter.
+ //!
+ //! If the `logger` argument is non-null then the logger will be considered
+ //! emitter's own logger, see \ref hasOwnLogger() for more details. If the
+ //! given `logger` is null then the emitter will automatically use logger
+ //! that is attached to the \ref CodeHolder this emitter is attached to.
+ ASMJIT_API void setLogger(Logger* logger) noexcept;
+
+ //! Resets the logger of this emitter.
+ //!
+  //! The emitter will fall back to using the logger attached to the
+  //! \ref CodeHolder this emitter is attached to, or no logger at all if the
+  //! \ref CodeHolder doesn't have one.
+ inline void resetLogger() noexcept { return setLogger(nullptr); }
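+
+  // A logging sketch, assuming a FileLogger writing to stdout and an attached
+  // emitter `a`:
+  //
+  //   FileLogger logger(stdout);
+  //   a.setLogger(&logger);   // `a` now has its own logger and no longer
+  //                           // inherits the one attached to CodeHolder.
+  //   a.resetLogger();        // back to whatever CodeHolder provides (if any).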
+
+ //! \}
+
+ //! \name Error Handling
+ //! \{
+
+ //! Tests whether the emitter has an error handler attached.
+ inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+
+ //! Tests whether the emitter has its own error handler.
+ //!
+ //! Own error handler means that it overrides the possible error handler that
+ //! may be used by \ref CodeHolder this emitter is attached to.
+ inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(kFlagOwnErrorHandler); }
+
+ //! Returns the error handler this emitter uses.
+ //!
+ //! The returned error handler is either the emitter's own error handler or
+  //! the error handler used by the \ref CodeHolder this emitter is attached to.
+ inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+
+ //! Sets or resets the error handler of the emitter.
+ ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
+
+ //! Resets the error handler.
+ inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+ //! Handles the given error in the following way:
+ //! 1. If the emitter has \ref ErrorHandler attached, it calls its
+ //! \ref ErrorHandler::handleError() member function first, and
+ //! then returns the error. The `handleError()` function may throw.
+  //!   2. If the emitter doesn't have \ref ErrorHandler, the error is
+ //! simply returned.
+ ASMJIT_API Error reportError(Error err, const char* message = nullptr);
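+
+  // A sketch of a custom handler, assuming the signature used by reportError()
+  // in emitter.cpp (error code, message, originating emitter):
+  //
+  //   class PrintingErrorHandler : public ErrorHandler {
+  //   public:
+  //     void handleError(Error err, const char* message, BaseEmitter* origin) override {
+  //       printf("AsmJit error %u: %s\n", err, message);
+  //     }
+  //   };
+  //
+  //   PrintingErrorHandler eh;
+  //   a.setErrorHandler(&eh);   // subsequent failures are reported through `eh`.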
+
+ //! \}
+
+ //! \name Encoding Options
+ //! \{
+
+ //! Returns encoding options, see \ref EncodingOptions.
+ inline uint32_t encodingOptions() const noexcept { return _encodingOptions; }
+ //! Tests whether the encoding `option` is set.
+ inline bool hasEncodingOption(uint32_t option) const noexcept { return (_encodingOptions & option) != 0; }
+
+ //! Enables the given encoding `options`, see \ref EncodingOptions.
+ inline void addEncodingOptions(uint32_t options) noexcept { _encodingOptions |= options; }
+ //! Disables the given encoding `options`, see \ref EncodingOptions.
+ inline void clearEncodingOptions(uint32_t options) noexcept { _encodingOptions &= ~options; }
+
+ //! \}
+
+ //! \name Validation Options
+ //! \{
+
+ //! Returns the emitter's validation options, see \ref ValidationOptions.
+ inline uint32_t validationOptions() const noexcept {
+ return _validationOptions;
+ }
+
+ //! Tests whether the given `option` is present in validation options.
+ inline bool hasValidationOption(uint32_t option) const noexcept {
+ return (_validationOptions & option) != 0;
+ }
+
+ //! Activates the given validation `options`, see \ref ValidationOptions.
+ //!
+ //! This function is used to activate explicit validation options that will
+ //! be then used by all emitter implementations. There are in general two
+ //! possibilities:
+ //!
+ //! - Architecture specific assembler is used. In this case a
+ //! \ref kValidationOptionAssembler can be used to turn on explicit
+ //! validation that will be used before an instruction is emitted.
+ //! This means that internally an extra step will be performed to
+ //! make sure that the instruction is correct. This is needed, because
+ //! by default assemblers prefer speed over strictness.
+ //!
+ //! This option should be used in debug builds as it's pretty expensive.
+ //!
+ //! - Architecture specific builder or compiler is used. In this case
+ //! the user can turn on \ref kValidationOptionIntermediate option
+ //! that adds explicit validation step before the Builder or Compiler
+  //!     creates an \ref InstNode to represent an emitted instruction. An error
+  //!     will be returned if the instruction is ill-formed. In addition,
+  //!     \ref kValidationOptionAssembler can also be used; it would not be
+  //!     consumed by Builder / Compiler directly, but it would be propagated
+  //!     to the architecture specific \ref BaseAssembler implementation it
+  //!     creates during \ref BaseEmitter::finalize().
+ ASMJIT_API void addValidationOptions(uint32_t options) noexcept;
+
+ //! Deactivates the given validation `options`.
+ //!
+ //! See \ref addValidationOptions() and \ref ValidationOptions for more details.
+ ASMJIT_API void clearValidationOptions(uint32_t options) noexcept;
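+
+  // A short sketch of the two scenarios described above, assuming an
+  // x86::Assembler `a` and an x86::Compiler `cc`:
+  //
+  //   a.addValidationOptions(BaseEmitter::kValidationOptionAssembler);
+  //   // every instruction passed to `a` is now strictly validated before it
+  //   // is encoded (slower, intended for debug builds).
+  //
+  //   cc.addValidationOptions(BaseEmitter::kValidationOptionIntermediate);
+  //   // every InstNode the Compiler records is validated when it's created.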
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+ //! Returns forced instruction options.
+ //!
+  //! Forced instruction options are merged with the next instruction's options
+  //! before the instruction is encoded. Some bits of these options are reserved
+  //! and used for error handling, logging, and instruction validation purposes.
+  //! The remaining options are global and affect each instruction.
+ inline uint32_t forcedInstOptions() const noexcept { return _forcedInstOptions; }
+
+ //! Returns options of the next instruction.
+ inline uint32_t instOptions() const noexcept { return _instOptions; }
+  //! Sets options of the next instruction.
+ inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
+ //! Adds options of the next instruction.
+ inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
+ //! Resets options of the next instruction.
+ inline void resetInstOptions() noexcept { _instOptions = 0; }
+
+ //! Tests whether the extra register operand is valid.
+ inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
+ //! Returns an extra operand that will be used by the next instruction (architecture specific).
+ inline const RegOnly& extraReg() const noexcept { return _extraReg; }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ //! Resets an extra operand that will be used by the next instruction (architecture specific).
+ inline void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! Returns comment/annotation of the next instruction.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets comment/annotation of the next instruction.
+ //!
+  //! \note This string is set back to null by `_emit()`, but until then it has
+  //! to remain valid, as the Emitter is not required to make a copy of it (and
+  //! it would be slow to do so for each instruction).
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets the comment/annotation to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+
+ //! \name Sections
+ //! \{
+
+ virtual Error section(Section* section) = 0;
+
+ //! \}
+
+ //! \name Labels
+ //! \{
+
+ //! Creates a new label.
+ virtual Label newLabel() = 0;
+ //! Creates a new named label.
+ virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
+
+ //! Creates a new external label.
+ inline Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) {
+ return newNamedLabel(name, nameSize, Label::kTypeExternal);
+ }
+
+ //! Returns `Label` by `name`.
+ //!
+  //! Returns an invalid Label if the name is invalid or the label was not found.
+  //!
+  //! \note This function doesn't trigger ErrorHandler in case the name is invalid
+  //! or no such label exists. You must always check the validity of the returned `Label`.
+ ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ //! Binds the `label` to the current position of the current section.
+ //!
+ //! \note Attempt to bind the same label multiple times will return an error.
+ virtual Error bind(const Label& label) = 0;
+
+ //! Tests whether the label `id` is valid (i.e. registered).
+ ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
+ //! Tests whether the `label` is valid (i.e. registered).
+ inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
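+
+  // A label sketch, assuming an attached assembler `a`:
+  //
+  //   Label entry = a.newNamedLabel("entry");
+  //   a.bind(entry);                        // binding it again would fail.
+  //   Label found = a.labelByName("entry");
+  //   if (!a.isLabelValid(found)) { /* not registered - handle the error */ }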
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+  // NOTE: These `emit()` helpers are designed to address the code bloat generated
+  // by C++ compilers when calling a function that has many arguments. Each
+  // parameter to `_emit()` requires some code to pass it, which means that if we
+  // defaulted to 5 operand arguments in `_emit()` plus `instId`, the C++ compiler
+  // would have to generate a virtual function call having 5 parameters and an
+  // additional `this` argument, which is quite a lot. Since by default most
+  // instructions have 2 to 3 operands, it's better to introduce helpers that pass
+  // from 0 to 6 operands and help to reduce the size of the emit(...) call.
+
+ //! Emits an instruction (internal).
+ ASMJIT_API Error _emitI(uint32_t instId);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
+ //! \overload
+ ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
+
+ //! Emits an instruction `instId` with the given `operands`.
+  template<typename... Args>
+  ASMJIT_INLINE Error emit(uint32_t instId, Args&&... operands) {
+    return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
+ }
+
+ inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
+ return _emitOpArray(instId, operands, opCount);
+ }
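+
+  // A sketch of both forms, assuming an x86::Assembler `a` and x86 instruction
+  // identifiers:
+  //
+  //   a.emit(x86::Inst::kIdMov, x86::eax, 0);      // forwarded to _emitI(...)
+  //
+  //   Operand ops[] = { x86::eax, x86::ebx };
+  //   a.emitOpArray(x86::Inst::kIdAdd, ops, 2);    // array form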
+
+ inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
+ setInstOptions(inst.options());
+ setExtraReg(inst.extraReg());
+ return _emitOpArray(inst.id(), operands, opCount);
+ }
+
+ //! \cond INTERNAL
+ //! Emits an instruction - all 6 operands must be defined.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
+ //! Emits instruction having operands stored in array.
+ ASMJIT_API virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount);
+ //! \endcond
+
+ //! \}
+
+ //! \name Emit Utilities
+ //! \{
+
+ ASMJIT_API Error emitProlog(const FuncFrame& frame);
+ ASMJIT_API Error emitEpilog(const FuncFrame& frame);
+ ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
+
+ //! \}
+
+ //! \name Align
+ //! \{
+
+ //! Aligns the current CodeBuffer position to the `alignment` specified.
+ //!
+ //! The sequence that is used to fill the gap between the aligned location
+ //! and the current location depends on the align `mode`, see \ref AlignMode.
+ virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ //! Embeds raw data into the \ref CodeBuffer.
+ virtual Error embed(const void* data, size_t dataSize) = 0;
+
+ //! Embeds a typed data array.
+ //!
+  //! This is the most flexible function for embedding data as it allows you to:
+ //! - Assign a `typeId` to the data, so the emitter knows the type of
+ //! items stored in `data`. Binary data should use \ref Type::kIdU8.
+ //! - Repeat the given data `repeatCount` times, so the data can be used
+ //! as a fill pattern for example, or as a pattern used by SIMD instructions.
+ virtual Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
+
+ //! Embeds int8_t `value` repeated by `repeatCount`.
+ inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI8, &value, 1, repeatCount); }
+ //! Embeds uint8_t `value` repeated by `repeatCount`.
+ inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU8, &value, 1, repeatCount); }
+ //! Embeds int16_t `value` repeated by `repeatCount`.
+ inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI16, &value, 1, repeatCount); }
+ //! Embeds uint16_t `value` repeated by `repeatCount`.
+ inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU16, &value, 1, repeatCount); }
+ //! Embeds int32_t `value` repeated by `repeatCount`.
+ inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI32, &value, 1, repeatCount); }
+ //! Embeds uint32_t `value` repeated by `repeatCount`.
+ inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU32, &value, 1, repeatCount); }
+ //! Embeds int64_t `value` repeated by `repeatCount`.
+ inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI64, &value, 1, repeatCount); }
+ //! Embeds uint64_t `value` repeated by `repeatCount`.
+ inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU64, &value, 1, repeatCount); }
+ //! Embeds a floating point `value` repeated by `repeatCount`.
+ inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(Type::kIdF32, &value, 1, repeatCount); }
+ //! Embeds a floating point `value` repeated by `repeatCount`.
+  inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(Type::IdOfT<double>::kTypeId, &value, 1, repeatCount); }
+
+ //! Embeds a constant pool at the current offset by performing the following:
+ //! 1. Aligns by using kAlignData to the minimum `pool` alignment.
+ //! 2. Binds the ConstPool label so it's bound to an aligned location.
+ //! 3. Emits ConstPool content.
+ virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
+
+ //! Embeds an absolute `label` address as data.
+ //!
+ //! The `dataSize` is an optional argument that can be used to specify the
+ //! size of the address data. If it's zero (default) the address size is
+ //! deduced from the target architecture (either 4 or 8 bytes).
+ virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
+
+  //! Embeds a delta (distance) between the `label` and `base`, calculating it
+  //! as `label - base`. This function was designed to make it easier to embed
+  //! lookup tables where each entry is the relative distance between two labels.
+ virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
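+
+  // An embedding sketch, assuming an attached assembler `a`:
+  //
+  //   Label table = a.newLabel();
+  //   a.bind(table);
+  //   a.embedUInt32(0xDEADBEEFu, 4);   // four repeated dwords
+  //   a.embedLabel(table);             // absolute address of `table` as data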
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ //! Emits a comment stored in `data` with an optional `size` parameter.
+ virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
+
+ //! Emits a formatted comment specified by `fmt` and variable number of arguments.
+ ASMJIT_API Error commentf(const char* fmt, ...);
+ //! Emits a formatted comment specified by `fmt` and `ap`.
+ ASMJIT_API Error commentv(const char* fmt, va_list ap);
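+
+  // For an Assembler, comments are dropped unless a Logger is attached (see
+  // commentf() in emitter.cpp); a sketch, assuming an attached assembler `a`:
+  //
+  //   a.commentf("spill area is %u bytes", 32);
+  //   a.setInlineComment("loop header");   // annotates only the next instruction
+  //   a.mov(x86::eax, 0);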
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ //! Called after the emitter was attached to `CodeHolder`.
+ virtual Error onAttach(CodeHolder* code) noexcept = 0;
+ //! Called after the emitter was detached from `CodeHolder`.
+ virtual Error onDetach(CodeHolder* code) noexcept = 0;
+
+ //! Called when \ref CodeHolder has updated an important setting, which
+ //! involves the following:
+ //!
+ //! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been
+ //! called).
+ //! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler()
+ //! has been called).
+ //!
+ //! This function ensures that the settings are properly propagated from
+ //! \ref CodeHolder to the emitter.
+ //!
+ //! \note This function is virtual and can be overridden, however, if you
+ //! do so, always call \ref BaseEmitter::onSettingsUpdated() within your
+  //! own implementation to ensure that the emitter is in a consistent state.
+ ASMJIT_API virtual void onSettingsUpdated() noexcept;
+
+ //! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+ ASMJIT_DEPRECATED("Use environment() instead")
+ inline CodeInfo codeInfo() const noexcept {
+ return CodeInfo(_environment, _code ? _code->baseAddress() : Globals::kNoBaseAddress);
+ }
+
+ ASMJIT_DEPRECATED("Use arch() instead")
+ inline uint32_t archId() const noexcept { return arch(); }
+
+ ASMJIT_DEPRECATED("Use registerSize() instead")
+ inline uint32_t gpSize() const noexcept { return registerSize(); }
+
+ ASMJIT_DEPRECATED("Use encodingOptions() instead")
+ inline uint32_t emitterOptions() const noexcept { return encodingOptions(); }
+
+ ASMJIT_DEPRECATED("Use addEncodingOptions() instead")
+ inline void addEmitterOptions(uint32_t options) noexcept { addEncodingOptions(options); }
+
+ ASMJIT_DEPRECATED("Use clearEncodingOptions() instead")
+ inline void clearEmitterOptions(uint32_t options) noexcept { clearEncodingOptions(options); }
+
+ ASMJIT_DEPRECATED("Use forcedInstOptions() instead")
+ inline uint32_t globalInstOptions() const noexcept { return forcedInstOptions(); }
+#endif // !ASMJIT_NO_DEPRECATED
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITTER_H_INCLUDED
diff --git a/Theodosius/asmjit/core/emitterutils.cpp b/Theodosius/asmjit/core/emitterutils.cpp
new file mode 100644
index 0000000..1115934
--- /dev/null
+++ b/Theodosius/asmjit/core/emitterutils.cpp
@@ -0,0 +1,150 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/emitterutils_p.h"
+#include "../core/formatter.h"
+#include "../core/logger.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::EmitterUtils]
+// ============================================================================
+
+namespace EmitterUtils {
+
+#ifndef ASMJIT_NO_LOGGING
+
+Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
+ size_t currentSize = sb.size();
+ size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
+
+ ASMJIT_ASSERT(binSize >= dispSize);
+ const size_t kNoBinSize = SIZE_MAX;
+
+ if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
+ size_t align = kMaxInstLineSize;
+ char sep = ';';
+
+ for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
+ size_t begin = sb.size();
+ ASMJIT_PROPAGATE(sb.padEnd(align));
+
+ if (sep) {
+ ASMJIT_PROPAGATE(sb.append(sep));
+ ASMJIT_PROPAGATE(sb.append(' '));
+ }
+
+ // Append binary data or comment.
+ if (i == 0) {
+ ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
+ ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
+ ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
+ if (commentSize == 0) break;
+ }
+ else {
+ ASMJIT_PROPAGATE(sb.append(comment, commentSize));
+ }
+
+ currentSize += sb.size() - begin;
+ align += kMaxBinarySize;
+ sep = '|';
+ }
+ }
+
+ return sb.append('\n');
+}
+
+void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
+ Logger* logger = self->logger();
+
+ StringTmp<512> sb;
+ size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : SIZE_MAX;
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
+ Formatter::formatLabel(sb, logger->flags(), self, label.id());
+ sb.append(':');
+ EmitterUtils::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
+ logger->log(sb.data(), sb.size());
+}
+
+void logInstructionEmitted(
+ BaseAssembler* self,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
+
+ Logger* logger = self->logger();
+ ASMJIT_ASSERT(logger != nullptr);
+
+ StringTmp<256> sb;
+ uint32_t flags = logger->flags();
+
+ uint8_t* beforeCursor = self->bufferPtr();
+ intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
+
+ Operand_ opArray[Globals::kMaxOpCount];
+ EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
+ Formatter::formatInstruction(sb, flags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
+
+ if ((flags & FormatOptions::kFlagMachineCode) != 0)
+ EmitterUtils::formatLine(sb, self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
+ else
+ EmitterUtils::formatLine(sb, nullptr, SIZE_MAX, 0, 0, self->inlineComment());
+ logger->log(sb);
+}
+
+Error logInstructionFailed(
+ BaseAssembler* self,
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
+
+ StringTmp<256> sb;
+ sb.append(DebugUtils::errorAsString(err));
+ sb.append(": ");
+
+ Operand_ opArray[Globals::kMaxOpCount];
+ EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
+
+ Formatter::formatInstruction(sb, 0, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
+
+ if (self->inlineComment()) {
+ sb.append(" ; ");
+ sb.append(self->inlineComment());
+ }
+
+ self->resetInstOptions();
+ self->resetExtraReg();
+ self->resetInlineComment();
+ return self->reportError(err, sb.data());
+}
+
+#endif
+
+} // {EmitterUtils}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/emitterutils_p.h b/Theodosius/asmjit/core/emitterutils_p.h
new file mode 100644
index 0000000..7e222d3
--- /dev/null
+++ b/Theodosius/asmjit/core/emitterutils_p.h
@@ -0,0 +1,109 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+
+#include "../core/emitter.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+class BaseAssembler;
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::EmitterUtils]
+// ============================================================================
+
+namespace EmitterUtils {
+
+static const Operand_ noExt[3] {};
+
+enum kOpIndex {
+ kOp3 = 0,
+ kOp4 = 1,
+ kOp5 = 2
+};
+
+static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
+ uint32_t opCount = 0;
+
+ if (opExt[kOp3].isNone()) {
+ if (!o0.isNone()) opCount = 1;
+ if (!o1.isNone()) opCount = 2;
+ if (!o2.isNone()) opCount = 3;
+ }
+ else {
+ opCount = 4;
+ if (!opExt[kOp4].isNone()) {
+ opCount = 5 + uint32_t(!opExt[kOp5].isNone());
+ }
+ }
+
+ return opCount;
+}
+
+static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
+ dst[0].copyFrom(o0);
+ dst[1].copyFrom(o1);
+ dst[2].copyFrom(o2);
+ dst[3].copyFrom(opExt[kOp3]);
+ dst[4].copyFrom(opExt[kOp4]);
+ dst[5].copyFrom(opExt[kOp5]);
+}
+
+#ifndef ASMJIT_NO_LOGGING
+enum : uint32_t {
+  // Has to be big enough to hold all the metadata the compiler can assign to
+  // a single instruction.
+ kMaxInstLineSize = 44,
+ kMaxBinarySize = 26
+};
+
+Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
+
+void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
+
+void logInstructionEmitted(
+ BaseAssembler* self,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
+
+Error logInstructionFailed(
+ BaseAssembler* self,
+ Error err, uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
+#endif
+
+}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
+
diff --git a/Theodosius/asmjit/core/environment.cpp b/Theodosius/asmjit/core/environment.cpp
new file mode 100644
index 0000000..3be2b15
--- /dev/null
+++ b/Theodosius/asmjit/core/environment.cpp
@@ -0,0 +1,64 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/environment.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// X86 Target
+// ----------
+//
+// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
+// stack alignment. Other operating systems are assumed to have
+// 4-byte alignment by default for safety reasons.
+// - 64-bit - stack must be aligned to 16 bytes.
+//
+// ARM Target
+// ----------
+//
+// - 32-bit - Stack must be aligned to 8 bytes.
+// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
+uint32_t Environment::stackAlignment() const noexcept {
+ if (is64Bit()) {
+ // Assume 16-byte alignment on any 64-bit target.
+ return 16;
+ }
+ else {
+ // The following platforms use 16-byte alignment in 32-bit mode.
+ if (isPlatformLinux() ||
+ isPlatformBSD() ||
+ isPlatformApple() ||
+ isPlatformHaiku()) {
+ return 16u;
+ }
+
+ if (isFamilyARM())
+ return 8;
+
+ // Bail to 4-byte alignment if we don't know.
+ return 4;
+ }
+}
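+
+// Illustrative check of the rules above (a sketch, not part of the upstream
+// sources); the variable names are hypothetical:
+//
+//   Environment win64(Environment::kArchX64, Environment::kSubArchUnknown,
+//                     Environment::kVendorUnknown, Environment::kPlatformWindows);
+//   win64.stackAlignment();   // 16 - any 64-bit target.
+//
+//   Environment linux32(Environment::kArchX86, Environment::kSubArchUnknown,
+//                       Environment::kVendorUnknown, Environment::kPlatformLinux);
+//   linux32.stackAlignment(); // 16 - 32-bit Linux guarantees 16-byte alignment.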
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/environment.h b/Theodosius/asmjit/core/environment.h
new file mode 100644
index 0000000..79e6f7c
--- /dev/null
+++ b/Theodosius/asmjit/core/environment.h
@@ -0,0 +1,612 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
+#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
+
+#include "../core/globals.h"
+
+#if defined(__APPLE__)
+  #include <TargetConditionals.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::Environment]
+// ============================================================================
+
+//! Represents an environment, which is usually related to a \ref Target.
+//!
+//! An environment usually has an 'arch-subarch-vendor-os-abi' format, which is
+//! sometimes called a "Triple" (historically it consisted of only 3 parts) or
+//! a "Tuple", a convention used by Debian Linux.
+//!
+//! AsmJit doesn't support all possible combinations of architectures and ABIs;
+//! however, it models the environment similarly to other compilers for future
+//! extensibility.
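+//!
+//! A minimal usage sketch (illustrative only, not taken from the upstream docs):
+//!
+//! ```
+//! // Describe a 64-bit Windows target that uses the MSVC ABI.
+//! Environment env(Environment::kArchX64);
+//! env.setPlatform(Environment::kPlatformWindows);
+//! env.setAbi(Environment::kAbiMSVC);
+//!
+//! bool is64   = env.is64Bit();         // true
+//! uint32_t gp = env.registerSize();    // 8 (bytes per general purpose register)
+//! uint32_t sa = env.stackAlignment();  // 16
+//! ```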
+class Environment {
+public:
+ //! Architecture type, see \ref Arch.
+ uint8_t _arch;
+ //! Sub-architecture type, see \ref SubArch.
+ uint8_t _subArch;
+ //! Vendor type, see \ref Vendor.
+ uint8_t _vendor;
+ //! Platform type, see \ref Platform.
+ uint8_t _platform;
+ //! ABI type, see \ref Abi.
+ uint8_t _abi;
+ //! Object format, see \ref Format.
+ uint8_t _format;
+ //! Reserved for future use, must be zero.
+ uint16_t _reserved;
+
+ //! Architecture.
+ enum Arch : uint32_t {
+ //! Unknown or uninitialized architecture.
+ kArchUnknown = 0,
+
+ //! Mask used by 32-bit architectures (odd are 32-bit, even are 64-bit).
+ kArch32BitMask = 0x01,
+ //! Mask used by big-endian architectures.
+ kArchBigEndianMask = 0x80u,
+
+ //! 32-bit X86 architecture.
+ kArchX86 = 1,
+ //! 64-bit X86 architecture also known as X86_64 and AMD64.
+ kArchX64 = 2,
+
+ //! 32-bit RISC-V architecture.
+ kArchRISCV32 = 3,
+ //! 64-bit RISC-V architecture.
+ kArchRISCV64 = 4,
+
+ //! 32-bit ARM architecture (little endian).
+ kArchARM = 5,
+ //! 32-bit ARM architecture (big endian).
+ kArchARM_BE = kArchARM | kArchBigEndianMask,
+    //! 64-bit ARM architecture (little endian).
+    kArchAArch64 = 6,
+    //! 64-bit ARM architecture (big endian).
+ kArchAArch64_BE = kArchAArch64 | kArchBigEndianMask,
+ //! 32-bit ARM in Thumb mode (little endian).
+ kArchThumb = 7,
+ //! 32-bit ARM in Thumb mode (big endian).
+ kArchThumb_BE = kArchThumb | kArchBigEndianMask,
+
+ // 8 is not used, even numbers are 64-bit architectures.
+
+    //! 32-bit MIPS architecture (little endian).
+    kArchMIPS32_LE = 9,
+    //! 32-bit MIPS architecture (big endian).
+    kArchMIPS32_BE = kArchMIPS32_LE | kArchBigEndianMask,
+    //! 64-bit MIPS architecture (little endian).
+    kArchMIPS64_LE = 10,
+    //! 64-bit MIPS architecture (big endian).
+ kArchMIPS64_BE = kArchMIPS64_LE | kArchBigEndianMask,
+
+ //! Count of architectures.
+ kArchCount = 11
+ };
+
+ //! Sub-architecture.
+ enum SubArch : uint32_t {
+ //! Unknown or uninitialized architecture sub-type.
+ kSubArchUnknown = 0,
+
+ //! Count of sub-architectures.
+ kSubArchCount
+ };
+
+ //! Vendor.
+ //!
+ //! \note AsmJit doesn't use vendor information at the moment. It's provided
+ //! for future use, if required.
+ enum Vendor : uint32_t {
+ //! Unknown or uninitialized vendor.
+ kVendorUnknown = 0,
+
+ //! Count of vendor identifiers.
+ kVendorCount
+ };
+
+ //! Platform / OS.
+ enum Platform : uint32_t {
+ //! Unknown or uninitialized platform.
+ kPlatformUnknown = 0,
+
+ //! Windows OS.
+ kPlatformWindows,
+
+ //! Other platform, most likely POSIX based.
+ kPlatformOther,
+
+ //! Linux OS.
+ kPlatformLinux,
+ //! GNU/Hurd OS.
+ kPlatformHurd,
+
+ //! FreeBSD OS.
+ kPlatformFreeBSD,
+ //! OpenBSD OS.
+ kPlatformOpenBSD,
+ //! NetBSD OS.
+ kPlatformNetBSD,
+ //! DragonFly BSD OS.
+ kPlatformDragonFlyBSD,
+
+ //! Haiku OS.
+ kPlatformHaiku,
+
+ //! Apple OSX.
+ kPlatformOSX,
+ //! Apple iOS.
+ kPlatformIOS,
+ //! Apple TVOS.
+ kPlatformTVOS,
+ //! Apple WatchOS.
+ kPlatformWatchOS,
+
+ //! Emscripten platform.
+ kPlatformEmscripten,
+
+ //! Count of platform identifiers.
+ kPlatformCount
+ };
+
+ //! ABI.
+ enum Abi : uint32_t {
+    //! Unknown or uninitialized ABI.
+ kAbiUnknown = 0,
+ //! Microsoft ABI.
+ kAbiMSVC,
+ //! GNU ABI.
+ kAbiGNU,
+ //! Android Environment / ABI.
+ kAbiAndroid,
+ //! Cygwin ABI.
+ kAbiCygwin,
+
+ //! Count of known ABI types.
+ kAbiCount
+ };
+
+ //! Object format.
+ //!
+ //! \note AsmJit doesn't really use anything except \ref kFormatUnknown and
+ //! \ref kFormatJIT at the moment. Object file formats are provided for
+ //! future extensibility and a possibility to generate object files at some
+ //! point.
+ enum Format : uint32_t {
+ //! Unknown or uninitialized object format.
+ kFormatUnknown = 0,
+
+ //! JIT code generation object, most likely \ref JitRuntime or a custom
+ //! \ref Target implementation.
+ kFormatJIT,
+
+ //! Executable and linkable format (ELF).
+ kFormatELF,
+ //! Common object file format.
+ kFormatCOFF,
+ //! Extended COFF object format.
+ kFormatXCOFF,
+ //! Mach object file format.
+ kFormatMachO,
+
+ //! Count of object format types.
+ kFormatCount
+ };
+
+ //! \name Environment Detection
+ //! \{
+
+#ifdef _DOXYGEN
+ //! Architecture detected at compile-time (architecture of the host).
+ static constexpr Arch kArchHost = DETECTED_AT_COMPILE_TIME;
+ //! Sub-architecture detected at compile-time (sub-architecture of the host).
+ static constexpr SubArch kSubArchHost = DETECTED_AT_COMPILE_TIME;
+ //! Vendor detected at compile-time (vendor of the host).
+ static constexpr Vendor kVendorHost = DETECTED_AT_COMPILE_TIME;
+ //! Platform detected at compile-time (platform of the host).
+ static constexpr Platform kPlatformHost = DETECTED_AT_COMPILE_TIME;
+ //! ABI detected at compile-time (ABI of the host).
+ static constexpr Abi kAbiHost = DETECTED_AT_COMPILE_TIME;
+#else
+ static constexpr Arch kArchHost =
+ ASMJIT_ARCH_X86 == 32 ? kArchX86 :
+ ASMJIT_ARCH_X86 == 64 ? kArchX64 :
+
+ ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kArchARM :
+ ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kArchARM_BE :
+ ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kArchAArch64 :
+ ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kArchAArch64_BE :
+
+ ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kArchMIPS32_LE :
+ ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kArchMIPS32_BE :
+ ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kArchMIPS64_LE :
+ ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kArchMIPS64_BE :
+
+ kArchUnknown;
+
+ static constexpr SubArch kSubArchHost =
+ kSubArchUnknown;
+
+ static constexpr Vendor kVendorHost =
+ kVendorUnknown;
+
+ static constexpr Platform kPlatformHost =
+#if defined(__EMSCRIPTEN__)
+ kPlatformEmscripten
+#elif defined(_WIN32)
+ kPlatformWindows
+#elif defined(__linux__)
+ kPlatformLinux
+#elif defined(__gnu_hurd__)
+ kPlatformHurd
+#elif defined(__FreeBSD__)
+ kPlatformFreeBSD
+#elif defined(__OpenBSD__)
+ kPlatformOpenBSD
+#elif defined(__NetBSD__)
+ kPlatformNetBSD
+#elif defined(__DragonFly__)
+ kPlatformDragonFlyBSD
+#elif defined(__HAIKU__)
+ kPlatformHaiku
+#elif defined(__APPLE__) && TARGET_OS_OSX
+ kPlatformOSX
+#elif defined(__APPLE__) && TARGET_OS_TV
+ kPlatformTVOS
+#elif defined(__APPLE__) && TARGET_OS_WATCH
+ kPlatformWatchOS
+#elif defined(__APPLE__) && TARGET_OS_IPHONE
+ kPlatformIOS
+#else
+ kPlatformOther
+#endif
+ ;
+
+ static constexpr Abi kAbiHost =
+#if defined(_MSC_VER)
+ kAbiMSVC
+#elif defined(__CYGWIN__)
+ kAbiCygwin
+#elif defined(__MINGW32__) || defined(__GLIBC__)
+ kAbiGNU
+#elif defined(__ANDROID__)
+ kAbiAndroid
+#else
+ kAbiUnknown
+#endif
+ ;
+
+#endif
+
+ //! \}
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline Environment() noexcept :
+ _arch(uint8_t(kArchUnknown)),
+ _subArch(uint8_t(kSubArchUnknown)),
+ _vendor(uint8_t(kVendorUnknown)),
+ _platform(uint8_t(kPlatformUnknown)),
+ _abi(uint8_t(kAbiUnknown)),
+ _format(uint8_t(kFormatUnknown)),
+ _reserved(0) {}
+
+ inline Environment(const Environment& other) noexcept = default;
+
+ inline explicit Environment(uint32_t arch,
+ uint32_t subArch = kSubArchUnknown,
+ uint32_t vendor = kVendorUnknown,
+ uint32_t platform = kPlatformUnknown,
+ uint32_t abi = kAbiUnknown,
+ uint32_t format = kFormatUnknown) noexcept {
+ init(arch, subArch, vendor, platform, abi, format);
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Environment& operator=(const Environment& other) noexcept = default;
+
+ inline bool operator==(const Environment& other) const noexcept { return equals(other); }
+ inline bool operator!=(const Environment& other) const noexcept { return !equals(other); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the environment is not set up.
+ //!
+ //! Returns true if all members are zero, and thus unknown.
+ inline bool empty() const noexcept {
+    // Unfortunately compilers won't optimize this into a single comparison if
+    // the fields are checked one by one, so compare the packed representation.
+ return _packed() == 0;
+ }
+
+  //! Tests whether the environment is initialized, which means it must have
+  //! a valid architecture.
+ inline bool isInitialized() const noexcept {
+ return _arch != kArchUnknown;
+ }
+
+ inline uint64_t _packed() const noexcept {
+ uint64_t x;
+ memcpy(&x, this, 8);
+ return x;
+ }
+
+ //! Resets all members of the environment to zero / unknown.
+ inline void reset() noexcept {
+ _arch = uint8_t(kArchUnknown);
+ _subArch = uint8_t(kSubArchUnknown);
+ _vendor = uint8_t(kVendorUnknown);
+ _platform = uint8_t(kPlatformUnknown);
+ _abi = uint8_t(kAbiUnknown);
+ _format = uint8_t(kFormatUnknown);
+ _reserved = 0;
+ }
+
+ inline bool equals(const Environment& other) const noexcept {
+ return _packed() == other._packed();
+ }
+
+ //! Returns the architecture, see \ref Arch.
+ inline uint32_t arch() const noexcept { return _arch; }
+ //! Returns the sub-architecture, see \ref SubArch.
+ inline uint32_t subArch() const noexcept { return _subArch; }
+ //! Returns vendor, see \ref Vendor.
+ inline uint32_t vendor() const noexcept { return _vendor; }
+ //! Returns target's platform or operating system, see \ref Platform.
+ inline uint32_t platform() const noexcept { return _platform; }
+ //! Returns target's ABI, see \ref Abi.
+ inline uint32_t abi() const noexcept { return _abi; }
+ //! Returns target's object format, see \ref Format.
+ inline uint32_t format() const noexcept { return _format; }
+
+ inline void init(uint32_t arch,
+ uint32_t subArch = kSubArchUnknown,
+ uint32_t vendor = kVendorUnknown,
+ uint32_t platform = kPlatformUnknown,
+ uint32_t abi = kAbiUnknown,
+ uint32_t format = kFormatUnknown) noexcept {
+ _arch = uint8_t(arch);
+ _subArch = uint8_t(subArch);
+ _vendor = uint8_t(vendor);
+ _platform = uint8_t(platform);
+ _abi = uint8_t(abi);
+ _format = uint8_t(format);
+ _reserved = 0;
+ }
+
+ inline bool isArchX86() const noexcept { return _arch == kArchX86; }
+ inline bool isArchX64() const noexcept { return _arch == kArchX64; }
+ inline bool isArchRISCV32() const noexcept { return _arch == kArchRISCV32; }
+ inline bool isArchRISCV64() const noexcept { return _arch == kArchRISCV64; }
+ inline bool isArchARM() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchARM; }
+ inline bool isArchThumb() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchThumb; }
+ inline bool isArchAArch64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchAArch64; }
+ inline bool isArchMIPS32() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS32_LE; }
+ inline bool isArchMIPS64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS64_LE; }
+
+ //! Tests whether the architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return is32Bit(_arch); }
+ //! Tests whether the architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return is64Bit(_arch); }
+
+ //! Tests whether the architecture is little endian.
+ inline bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
+ //! Tests whether the architecture is big endian.
+ inline bool isBigEndian() const noexcept { return isBigEndian(_arch); }
+
+ //! Tests whether this architecture is of X86 family.
+ inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
+ //! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
+ inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
+ //! Tests whether this architecture family is ARM, Thumb, or AArch64.
+ inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
+  //! Tests whether this architecture family is MIPS or MIPS64.
+ inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
+
+ //! Tests whether the environment platform is Windows.
+ inline bool isPlatformWindows() const noexcept { return _platform == kPlatformWindows; }
+
+ //! Tests whether the environment platform is Linux.
+ inline bool isPlatformLinux() const noexcept { return _platform == kPlatformLinux; }
+
+ //! Tests whether the environment platform is Hurd.
+ inline bool isPlatformHurd() const noexcept { return _platform == kPlatformHurd; }
+
+ //! Tests whether the environment platform is Haiku.
+ inline bool isPlatformHaiku() const noexcept { return _platform == kPlatformHaiku; }
+
+ //! Tests whether the environment platform is any BSD.
+ inline bool isPlatformBSD() const noexcept {
+ return _platform == kPlatformFreeBSD ||
+ _platform == kPlatformOpenBSD ||
+ _platform == kPlatformNetBSD ||
+ _platform == kPlatformDragonFlyBSD;
+ }
+
+ //! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
+ inline bool isPlatformApple() const noexcept {
+ return _platform == kPlatformOSX ||
+ _platform == kPlatformIOS ||
+ _platform == kPlatformTVOS ||
+ _platform == kPlatformWatchOS;
+ }
+
+ //! Tests whether the ABI is MSVC.
+ inline bool isAbiMSVC() const noexcept { return _abi == kAbiMSVC; }
+ //! Tests whether the ABI is GNU.
+ inline bool isAbiGNU() const noexcept { return _abi == kAbiGNU; }
+
+ //! Returns a calculated stack alignment for this environment.
+ ASMJIT_API uint32_t stackAlignment() const noexcept;
+
+ //! Returns a native register size of this architecture.
+ uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
+
+ //! Sets the architecture to `arch`.
+ inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
+ //! Sets the sub-architecture to `subArch`.
+ inline void setSubArch(uint32_t subArch) noexcept { _subArch = uint8_t(subArch); }
+ //! Sets the vendor to `vendor`.
+ inline void setVendor(uint32_t vendor) noexcept { _vendor = uint8_t(vendor); }
+ //! Sets the platform to `platform`.
+ inline void setPlatform(uint32_t platform) noexcept { _platform = uint8_t(platform); }
+ //! Sets the ABI to `abi`.
+ inline void setAbi(uint32_t abi) noexcept { _abi = uint8_t(abi); }
+ //! Sets the object format to `format`.
+ inline void setFormat(uint32_t format) noexcept { _format = uint8_t(format); }
+
+ //! \}
+
+ //! \name Static Utilities
+ //! \{
+
+ static inline bool isValidArch(uint32_t arch) noexcept {
+ return (arch & ~kArchBigEndianMask) != 0 &&
+ (arch & ~kArchBigEndianMask) < kArchCount;
+ }
+
+ //! Tests whether the given architecture `arch` is 32-bit.
+ static inline bool is32Bit(uint32_t arch) noexcept {
+ return (arch & kArch32BitMask) == kArch32BitMask;
+ }
+
+ //! Tests whether the given architecture `arch` is 64-bit.
+ static inline bool is64Bit(uint32_t arch) noexcept {
+ return (arch & kArch32BitMask) == 0;
+ }
+
+ //! Tests whether the given architecture `arch` is little endian.
+ static inline bool isLittleEndian(uint32_t arch) noexcept {
+ return (arch & kArchBigEndianMask) == 0;
+ }
+
+ //! Tests whether the given architecture `arch` is big endian.
+ static inline bool isBigEndian(uint32_t arch) noexcept {
+ return (arch & kArchBigEndianMask) == kArchBigEndianMask;
+ }
+
+ //! Tests whether the given architecture is AArch64.
+ static inline bool isArchAArch64(uint32_t arch) noexcept {
+ arch &= ~kArchBigEndianMask;
+ return arch == kArchAArch64;
+ }
+
+ //! Tests whether the given architecture family is X86 or X64.
+ static inline bool isFamilyX86(uint32_t arch) noexcept {
+ return arch == kArchX86 ||
+ arch == kArchX64;
+ }
+
+ //! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
+ static inline bool isFamilyRISCV(uint32_t arch) noexcept {
+ return arch == kArchRISCV32 ||
+ arch == kArchRISCV64;
+ }
+
+ //! Tests whether the given architecture family is ARM, Thumb, or AArch64.
+ static inline bool isFamilyARM(uint32_t arch) noexcept {
+ arch &= ~kArchBigEndianMask;
+ return arch == kArchARM ||
+ arch == kArchAArch64 ||
+ arch == kArchThumb;
+ }
+
+  //! Tests whether the given architecture family is MIPS or MIPS64.
+ static inline bool isFamilyMIPS(uint32_t arch) noexcept {
+ arch &= ~kArchBigEndianMask;
+ return arch == kArchMIPS32_LE ||
+ arch == kArchMIPS64_LE;
+ }
+
+ //! Returns a native general purpose register size from the given architecture.
+ static uint32_t registerSizeFromArch(uint32_t arch) noexcept {
+ return is32Bit(arch) ? 4u : 8u;
+ }
+
+ //! \}
+};
+
+//! Returns the host environment constructed from preprocessor macros defined
+//! by the compiler.
+//!
+//! The returned environment should precisely match the target host architecture,
+//! sub-architecture, platform, and ABI.
+static ASMJIT_INLINE Environment hostEnvironment() noexcept {
+ return Environment(Environment::kArchHost,
+ Environment::kSubArchHost,
+ Environment::kVendorHost,
+ Environment::kPlatformHost,
+ Environment::kAbiHost,
+ Environment::kFormatUnknown);
+}
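+
+// Minimal usage sketch (illustrative only, not part of the upstream sources):
+//
+//   Environment host = hostEnvironment();
+//   if (host.isFamilyX86() && host.is64Bit()) {
+//     // Compiled for X86_64; host.registerSize() == 8.
+//   }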
+
+static_assert(sizeof(Environment) == 8,
+ "Environment must occupy exactly 8 bytes.");
+
+//! \}
+
+#ifndef ASMJIT_NO_DEPRECATED
+class ASMJIT_DEPRECATED_STRUCT("Use Environment instead") ArchInfo : public Environment {
+public:
+ inline ArchInfo() noexcept : Environment() {}
+
+ inline ArchInfo(const Environment& other) noexcept : Environment(other) {}
+ inline explicit ArchInfo(uint32_t arch, uint32_t subArch = kSubArchUnknown) noexcept
+ : Environment(arch, subArch) {}
+
+ enum Id : uint32_t {
+ kIdNone = Environment::kArchUnknown,
+ kIdX86 = Environment::kArchX86,
+ kIdX64 = Environment::kArchX64,
+ kIdA32 = Environment::kArchARM,
+ kIdA64 = Environment::kArchAArch64,
+ kIdHost = Environment::kArchHost
+ };
+
+ enum SubType : uint32_t {
+ kSubIdNone = Environment::kSubArchUnknown
+ };
+
+ static inline ArchInfo host() noexcept { return ArchInfo(hostEnvironment()); }
+};
+#endif // !ASMJIT_NO_DEPRECATED
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
diff --git a/Theodosius/asmjit/core/errorhandler.cpp b/Theodosius/asmjit/core/errorhandler.cpp
new file mode 100644
index 0000000..8372d75
--- /dev/null
+++ b/Theodosius/asmjit/core/errorhandler.cpp
@@ -0,0 +1,37 @@
+
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/errorhandler.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+ErrorHandler::ErrorHandler() noexcept {}
+ErrorHandler::~ErrorHandler() noexcept {}
+
+ASMJIT_END_NAMESPACE
diff --git a/Theodosius/asmjit/core/errorhandler.h b/Theodosius/asmjit/core/errorhandler.h
new file mode 100644
index 0000000..2337cd8
--- /dev/null
+++ b/Theodosius/asmjit/core/errorhandler.h
@@ -0,0 +1,267 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
+#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_error_handling
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+//! Error handler can be used to override the default behavior of error handling.
+//!
+//! It's available to all classes that inherit `BaseEmitter`. Override
+//! \ref ErrorHandler::handleError() to implement your own error handler.
+//!
+//! The following use-cases are supported:
+//!
+//! - Record the error and continue code generation. This is the simplest
+//! approach that can be used to at least log possible errors.
+//! - Throw an exception. AsmJit doesn't use exceptions and is completely
+//! exception-safe, but it's perfectly legal to throw an exception from
+//! the error handler.
+//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler,
+//!   Builder and Compiler into a consistent state before calling \ref handleError(),
+//!   so `longjmp()` can be used without issues to cancel the code generation if
+//!   an error occurred. This method can be used if exception handling in your
+//!   project is turned off and you still want some comfort. In most cases it
+//!   should be safe, as AsmJit uses \ref Zone memory and the ownership of memory
+//!   it allocates always ends with the instance that allocated it. If you use this
+//!   approach, never jump outside the life-time of \ref CodeHolder and
+//!   \ref BaseEmitter.
+//!
+//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter,
+//! and the handler attached to the emitter takes priority. The example below
+//! uses an error handler that just prints the error, but lets AsmJit continue:
+//!
+//! ```
+//! // Error Handling #1 - Logging and returning Error.
+//! #include <asmjit/x86.h>
+//! #include <stdio.h>
+//!
+//! using namespace asmjit;
+//!
+//! // Error handler that just prints the error and lets AsmJit ignore it.
+//! class SimpleErrorHandler : public ErrorHandler {
+//! public:
+//! Error err;
+//!
+//! inline SimpleErrorHandler() : err(kErrorOk) {}
+//!
+//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//! this->err = err;
+//! fprintf(stderr, "ERROR: %s\n", message);
+//! }
+//! };
+//!
+//! int main() {
+//! JitRuntime rt;
+//! SimpleErrorHandler eh;
+//!
+//! CodeHolder code;
+//! code.init(rt.environment());
+//! code.setErrorHandler(&eh);
+//!
+//! // Try to emit instruction that doesn't exist.
+//! x86::Assembler a(&code);
+//! a.emit(x86::Inst::kIdMov, x86::xmm0, x86::xmm1);
+//!
+//! if (eh.err) {
+//! // Assembler failed!
+//! return 1;
+//! }
+//!
+//! return 0;
+//! }
+//! ```
+//!
+//! If an error happens during instruction emitting / encoding, the assembler behaves
+//! transactionally - the output buffer won't advance if encoding failed, thus
+//! either a fully encoded instruction or nothing is emitted. The error handling
+//! shown above is useful, but it's still not the best way of dealing with errors
+//! in AsmJit. The following example shows how to use exception handling to handle
+//! errors in a more C++ way:
+//!
+//! ```
+//! // Error Handling #2 - Throwing an exception.
+//! #include