From 853324ff52b01e6afb4480fe83fcc1ba5aa9696c Mon Sep 17 00:00:00 2001
From: mike1k <53958713+mike1k@users.noreply.github.com>
Date: Wed, 13 Apr 2022 22:15:12 -0500
Subject: [PATCH] PERSES Initial commit
---
.gitignore | 2 +
.gitmodules | 15 +
CMakeLists.txt | 102 ++
README.md | 160 +++
cmake.toml | 11 +
cmkr.cmake | 236 ++++
sdk/PersesSDK.h | 22 +
src/details.hpp | 84 ++
src/disasm.cpp | 238 ++++
src/disasm.hpp | 61 +
src/mapfileparser.cpp | 75 ++
src/mapfileparser.hpp | 55 +
src/mutationlight.cpp | 2441 ++++++++++++++++++++++++++++++++++++++
src/mutationlight.hpp | 48 +
src/perses.cpp | 177 +++
src/perses.hpp | 36 +
src/protectionschema.hpp | 33 +
src/util.hpp | 26 +
src/x86application.cpp | 1262 ++++++++++++++++++++
src/x86application.hpp | 51 +
src/x86util.hpp | 125 ++
vendor/CMakeLists.txt | 240 ++++
vendor/argparse | 1 +
vendor/asmjit | 1 +
vendor/cmake.toml | 17 +
vendor/pepp | 1 +
vendor/spdlog | 1 +
vendor/zydis | 1 +
28 files changed, 5522 insertions(+)
create mode 100644 .gitignore
create mode 100644 .gitmodules
create mode 100644 CMakeLists.txt
create mode 100644 README.md
create mode 100644 cmake.toml
create mode 100644 cmkr.cmake
create mode 100644 sdk/PersesSDK.h
create mode 100644 src/details.hpp
create mode 100644 src/disasm.cpp
create mode 100644 src/disasm.hpp
create mode 100644 src/mapfileparser.cpp
create mode 100644 src/mapfileparser.hpp
create mode 100644 src/mutationlight.cpp
create mode 100644 src/mutationlight.hpp
create mode 100644 src/perses.cpp
create mode 100644 src/perses.hpp
create mode 100644 src/protectionschema.hpp
create mode 100644 src/util.hpp
create mode 100644 src/x86application.cpp
create mode 100644 src/x86application.hpp
create mode 100644 src/x86util.hpp
create mode 100644 vendor/CMakeLists.txt
create mode 160000 vendor/argparse
create mode 160000 vendor/asmjit
create mode 100644 vendor/cmake.toml
create mode 160000 vendor/pepp
create mode 160000 vendor/spdlog
create mode 160000 vendor/zydis
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ab5925c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*.exe
+build/
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..6bbd539
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,15 @@
+[submodule "vendor/spdlog"]
+ path = vendor/spdlog
+ url = https://github.com/gabime/spdlog
+[submodule "vendor/asmjit"]
+ path = vendor/asmjit
+ url = https://github.com/asmjit/asmjit
+[submodule "vendor/argparse"]
+ path = vendor/argparse
+ url = https://github.com/p-ranav/argparse
+[submodule "vendor/pepp"]
+ path = vendor/pepp
+ url = https://github.com/mike1k/pepp
+[submodule "vendor/zydis"]
+ path = vendor/zydis
+ url = https://github.com/zyantific/zydis.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..a7b05e3
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,102 @@
+# This file is automatically generated from cmake.toml - DO NOT EDIT
+# See https://github.com/build-cpp/cmkr for more information
+
+cmake_minimum_required(VERSION 3.15)
+
+if(CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR)
+ message(FATAL_ERROR "In-tree builds are not supported. Run CMake from a separate directory: cmake -B build")
+endif()
+
+# Regenerate CMakeLists.txt automatically in the root project
+set(CMKR_ROOT_PROJECT OFF)
+if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+ set(CMKR_ROOT_PROJECT ON)
+
+ # Bootstrap cmkr
+ include(cmkr.cmake OPTIONAL RESULT_VARIABLE CMKR_INCLUDE_RESULT)
+ if(CMKR_INCLUDE_RESULT)
+ cmkr()
+ endif()
+
+ # Enable folder support
+ set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+endif()
+
+# Create a configure-time dependency on cmake.toml to improve IDE support
+if(CMKR_ROOT_PROJECT)
+ configure_file(cmake.toml cmake.toml COPYONLY)
+endif()
+
+project(perses)
+
+# vendor
+set(CMKR_CMAKE_FOLDER ${CMAKE_FOLDER})
+if(CMAKE_FOLDER)
+ set(CMAKE_FOLDER "${CMAKE_FOLDER}/vendor")
+else()
+ set(CMAKE_FOLDER vendor)
+endif()
+add_subdirectory(vendor)
+set(CMAKE_FOLDER ${CMKR_CMAKE_FOLDER})
+
+# Target perses
+set(CMKR_TARGET perses)
+set(perses_SOURCES "")
+
+list(APPEND perses_SOURCES
+ "src/disasm.cpp"
+ "src/mapfileparser.cpp"
+ "src/mutationlight.cpp"
+ "src/perses.cpp"
+ "src/x86application.cpp"
+ "src/details.hpp"
+ "src/disasm.hpp"
+ "src/mapfileparser.hpp"
+ "src/mutationlight.hpp"
+ "src/perses.hpp"
+ "src/protectionschema.hpp"
+ "src/util.hpp"
+ "src/x86application.hpp"
+ "src/x86util.hpp"
+)
+
+list(APPEND perses_SOURCES
+ cmake.toml
+)
+
+set(CMKR_SOURCES ${perses_SOURCES})
+add_executable(perses)
+
+if(perses_SOURCES)
+ target_sources(perses PRIVATE ${perses_SOURCES})
+endif()
+
+get_directory_property(CMKR_VS_STARTUP_PROJECT DIRECTORY ${PROJECT_SOURCE_DIR} DEFINITION VS_STARTUP_PROJECT)
+if(NOT CMKR_VS_STARTUP_PROJECT)
+ set_property(DIRECTORY ${PROJECT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT perses)
+endif()
+
+source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${perses_SOURCES})
+
+target_compile_definitions(perses PRIVATE
+ NOMINMAX
+ ZYCORE_STATIC_BUILD
+ ZYDIS_STATIC_BUILD
+ ASMJIT_STATIC
+)
+
+target_compile_features(perses PRIVATE
+ cxx_std_20
+)
+
+target_link_libraries(perses PRIVATE
+ spdlog
+ Zydis
+ asmjit
+ argparse
+ pepp
+)
+
+unset(CMKR_TARGET)
+unset(CMKR_SOURCES)
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6905741
--- /dev/null
+++ b/README.md
@@ -0,0 +1,160 @@
+
+
+
+
+# Introduction
+
+PERSES is an X86 code obfuscation engine that works with [Portable Executable](https://en.wikipedia.org/wiki/Portable_Executable) files. The obfuscation works by replacing a specific instruction with a larger, more sophisticated set that is semantically compatible with the original. PERSES only mutates 4 instructions yet has adverse effects on analyzers/decompilers due to the method of mutation. For more information on PERSES' inner workings, please check out the article written [here](https://back.engineering/13/04/2022/).
+
+PERSES is a work in progress and does not attempt to be a replacement for any established code obfuscation engine, so please be mindful when using it to protect your code. Furthermore, X64 support can be slightly improved and augmented to ensure semantic accuracy.
+
+# Reference Manual
+
+PERSES by default works off a command line. Listed below are the arguments required to utilize PERSES.
+
+| Argument | Description | May Require |
+|-------------------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |-------------------- |
+| `-f` or `--file` | Path to the input PE file. | :heavy_check_mark: |
+| `-a` or `--address` | Address or list of addresses required to be mutated. | :heavy_check_mark: |
+| `-s` or `--symbol` | Symbol or list of symbols to be mutated. This requires a linked `.map` file. | :x: |
+| `--map` | Map file to be linked. IDA Pro `.map` files must have their extension replaced with `.ida`. | :x: |
+| `--list` | List of functions to be mutated. Each entry must envelop one line and be formatted as `0x1000:0x2000` where `0x1000` is the start and `0x2000` is the end of the routine. | :x: |
+| `--x64` | Used to indicate that the file is of 64bit architecture (AMD64). | :heavy_check_mark: |
+| `--rets` | Allow PERSES to build a `RET` gadget pool used to create `JMP`s to random locations. | :x: |
+| `--scan` | Force PERSES to scan for code protection markers. | :x: |
+
+Both symbols and addresses can be used, but at least one of them must be present.
+
+> :information_source: Due to limitations in the `argparse` library, if more than one address or symbol are required, please append the `-a` or `-s` argument and parameters last.
+
+
+## Working with PERSES Manually
+
+If desired, PERSES can be used manually to generate mutated binaries. To begin, one must declare a `X86BinaryApplication` instance with the provided template parameter.
+
+* `X86BinaryApplication<PERSES_64BIT>(filepath)` to generate a `X86_64` instance.
+* `X86BinaryApplication<PERSES_32BIT>(filepath)` to generate a `X86` instance.
+
+### RET Gadgets
+
+After instantiating the object, the `RET` gadget pool mentioned above can be optionally created with `perses::buildKnownRetGadgets(app)`.
+
+### MAP Files
+
+MAP files are optionally produced by compilers to show symbols and sections present in a binary with their corresponding address. PERSES allows MAP files from `IDA Pro` and `MSVC` (tested on VS2022) to be linked via `X86BinaryApplication::linkMapFile`. Afterwards, symbols can be traversed via `X86BinaryApplication::getSymbols` or added directly into the mutation queue by calling `addRoutineBySymbol`. For instance, after linking the map file, adding `main` to the mutation queue is as simple as:
+
+```cpp
+app->addRoutineBySymbol("main", PERSES_MARKER_MUTATION);
+```
+
+MAP files can aid function size calculation by exposing known symbols, however, MAP linking is completely optional and only added as a convenience.
+
+### Function Lists
+
+If mutating a batch of functions is wanted, function lists can be parsed in order to add the specified routines automatically. This is done by calling `parseFunctionList`. Please be mindful of the required format listed [above](#Reference-Manual) in the argument table. Furthermore, the end address supplied is expected to be the end of a function, providing anything else will likely result in instability of the output program.
+
+### Markers
+
+The [PersesSDK](https://github.com/mike1k/perses/blob/master/sdk/PersesSDK.h) can be included into a project to emit a scannable pattern into code. PERSES makes use of compiler intrinsics to generate unique patterns. Beginning and end macros named `PERSES_MUTATION_START()` and `PERSES_MUTATION_END()` are provided.
+
+### Applying Transforms
+
+Applying mutation on all routines in the queue is done by calling `X86BinaryApplication::transformRoutines`. Transforms are applied via the corresponding schema. At the moment, there is only one schema supplied; `MutationLight`.
+
+### Compiling.
+
+Compilation of the new binary can be done with `X86BinaryApplication::compile`. PERSES creates a new file and appends `.perses` after the original filename.
+
+# Showcase
+
+Below are some example programs created to show the efficacy in regards to crippling decompiler output.
+
+## Hello, world!
+
+```cpp
+int main()
+{
+ PERSES_MUTATION_START()
+
+ printf("Hello, world!\n");
+ Sleep(100);
+
+ PERSES_MUTATION_END()
+
+ return getchar();
+}
+```
+
+### Output
+
+![Output1](https://i.imgur.com/k3MvscV.png)
+
+
+## Jump Table Mutation
+
+```cpp
+int main()
+{
+ int input = 0;
+
+ std::cin >> input;
+
+ switch (input)
+ {
+ case 0:
+ std::cout << "Value is zero" << std::endl;
+ break;
+ case 1:
+ std::cout << "Value is one" << std::endl;
+ break;
+ case 2:
+ std::cout << "Value is two" << std::endl;
+ break;
+ case 3:
+ std::cout << "Value is three" << std::endl;
+ break;
+ case 4:
+ std::cout << "Value is four" << std::endl;
+ break;
+ default:
+ std::cerr << "Unhandled value!" << std::endl;
+ break;
+ }
+
+ return getchar();
+}
+```
+
+> Full function mutation using command line
+
+```
+perses -f MutationTesting.exe --map MutationTesting.map --rets -s _main
+```
+
+### Output
+
+![Output2](https://i.imgur.com/79aWu5c.png)
+
+# Modification
+
+Additional schemas can be created then attached to `X86BinaryApplication::buildSchema`. Alternatively, `MutationLight` can be extended as it only supports a minimal set of instructions. In order to modify the existing schema, please thoroughly read and understand [mutationlight.cpp](https://github.com/mike1k/perses/blob/master/src/mutationlight.cpp).
+
+# Building (Windows)
+
+PERSES utilizes [cmkr](https://github.com/build-cpp/cmkr). In order to build the PERSES project, please run the following commands:
+
+```
+git clone --recursive https://github.com/mike1k/perses.git
+cmake -B build
+```
+
+# Dependencies
+
+PERSES makes use of multiple great libraries in order to achieve its objective.
+
+* [Zydis](https://github.com/zyantific/zydis)
+* [AsmJit](https://github.com/asmjit/asmjit)
+* [spdlog](https://github.com/gabime/spdlog)
+* [argparse](https://github.com/p-ranav/argparse)
+* [pepp](https://github.com/mike1k/pepp)
+
diff --git a/cmake.toml b/cmake.toml
new file mode 100644
index 0000000..22b877e
--- /dev/null
+++ b/cmake.toml
@@ -0,0 +1,11 @@
+[project]
+name = "perses"
+
+[subdir.vendor]
+
+[target.perses]
+type = "executable"
+sources = ["src/**.cpp", "src/**.hpp"]
+link-libraries = ["spdlog", "Zydis", "asmjit", "argparse", "pepp"]
+compile-features = ["cxx_std_20"]
+compile-definitions = ["NOMINMAX", "ZYCORE_STATIC_BUILD", "ZYDIS_STATIC_BUILD", "ASMJIT_STATIC"]
\ No newline at end of file
diff --git a/cmkr.cmake b/cmkr.cmake
new file mode 100644
index 0000000..9af0802
--- /dev/null
+++ b/cmkr.cmake
@@ -0,0 +1,236 @@
+include_guard()
+
+# Change these defaults to point to your infrastructure if desired
+set(CMKR_REPO "https://github.com/build-cpp/cmkr" CACHE STRING "cmkr git repository" FORCE)
+set(CMKR_TAG "v0.2.12" CACHE STRING "cmkr git tag (this needs to be available forever)" FORCE)
+set(CMKR_COMMIT_HASH "" CACHE STRING "cmkr git commit hash (optional)" FORCE)
+
+# To bootstrap/generate a cmkr project: cmake -P cmkr.cmake
+if(CMAKE_SCRIPT_MODE_FILE)
+ set(CMAKE_BINARY_DIR "${CMAKE_BINARY_DIR}/build")
+ set(CMAKE_CURRENT_BINARY_DIR "${CMAKE_BINARY_DIR}")
+ file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}")
+endif()
+
+# Set these from the command line to customize for development/debugging purposes
+set(CMKR_EXECUTABLE "" CACHE FILEPATH "cmkr executable")
+set(CMKR_SKIP_GENERATION OFF CACHE BOOL "skip automatic cmkr generation")
+set(CMKR_BUILD_TYPE "Debug" CACHE STRING "cmkr build configuration")
+mark_as_advanced(CMKR_REPO CMKR_TAG CMKR_COMMIT_HASH CMKR_EXECUTABLE CMKR_SKIP_GENERATION CMKR_BUILD_TYPE)
+
+# Disable cmkr if generation is disabled
+if(DEFINED ENV{CI} OR CMKR_SKIP_GENERATION OR CMKR_BUILD_SKIP_GENERATION)
+ message(STATUS "[cmkr] Skipping automatic cmkr generation")
+ unset(CMKR_BUILD_SKIP_GENERATION CACHE)
+ macro(cmkr)
+ endmacro()
+ return()
+endif()
+
+# Disable cmkr if no cmake.toml file is found
+if(NOT CMAKE_SCRIPT_MODE_FILE AND NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/cmake.toml")
+ message(AUTHOR_WARNING "[cmkr] Not found: ${CMAKE_CURRENT_SOURCE_DIR}/cmake.toml")
+ macro(cmkr)
+ endmacro()
+ return()
+endif()
+
+# Convert a Windows native path to CMake path
+if(CMKR_EXECUTABLE MATCHES "\\\\")
+ string(REPLACE "\\" "/" CMKR_EXECUTABLE_CMAKE "${CMKR_EXECUTABLE}")
+ set(CMKR_EXECUTABLE "${CMKR_EXECUTABLE_CMAKE}" CACHE FILEPATH "" FORCE)
+ unset(CMKR_EXECUTABLE_CMAKE)
+endif()
+
+# Helper macro to execute a process (COMMAND_ERROR_IS_FATAL ANY is 3.19 and higher)
+function(cmkr_exec)
+ execute_process(COMMAND ${ARGV} RESULT_VARIABLE CMKR_EXEC_RESULT)
+ if(NOT CMKR_EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "cmkr_exec(${ARGV}) failed (exit code ${CMKR_EXEC_RESULT})")
+ endif()
+endfunction()
+
+# Windows-specific hack (CMAKE_EXECUTABLE_PREFIX is not set at the moment)
+if(WIN32)
+ set(CMKR_EXECUTABLE_NAME "cmkr.exe")
+else()
+ set(CMKR_EXECUTABLE_NAME "cmkr")
+endif()
+
+# Use cached cmkr if found
+if(DEFINED ENV{CMKR_CACHE} AND EXISTS "$ENV{CMKR_CACHE}")
+ set(CMKR_DIRECTORY_PREFIX "$ENV{CMKR_CACHE}")
+ string(REPLACE "\\" "/" CMKR_DIRECTORY_PREFIX "${CMKR_DIRECTORY_PREFIX}")
+ if(NOT CMKR_DIRECTORY_PREFIX MATCHES "\\/$")
+ set(CMKR_DIRECTORY_PREFIX "${CMKR_DIRECTORY_PREFIX}/")
+ endif()
+ # Build in release mode for the cache
+ set(CMKR_BUILD_TYPE "Release")
+else()
+ set(CMKR_DIRECTORY_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/_cmkr_")
+endif()
+set(CMKR_DIRECTORY "${CMKR_DIRECTORY_PREFIX}${CMKR_TAG}")
+set(CMKR_CACHED_EXECUTABLE "${CMKR_DIRECTORY}/bin/${CMKR_EXECUTABLE_NAME}")
+
+# Handle upgrading logic
+if(CMKR_EXECUTABLE AND NOT CMKR_CACHED_EXECUTABLE STREQUAL CMKR_EXECUTABLE)
+ if(CMKR_EXECUTABLE MATCHES "^${CMAKE_CURRENT_BINARY_DIR}/_cmkr")
+ if(DEFINED ENV{CMKR_CACHE} AND EXISTS "$ENV{CMKR_CACHE}")
+ message(AUTHOR_WARNING "[cmkr] Switching to cached cmkr: '${CMKR_CACHED_EXECUTABLE}'")
+ if(EXISTS "${CMKR_CACHED_EXECUTABLE}")
+ set(CMKR_EXECUTABLE "${CMKR_CACHED_EXECUTABLE}" CACHE FILEPATH "Full path to cmkr executable" FORCE)
+ else()
+ unset(CMKR_EXECUTABLE CACHE)
+ endif()
+ else()
+ message(AUTHOR_WARNING "[cmkr] Upgrading '${CMKR_EXECUTABLE}' to '${CMKR_CACHED_EXECUTABLE}'")
+ unset(CMKR_EXECUTABLE CACHE)
+ endif()
+ elseif(DEFINED ENV{CMKR_CACHE} AND EXISTS "$ENV{CMKR_CACHE}" AND CMKR_EXECUTABLE MATCHES "^${CMKR_DIRECTORY_PREFIX}")
+ message(AUTHOR_WARNING "[cmkr] Upgrading cached '${CMKR_EXECUTABLE}' to '${CMKR_CACHED_EXECUTABLE}'")
+ unset(CMKR_EXECUTABLE CACHE)
+ endif()
+endif()
+
+if(CMKR_EXECUTABLE AND EXISTS "${CMKR_EXECUTABLE}")
+ message(VERBOSE "[cmkr] Found cmkr: '${CMKR_EXECUTABLE}'")
+elseif(CMKR_EXECUTABLE AND NOT CMKR_EXECUTABLE STREQUAL CMKR_CACHED_EXECUTABLE)
+ message(FATAL_ERROR "[cmkr] '${CMKR_EXECUTABLE}' not found")
+elseif(NOT CMKR_EXECUTABLE AND EXISTS "${CMKR_CACHED_EXECUTABLE}")
+ set(CMKR_EXECUTABLE "${CMKR_CACHED_EXECUTABLE}" CACHE FILEPATH "Full path to cmkr executable" FORCE)
+ message(STATUS "[cmkr] Found cached cmkr: '${CMKR_EXECUTABLE}'")
+else()
+ set(CMKR_EXECUTABLE "${CMKR_CACHED_EXECUTABLE}" CACHE FILEPATH "Full path to cmkr executable" FORCE)
+ message(VERBOSE "[cmkr] Bootstrapping '${CMKR_EXECUTABLE}'")
+
+ message(STATUS "[cmkr] Fetching cmkr...")
+ if(EXISTS "${CMKR_DIRECTORY}")
+ cmkr_exec("${CMAKE_COMMAND}" -E rm -rf "${CMKR_DIRECTORY}")
+ endif()
+ find_package(Git QUIET REQUIRED)
+ cmkr_exec("${GIT_EXECUTABLE}"
+ clone
+ --config advice.detachedHead=false
+ --branch ${CMKR_TAG}
+ --depth 1
+ ${CMKR_REPO}
+ "${CMKR_DIRECTORY}"
+ )
+ if(CMKR_COMMIT_HASH)
+ execute_process(
+ COMMAND "${GIT_EXECUTABLE}" checkout -q "${CMKR_COMMIT_HASH}"
+ RESULT_VARIABLE CMKR_EXEC_RESULT
+ WORKING_DIRECTORY "${CMKR_DIRECTORY}"
+ )
+ if(NOT CMKR_EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "Tag '${CMKR_TAG}' hash is not '${CMKR_COMMIT_HASH}'")
+ endif()
+ endif()
+ message(STATUS "[cmkr] Building cmkr (using system compiler)...")
+ cmkr_exec("${CMAKE_COMMAND}"
+ --no-warn-unused-cli
+ "${CMKR_DIRECTORY}"
+ "-B${CMKR_DIRECTORY}/build"
+ "-DCMAKE_BUILD_TYPE=${CMKR_BUILD_TYPE}"
+ "-DCMAKE_UNITY_BUILD=ON"
+ "-DCMAKE_INSTALL_PREFIX=${CMKR_DIRECTORY}"
+ "-DCMKR_GENERATE_DOCUMENTATION=OFF"
+ )
+ cmkr_exec("${CMAKE_COMMAND}"
+ --build "${CMKR_DIRECTORY}/build"
+ --config "${CMKR_BUILD_TYPE}"
+ --parallel
+ )
+ cmkr_exec("${CMAKE_COMMAND}"
+ --install "${CMKR_DIRECTORY}/build"
+ --config "${CMKR_BUILD_TYPE}"
+ --prefix "${CMKR_DIRECTORY}"
+ --component cmkr
+ )
+ if(NOT EXISTS ${CMKR_EXECUTABLE})
+ message(FATAL_ERROR "[cmkr] Failed to bootstrap '${CMKR_EXECUTABLE}'")
+ endif()
+ cmkr_exec("${CMKR_EXECUTABLE}" version)
+ message(STATUS "[cmkr] Bootstrapped ${CMKR_EXECUTABLE}")
+endif()
+execute_process(COMMAND "${CMKR_EXECUTABLE}" version
+ RESULT_VARIABLE CMKR_EXEC_RESULT
+)
+if(NOT CMKR_EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "[cmkr] Failed to get version, try clearing the cache and rebuilding")
+endif()
+
+# Use cmkr.cmake as a script
+if(CMAKE_SCRIPT_MODE_FILE)
+ if(NOT EXISTS "${CMAKE_SOURCE_DIR}/cmake.toml")
+ execute_process(COMMAND "${CMKR_EXECUTABLE}" init
+ RESULT_VARIABLE CMKR_EXEC_RESULT
+ )
+ if(NOT CMKR_EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "[cmkr] Failed to bootstrap cmkr project. Please report an issue: https://github.com/build-cpp/cmkr/issues/new")
+ else()
+ message(STATUS "[cmkr] Modify cmake.toml and then configure using: cmake -B build")
+ endif()
+ else()
+ execute_process(COMMAND "${CMKR_EXECUTABLE}" gen
+ RESULT_VARIABLE CMKR_EXEC_RESULT
+ )
+ if(NOT CMKR_EXEC_RESULT EQUAL 0)
+ message(FATAL_ERROR "[cmkr] Failed to generate project.")
+ else()
+ message(STATUS "[cmkr] Configure using: cmake -B build")
+ endif()
+ endif()
+endif()
+
+# This is the macro that contains black magic
+macro(cmkr)
+ # When this macro is called from the generated file, fake some internal CMake variables
+ get_source_file_property(CMKR_CURRENT_LIST_FILE "${CMAKE_CURRENT_LIST_FILE}" CMKR_CURRENT_LIST_FILE)
+ if(CMKR_CURRENT_LIST_FILE)
+ set(CMAKE_CURRENT_LIST_FILE "${CMKR_CURRENT_LIST_FILE}")
+ get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" DIRECTORY)
+ endif()
+
+ # File-based include guard (include_guard is not documented to work)
+ get_source_file_property(CMKR_INCLUDE_GUARD "${CMAKE_CURRENT_LIST_FILE}" CMKR_INCLUDE_GUARD)
+ if(NOT CMKR_INCLUDE_GUARD)
+ set_source_files_properties("${CMAKE_CURRENT_LIST_FILE}" PROPERTIES CMKR_INCLUDE_GUARD TRUE)
+
+ file(SHA256 "${CMAKE_CURRENT_LIST_FILE}" CMKR_LIST_FILE_SHA256_PRE)
+
+ # Generate CMakeLists.txt
+ cmkr_exec("${CMKR_EXECUTABLE}" gen
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ )
+
+ file(SHA256 "${CMAKE_CURRENT_LIST_FILE}" CMKR_LIST_FILE_SHA256_POST)
+
+ # Delete the temporary file if it was left for some reason
+ set(CMKR_TEMP_FILE "${CMAKE_CURRENT_SOURCE_DIR}/CMakerLists.txt")
+ if(EXISTS "${CMKR_TEMP_FILE}")
+ file(REMOVE "${CMKR_TEMP_FILE}")
+ endif()
+
+ if(NOT CMKR_LIST_FILE_SHA256_PRE STREQUAL CMKR_LIST_FILE_SHA256_POST)
+ # Copy the now-generated CMakeLists.txt to CMakerLists.txt
+ # This is done because you cannot include() a file you are currently in
+ configure_file(CMakeLists.txt "${CMKR_TEMP_FILE}" COPYONLY)
+
+ # Add the macro required for the hack at the start of the cmkr macro
+ set_source_files_properties("${CMKR_TEMP_FILE}" PROPERTIES
+ CMKR_CURRENT_LIST_FILE "${CMAKE_CURRENT_LIST_FILE}"
+ )
+
+ # 'Execute' the newly-generated CMakeLists.txt
+ include("${CMKR_TEMP_FILE}")
+
+ # Delete the generated file
+ file(REMOVE "${CMKR_TEMP_FILE}")
+
+ # Do not execute the rest of the original CMakeLists.txt
+ return()
+ endif()
+ # Resume executing the unmodified CMakeLists.txt
+ endif()
+endmacro()
diff --git a/sdk/PersesSDK.h b/sdk/PersesSDK.h
new file mode 100644
index 0000000..25dd9a3
--- /dev/null
+++ b/sdk/PersesSDK.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <intrin.h>
+// NOTE(review): a second #include was here; its target was stripped by HTML rendering — restore from upstream
+
+#define PERSES_MUTATION_START() \
+{\
+ __nop();\
+ __nop();\
+ __debugbreak();\
+ __debugbreak();\
+ _disable();\
+}
+
+#define PERSES_MUTATION_END() \
+{\
+ _enable();\
+ __debugbreak();\
+ __debugbreak();\
+ __nop();\
+ __nop();\
+}
\ No newline at end of file
diff --git a/src/details.hpp b/src/details.hpp
new file mode 100644
index 0000000..db6ab03
--- /dev/null
+++ b/src/details.hpp
@@ -0,0 +1,84 @@
+#pragma once
+
+// - MACROs for specific toggles
+#define PERSES_VERBOSE
+#define PERSES_DEBUGGABLE
+
+#ifdef PERSES_DEBUGGABLE
+
+#define PERSES_THROW(msg) throw msg
+#define PERSES_THROWIFN(cnd, msg) if (!cnd) PERSES_THROW(msg)
+#define PERSES_THROWIF(cnd, msg) if (cnd) PERSES_THROW(msg)
+
+#else
+
+#define PERSES_THROW(msg)
+#define PERSES_THROWIFN(cnd, msg)
+#define PERSES_THROWIF(cnd, msg)
+
+#endif
+
+#define PERSES_32BIT 32
+#define PERSES_64BIT 64
+
+
+#define PERSES_MARKER_MUTATION 0
+#define PERSES_MARKER_VIRTUALIZATION 1
+
+// #define PERSES_BITSTOBYTES(bits) (((bits) + 7) >> 3)
+// #define PERSES_BYTESTOBITS(bits) (bits << 3)
+
+constexpr auto PERSES_BITSTOBYTES(size_t bits) noexcept {
+ return (((bits)+7) >> 3);
+}
+
+constexpr auto PERSES_BYTESTOBITS(size_t bytes) noexcept {
+ return (bytes << 3);
+}
+
+namespace perses
+{
+ class ProtectionSchema;
+
+ namespace pe = pepp;
+ namespace assembler = asmjit;
+
+ using address = pe::Address< >;
+ using u8 = uint8_t;
+ using u16 = uint16_t;
+ using u32 = uint32_t;
+ using u64 = uint64_t;
+ using uptr = uintptr_t;
+	using SharedProtectionSchema = std::shared_ptr<ProtectionSchema>;
+
+ struct RelocationEntry
+ {
+ pepp::RelocationType type;
+ u32 offset;
+ // Only used on x64 for RIP relative instructions.
+ u32 base;
+ u32 length;
+ u64 absolute;
+ u64 stream = 0ull;
+ };
+
+ struct JumpTableEntry
+ {
+ u32 rva;
+ u64 address;
+ u32 newOffset;
+ assembler::Label label;
+ };
+
+ // - These are what get scanned in .text sections by the engine
+ // - These two are the begin/end markers respectively.
+ // - LIMITATIONS:
+ // * These markers MUST be 5 bytes in length!
+	inline std::tuple<int, const char*, u64> MarkerTags[] =
+ {
+ {PERSES_MARKER_MUTATION, "CC CC 90 90 FA", 0xCCCC9090FBull}
+ };
+}
+
+#define PERSES_MUTATE_FULL 0xf001c0de
+#define PERSES_MARKER_SIZE 0x5
\ No newline at end of file
diff --git a/src/disasm.cpp b/src/disasm.cpp
new file mode 100644
index 0000000..601695f
--- /dev/null
+++ b/src/disasm.cpp
@@ -0,0 +1,238 @@
+#include "perses.hpp"
+
+using namespace perses;
+
+static Disassembler s_disasm;
+
+Disassembler* Disassembler::instance()
+{
+ return &s_disasm;
+}
+
+void Disassembler::create(ZydisMachineMode mode)
+{
+ // - Initialize formatter
+ ZydisFormatterInit(&s_disasm._formatter, ZYDIS_FORMATTER_STYLE_INTEL);
+ ZydisFormatterSetProperty(&s_disasm._formatter, ZYDIS_FORMATTER_PROP_FORCE_SIZE, ZYAN_TRUE);
+
+ s_disasm._mode = mode;
+
+ if (mode == ZYDIS_MACHINE_MODE_LONG_64)
+ {
+ ZydisDecoderInit(&s_disasm._decoder, s_disasm._mode, ZYDIS_STACK_WIDTH_64);
+ return;
+ }
+
+ if (mode == ZYDIS_MACHINE_MODE_LONG_COMPAT_32)
+ {
+ ZydisDecoderInit(&s_disasm._decoder, s_disasm._mode, ZYDIS_STACK_WIDTH_32);
+ return;
+ }
+
+ PERSES_THROW("Unexpected machine mode passed into Disassembler::create()!");
+}
+
+bool Disassembler::decode(void* buf, instruction_t* instr)
+{
+ ZyanStatus status = ZydisDecoderDecodeFull(&_decoder, buf, 0xFFF, &instr->decoded, instr->operands, ZYDIS_MAX_OPERAND_COUNT_VISIBLE, ZYDIS_DFLAG_VISIBLE_OPERANDS_ONLY);
+
+ if (ZYAN_SUCCESS(status))
+ {
+ instr->raw.resize(instr->decoded.length);
+ memcpy(&instr->raw[0], buf, instr->decoded.length);
+ return true;
+ }
+
+ return false;
+}
+
+bool Disassembler::decode(void* buf, ZydisDecodedInstruction* instr, ZydisDecodedOperand* op)
+{
+ return ZYAN_SUCCESS(
+ ZydisDecoderDecodeFull(&_decoder, buf, 0xFFF, instr, op, ZYDIS_MAX_OPERAND_COUNT_VISIBLE, ZYDIS_DFLAG_VISIBLE_OPERANDS_ONLY));
+}
+
+u64 Disassembler::calcAbsolute(instruction_t* instr)
+{
+ u64 dst = 0ull;
+
+ ZydisCalcAbsoluteAddress(&instr->decoded, &instr->operands[0], instr->address, &dst);
+
+ return dst;
+}
+
+u64 perses::Disassembler::calcAbsolute(ZydisDecodedInstruction* instr, ZydisDecodedOperand* op, u64 address)
+{
+ u64 dst = 0ull;
+
+ ZyanStatus status = ZydisCalcAbsoluteAddress(instr, op, address, &dst);
+
+ return dst;
+}
+
+bool Disassembler::getSegments(instruction_t* intr, ZydisInstructionSegments* segments)
+{
+ return ZYAN_SUCCESS(
+ ZydisGetInstructionSegments(&intr->decoded, segments));
+}
+
+bool Disassembler::isJmp(instruction_t* i)
+{
+ ZydisDecodedInstruction* instr = &i->decoded;
+
+ switch (instr->mnemonic)
+ {
+ case ZYDIS_MNEMONIC_JNBE:
+ case ZYDIS_MNEMONIC_JB:
+ case ZYDIS_MNEMONIC_JBE:
+ case ZYDIS_MNEMONIC_JCXZ:
+ case ZYDIS_MNEMONIC_JECXZ:
+ case ZYDIS_MNEMONIC_JKNZD:
+ case ZYDIS_MNEMONIC_JKZD:
+ case ZYDIS_MNEMONIC_JL:
+ case ZYDIS_MNEMONIC_JLE:
+ case ZYDIS_MNEMONIC_JNB:
+ case ZYDIS_MNEMONIC_JNL:
+ case ZYDIS_MNEMONIC_JNLE:
+ case ZYDIS_MNEMONIC_JNO:
+ case ZYDIS_MNEMONIC_JNP:
+ case ZYDIS_MNEMONIC_JNS:
+ case ZYDIS_MNEMONIC_JNZ:
+ case ZYDIS_MNEMONIC_JO:
+ case ZYDIS_MNEMONIC_JP:
+ case ZYDIS_MNEMONIC_JRCXZ:
+ case ZYDIS_MNEMONIC_JS:
+ case ZYDIS_MNEMONIC_JZ:
+ case ZYDIS_MNEMONIC_JMP:
+ case ZYDIS_MNEMONIC_CALL:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool Disassembler::isBbTerminatorInstruction(instruction_t* i)
+{
+ // Check if the instruction ends a basic block
+ ZydisDecodedInstruction* instr = &i->decoded;
+
+ switch (instr->mnemonic)
+ {
+ case ZYDIS_MNEMONIC_JNBE:
+ case ZYDIS_MNEMONIC_JB:
+ case ZYDIS_MNEMONIC_JBE:
+ case ZYDIS_MNEMONIC_JCXZ:
+ case ZYDIS_MNEMONIC_JECXZ:
+ case ZYDIS_MNEMONIC_JKNZD:
+ case ZYDIS_MNEMONIC_JKZD:
+ case ZYDIS_MNEMONIC_JL:
+ case ZYDIS_MNEMONIC_JLE:
+ case ZYDIS_MNEMONIC_JNB:
+ case ZYDIS_MNEMONIC_JNL:
+ case ZYDIS_MNEMONIC_JNLE:
+ case ZYDIS_MNEMONIC_JNO:
+ case ZYDIS_MNEMONIC_JNP:
+ case ZYDIS_MNEMONIC_JNS:
+ case ZYDIS_MNEMONIC_JNZ:
+ case ZYDIS_MNEMONIC_JO:
+ case ZYDIS_MNEMONIC_JP:
+ case ZYDIS_MNEMONIC_JRCXZ:
+ case ZYDIS_MNEMONIC_JS:
+ case ZYDIS_MNEMONIC_JZ:
+ case ZYDIS_MNEMONIC_JMP:
+ case ZYDIS_MNEMONIC_RET:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+std::string Disassembler::format(address addr, ZydisDecodedInstruction* instr, ZydisDecodedOperand* op)
+{
+ char buf[0xFF]{};
+ ZyanStatus status = 0;
+
+ status = ZydisFormatterFormatInstruction(&_formatter, instr, op, instr->operand_count_visible, buf, sizeof(buf), addr.uintptr());
+
+ if (!ZYAN_SUCCESS(status))
+ {
+ return fmt::format("Unexpected error when formatting address: 0x{:X}", addr.uintptr());
+ }
+
+ return buf;
+}
+
+void Routine::buildFromCode(address buf)
+{
+ return;
+}
+
+void Routine::printAssembly(uint32_t numInstructions)
+{
+ if (empty())
+ return;
+
+ logger()->info("** Printing routine: 0x{:X}", at(0).address);
+
+ int count = 0;
+
+ for (auto& instr : *this)
+ {
+ if (numInstructions != -1)
+ {
+ if (count++ >= numInstructions)
+ break;
+ }
+
+ std::string fmt = Disassembler::instance()->format(instr.address, &instr.decoded, instr.operands);
+
+ logger()->debug("** 0x{:X}\t\t|\t{}", instr.address, fmt);
+ }
+}
+
+size_t perses::Routine::codeSize() const
+{
+ size_t sz {};
+ for (auto& insn : *this)
+ sz += insn.decoded.length;
+ return sz;
+}
+
+bool instruction_t::isMnemonic(ZydisMnemonic mnem) const
+{
+ return decoded.mnemonic == mnem;
+}
+
+bool instruction_t::isOperandType(size_t index, ZydisOperandType type) const
+{
+ if (const ZydisDecodedOperand* op = getOperand(index))
+ {
+ return op->type == type;
+ }
+ return false;
+}
+
+const ZydisDecodedOperand* instruction_t::getOperand(size_t index) const
+{
+ if (index >= decoded.operand_count_visible)
+ return nullptr;
+
+ return &operands[index];
+}
+
+size_t instruction_t::getFirstSegmentOffset(ZydisInstructionSegment type)
+{
+ ZydisInstructionSegments segs { };
+ Disassembler::instance()->getSegments(this, &segs);
+
+ for (auto& seg : segs.segments)
+ {
+ if (seg.type == type)
+ return seg.offset;
+ }
+
+ return 0ull;
+}
diff --git a/src/disasm.hpp b/src/disasm.hpp
new file mode 100644
index 0000000..c5a7629
--- /dev/null
+++ b/src/disasm.hpp
@@ -0,0 +1,61 @@
+#pragma once
+
+namespace perses
+{
+
+ struct instruction_t
+ {
+ bool isMnemonic(ZydisMnemonic mnem) const;
+ bool isOperandType(size_t index, ZydisOperandType type) const;
+ const ZydisDecodedOperand* getOperand(size_t index) const;
+ size_t getFirstSegmentOffset(ZydisInstructionSegment type);
+
+ uintptr_t address{ };
+ ZydisDecodedInstruction decoded{ };
+ ZydisDecodedOperand operands[ZYDIS_MAX_OPERAND_COUNT_VISIBLE]{ };
+		std::vector<u8> raw{ };
+ };
+
+ class Disassembler
+ {
+ public:
+ static Disassembler* instance();
+ static void create(ZydisMachineMode mode);
+
+ bool decode(void* buf, instruction_t* instr);
+ bool decode(void* buf, ZydisDecodedInstruction* instr, ZydisDecodedOperand* op);
+ u64 calcAbsolute(instruction_t* intr);
+ u64 calcAbsolute(ZydisDecodedInstruction* instr, ZydisDecodedOperand* op, u64 address);
+ ZydisRegister enclosingReg(ZydisRegister);
+ bool getSegments(instruction_t* intr, ZydisInstructionSegments* segments);
+ bool isJmp(instruction_t* instr);
+ bool isBbTerminatorInstruction(instruction_t* instr);
+
+ std::string format(address addr, ZydisDecodedInstruction* instr, ZydisDecodedOperand* op);
+
+ private:
+ ZydisDecoder _decoder;
+ ZydisFormatter _formatter;
+ ZydisMachineMode _mode;
+ };
+
+
+ class Routine : public std::vector
+ {
+ public:
+ Routine() = default;
+
+ void buildFromCode(address buf);
+ void printAssembly(uint32_t numInstructions = -1);
+
+ void addFlag(int flag) { _flag |= flag; }
+ void stripFlag(int flag) { _flag &= ~flag; }
+ int getFlag() const { return _flag; }
+ size_t codeSize() const;
+
+ uptr getAddress() const { return empty() ? 0 : at(0).address; }
+
+ private:
+ int _flag { };
+ };
+}
\ No newline at end of file
diff --git a/src/mapfileparser.cpp b/src/mapfileparser.cpp
new file mode 100644
index 0000000..7836d09
--- /dev/null
+++ b/src/mapfileparser.cpp
@@ -0,0 +1,75 @@
+#include "perses.hpp"
+#include
+
+using namespace perses;
+
+// Reads the map file at `filePath` line by line, handing each line to the
+// format-specific parseLine() implementation. Returns true only when the
+// file exists and at least one symbol was collected.
+bool MapFileParser::parse(std::filesystem::path filePath)
+{
+	// Bail out early when the target file is missing.
+	if (!std::filesystem::exists(filePath))
+		return false;
+
+	std::ifstream stream(filePath);
+
+	// Feed every line to the derived parser; each may append to _symbols.
+	for (std::string buf; std::getline(stream, buf); )
+		parseLine(buf);
+
+	// Success is defined by having parsed at least one symbol.
+	return !_symbols.empty();
+}
+
+// Parses one line of an MSVC linker .map file:
+//   " <section>:<offset>  <name>  <address>  <lib/obj>"
+// Appends a MapSymbol per match to _symbols; entries whose address column
+// parses to 0 are skipped. Returns true when the line matched the format.
+bool MSVCMapFileParser::parseLine(std::string_view line)
+{
+	// FIX: construct from the (data, size) pair. `line.data()` alone relies
+	// on NUL-termination, which std::string_view does not guarantee.
+	std::string s(line);
+
+	// FIX: build the regex once -- constructing a std::regex per line is
+	// very expensive and map files can contain many thousands of lines.
+	static const std::regex rgx (R"#(\s(\d+)\:([a-fA-F0-9]+)\s+(\S+)\s+([a-fA-F0-9]+)\s+(.+))#", std::regex_constants::ECMAScript);
+
+	if (std::regex_match(s, rgx))
+	{
+		for (std::smatch m; std::regex_search(s, m, rgx); s = m.suffix())
+		{
+			MapSymbol symbol { };
+
+			// Column 4 is the absolute address (hex).
+			symbol.address = std::strtoull(m[4].str().c_str(), nullptr, 16);
+
+			// Skip entries without a usable absolute address.
+			if (symbol.address == 0ull)
+				continue;
+
+			symbol.sectionIndex = std::atoi(m[1].str().c_str());
+			symbol.sectionOffset = std::strtoul(m[2].str().c_str(), nullptr, 16);
+			symbol.name = m[3].str();
+			symbol.libobj = m[5].str();
+
+			_symbols.emplace_back(std::move(symbol));
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
+// Parses one line of an IDA Pro exported .map file:
+//   " <section>:<offset>  <name>"
+// IDA maps carry no absolute address column, so `address` is left at 0.
+// Returns true when the line matched the format.
+bool IDAMapFileParser::parseLine(std::string_view line)
+{
+	// FIX: construct from the (data, size) pair. `line.data()` alone relies
+	// on NUL-termination, which std::string_view does not guarantee.
+	std::string s(line);
+
+	// FIX: build the regex once -- constructing a std::regex per line is
+	// very expensive and map files can contain many thousands of lines.
+	static const std::regex rgx(R"#(.(\d+)\:([a-fA-F0-9]+)\s+(\S+))#", std::regex_constants::ECMAScript);
+
+	if (std::regex_match(s, rgx))
+	{
+		for (std::smatch m; std::regex_search(s, m, rgx); s = m.suffix())
+		{
+			MapSymbol symbol{ };
+
+			// No absolute address in this format.
+			symbol.address = 0ull;
+			symbol.sectionIndex = std::atoi(m[1].str().c_str());
+			symbol.sectionOffset = std::strtoul(m[2].str().c_str(), nullptr, 16);
+			symbol.name = m[3].str();
+
+			_symbols.emplace_back(std::move(symbol));
+		}
+
+		return true;
+	}
+
+	return false;
+}
\ No newline at end of file
diff --git a/src/mapfileparser.hpp b/src/mapfileparser.hpp
new file mode 100644
index 0000000..7889656
--- /dev/null
+++ b/src/mapfileparser.hpp
@@ -0,0 +1,55 @@
+#pragma once
+
+namespace perses
+{
+	// One symbol entry parsed out of a .map file.
+	struct MapSymbol
+	{
+		u32 sectionIndex;     // Section index as printed in the map line.
+		u32 sectionOffset;    // Symbol offset within that section.
+		u64 address;          // Absolute address; 0 when the format omits it (IDA).
+		std::string name;     // Symbol name.
+		std::string libobj;   // Originating lib/obj (filled by the MSVC parser only).
+	};
+
+	// Known .map file producers.
+	enum class MapFileType
+	{
+		kIDAPro = 0,
+		kMSVC,
+		kLLVM
+	};
+
+	// Base map-file parser: parse() reads the file line by line and defers
+	// the format-specific work to the derived class via parseLine().
+	class MapFileParser : pe::msc::NonCopyable
+	{
+	public:
+		virtual ~MapFileParser() = default;
+
+		// Returns true when the file exists and at least one symbol was parsed.
+		bool parse(std::filesystem::path filePath);
+
+		const std::vector& getSymbols() const {
+			return _symbols;
+		}
+	protected:
+		MapFileParser() = default;
+		// Parses a single line; implementations append matches to _symbols.
+		virtual bool parseLine(std::string_view line) = 0;
+	protected:
+		std::vector _symbols;
+	};
+
+	// Parser for MSVC linker generated .map files.
+	class MSVCMapFileParser : public MapFileParser
+	{
+	public:
+		MSVCMapFileParser() {}
+
+	protected:
+		bool parseLine(std::string_view line) override;
+	};
+
+	// Parser for IDA Pro exported .map files.
+	class IDAMapFileParser : public MapFileParser
+	{
+	public:
+		IDAMapFileParser() {}
+
+	protected:
+		bool parseLine(std::string_view line) override;
+	};
+}
\ No newline at end of file
diff --git a/src/mutationlight.cpp b/src/mutationlight.cpp
new file mode 100644
index 0000000..1733f15
--- /dev/null
+++ b/src/mutationlight.cpp
@@ -0,0 +1,2441 @@
+#include "perses.hpp"
+
+using namespace perses;
+using namespace perses::assembler;
+
+// Explicit templates.
+template class MutationLightSchema;
+template class MutationLightSchema;
+
+#define MUT_TEMPLATE template
+
+static std::vector gs_retGadgets;
+
+/*
+NOTES:
+ * -- TODO: Conditionals are not handled "properly" here, but in order to do so, the `Routine` class would need to be modified to handle basic blocks rather than entire blocks of code.
+ ** Each routine should be analyzed in steps where the last instruction is a terminator instruction (e.g ret, jmp/jcc, etc)
+ ** Then each block can be separated and have passes run on them individually, the terminator instructions can be fixed to the new dst or mutated entirely.
+ * -- This is still a very basic mutation/obfuscation schema. The major component is the immediate/memory encryption, where all original immediates or offsets
+ ** are encrypted at compile time then decrypted during actual runtime of the code.
+ * -- All calls/jmps and even jmps to jump tables are obfuscated, which will completely break decompiling analyzers from working properly.
+ ** Further obfuscation can include JCC obfuscation, more MBA, or additional instructions.
+*/
+
+// Routes AsmJit emitter errors to the project logger; in debuggable builds
+// it also breaks into the debugger so the faulty emit can be inspected.
+class CompilerErrorHandler : public ErrorHandler {
+public:
+	void handleError(Error err, const char* message, BaseEmitter* origin) override {
+		logger()->critical("AsmJit compilation error: {} ({})\n", message, err);
+#ifdef PERSES_DEBUGGABLE
+		__debugbreak();
+#endif
+	}
+};
+
+// Core mutation pass. Re-assembles the routine instruction by instruction:
+// supported mnemonics are replaced by obfuscated equivalents via the
+// handle*() family; everything else is embedded verbatim with its
+// relocations fixed up. Branches inside the routine are re-emitted against
+// AsmJit labels so their deltas are re-solved automatically. Returns the
+// assembled code buffer; writing it back into the PE is delegated to the
+// application via linkCode().
+MUT_TEMPLATE perses::assembler::CodeBuffer MutationLightSchema::applyTransforms(Routine* rtn)
+{
+	logger()->info("Applying transforms on routine 0x{:X}", rtn->getAddress());
+
+	// Only routines explicitly marked for mutation are processed.
+	if (rtn->empty() || rtn->getFlag() != PERSES_MARKER_MUTATION)
+		return {};
+
+	_rtn = rtn;
+
+	// Setup begin-end values
+	_rtnBegin = rtn->getAddress();
+	_rtnEnd = _rtnBegin + rtn->codeSize();
+
+	// Initialize the CodeHolder
+	this->_code.init(app()->getEnvironment());
+
+	// One-time (first call only) filter of the collected RET gadgets.
+	if (static std::atomic_bool x = true; x.exchange(false)) // Credits to Xerox for this one liner :)
+	{
+		// Don't include stuff that will change on runtime.
+		auto it = std::remove_if(gs_retGadgets.begin(), gs_retGadgets.end(),
+		[this](u64& gadget)
+		{
+			u32 rva = toRva(gadget);
+
+			// Drop gadgets overlapping a relocation -- their bytes change at load time.
+			for (auto reloc : app()->getOriginalRelocs())
+			{
+				if (rva >= reloc && rva <= (reloc + sizeof(u32)))
+				{
+					return true;
+				}
+			}
+
+			// Drop gadgets that live inside routines being mutated away.
+			for (auto& rtn : app()->getRoutines())
+			{
+				if (gadget >= rtn.getAddress() && gadget <= (rtn.getAddress() + rtn.codeSize()))
+				{
+					return true;
+				}
+			}
+			return false;
+		});
+
+		gs_retGadgets.erase(it, gs_retGadgets.end());
+	}
+
+	// Add an error handler
+	CompilerErrorHandler errHandler;
+	this->_code.setErrorHandler(&errHandler);
+
+	assembler::x86::Assembler* cc = this->getCompiler();
+
+	// Attach the codeholder
+	this->_code.attach(cc);
+
+	// Variables needed for the entire compilation
+	uptr start = rtn->getAddress();
+
+	_streamOffset = 0ul;
+
+	// See notes above regarding JCC handling. This is improper and can be
+	// done properly by disassembling a function into basic blocks rather
+	// than an entire blob of code.
+	// Alternatively, JCC instructions such as jnz, jz can be converted into conditional movs such as cmovnz, cmovz, etc..
+	std::vector>> jccOffsets;
+	std::map positions;
+
+	// Now build mutations on the X86 instruction level
+	for (auto& insn : *rtn)
+	{
+		bool handled = false;
+
+		// Update the current instruction
+		_currentInstruction = &insn;
+
+#ifdef PERSES_VERBOSE
+		// printf("> * Transforming 0x%llx (0x%x) - [ %s ]\n",
+		//	insn.address, _streamOffset,
+		//	Disassembler::instance()->format(insn.address, &insn.decoded, insn.operands).c_str());
+#endif
+
+		// JCC fixups: bind any pending forward-branch label targeting this offset.
+		for (auto& [offset, label] : jccOffsets)
+		{
+			if (offset == _streamOffset && !label.second)
+			{
+				// Bind the label
+				Label& asmLabel = label.first;
+				cc->bind(asmLabel);
+
+				// Signify the label has been bound now
+				label.second = true;
+			}
+		}
+
+		// Bind each offset to a label (NOT GOOD)
+		positions[_streamOffset] = cc->newLabel();
+		cc->bind(positions[_streamOffset]);
+
+		// Just like JCCs, we need to constantly check whether we are hitting a jump table entry
+		for (auto& jte : _jumpTables)
+		{
+			uptr addr = insn.address;
+
+			if constexpr (BitSize == PERSES_64BIT)
+			{
+				// Jump table entries on x64 are RVAs.
+				addr -= app()->getBaseAddress();
+			}
+
+			if (jte.address == addr)
+			{
+				// If we hit the jump table offset, bind (if not already) the label
+				if (jte.label.id() == Globals::kInvalidId)
+				{
+					jte.label = cc->newLabel();
+					cc->bind(jte.label);
+				}
+			}
+		}
+
+		// Dispatch to the per-mnemonic mutation handlers.
+		switch (insn.decoded.mnemonic)
+		{
+		case ZYDIS_MNEMONIC_PUSH:
+			handled = handlePush(&insn);
+			break;
+		case ZYDIS_MNEMONIC_MOV:
+			handled = handleMov(&insn);
+			break;
+		case ZYDIS_MNEMONIC_CALL:
+		case ZYDIS_MNEMONIC_JMP:
+			handled = handleRelInstruction(&insn);
+
+			if (handled)
+				break;
+
+			if constexpr (BitSize == PERSES_32BIT)
+			{
+				// Unhandled x86 jmps may be jump-table dispatches.
+				if (insn.isMnemonic(ZYDIS_MNEMONIC_JMP))
+					handled = recoverJumpTable(&insn);
+			}
+
+			break;
+		case ZYDIS_MNEMONIC_ADD:
+			handled = handleAdd(&insn);
+			break;
+		case ZYDIS_MNEMONIC_XOR:
+			handled = handleXor(&insn);
+			break;
+		default:
+			handled = false;
+			break;
+		}
+
+		if (!handled &&
+			Disassembler::instance()->isJmp(&insn) &&
+			insn.operands[0].type == ZYDIS_OPERAND_TYPE_IMMEDIATE)
+		{
+			uptr va = insn.address;
+			uptr dst = Disassembler::instance()->calcAbsolute(&insn);
+
+			// FIX: was `dst < _rtnBegin && dst > _rtnEnd`, which can never be
+			// true. The intent is clearly "target lies outside the routine",
+			// in which case the branch must be embedded verbatim below.
+			if (dst < _rtnBegin || dst > _rtnEnd)
+			{
+				goto Unhandled;
+			}
+
+			// JCCs that jump backward are handled differently
+			if (dst < va)
+			{
+				// Try to find the label by assigned _streamOffset
+				auto it = positions.find((dst - rtn->getAddress()));
+				if (it != positions.end())
+				{
+					writeJcc(&insn.decoded, it->second);
+				}
+
+				goto UpdateStream;
+			}
+
+			//
+			// Create a new label, when we reach the destination, we call
+			// `asm->bind(*label)` -- This way we don't have to manually
+			// solve for the JCC relative deltas or the JCC type.
+			Label lbl = cc->newLabel();
+			dst -= rtn->getAddress();
+
+			jccOffsets.emplace_back(dst, std::make_pair(lbl, false));
+
+			// Write the JCC instruction now, so the label can be bound later
+			writeJcc(&insn.decoded, lbl);
+		}
+		else if (!handled)
+		{
+		Unhandled:
+			// Attempt to fixup relocations automatically.
+			if (app() != nullptr && insn.decoded.operand_count_visible != 0)
+			{
+				ZydisInstructionSegments segs;
+				ZydisGetInstructionSegments(&insn.decoded, &segs);
+
+				// On x64, mostly everything is RIP relative, so we need to attempt to fix these automatically..
+				if constexpr (BitSize == PERSES_64BIT)
+				{
+					for (u8 idx = 0; idx < insn.decoded.operand_count_visible; ++idx)
+					{
+						auto& op = insn.operands[idx];
+
+						if (op.type == ZYDIS_OPERAND_TYPE_IMMEDIATE)
+						{
+							if (op.imm.is_relative)
+							{
+								// Relative immediate: recompute its absolute target
+								// and register a fixup over the immediate bytes.
+								for (u8 segIdx = 0; segIdx < segs.count; ++segIdx)
+								{
+									auto seg = segs.segments[segIdx];
+
+									if (seg.type == ZYDIS_INSTR_SEGMENT_IMMEDIATE)
+									{
+										u64 absolute = Disassembler::instance()->calcAbsolute(&insn.decoded, &op, insn.address);
+										makeRelocation(seg.offset, true, absolute);
+										break;
+									}
+								}
+							}
+						}
+
+						if (op.type == ZYDIS_OPERAND_TYPE_MEMORY)
+						{
+							if (op.mem.base == ZYDIS_REGISTER_RIP)
+							{
+								// RIP-relative memory operand: same treatment over
+								// the displacement bytes.
+								for (u8 segIdx = 0; segIdx < segs.count; ++segIdx)
+								{
+									auto seg = segs.segments[segIdx];
+
+									if (seg.type == ZYDIS_INSTR_SEGMENT_DISPLACEMENT)
+									{
+										u64 absolute = Disassembler::instance()->calcAbsolute(&insn.decoded, &op, insn.address);
+										makeRelocation(seg.offset, true, absolute);
+										break;
+									}
+								}
+							}
+						}
+					}
+				}
+
+				// Generic, will work on both x86 and x64.
+				for (int i = 0; i < segs.count; ++i)
+				{
+					auto seg = segs.segments[i];
+
+					if (seg.type != ZYDIS_INSTR_SEGMENT_DISPLACEMENT &&
+						seg.type != ZYDIS_INSTR_SEGMENT_IMMEDIATE)
+						continue;
+
+					u32 idx = seg.offset;
+					if ((idx + sizeof(u32) <= insn.raw.size()) &&
+						*(u32*)&insn.raw[idx] >= app()->getBaseAddress())
+					{
+						// These checks are slow, which is why the above code is necessary.
+						// Binaries can have hundreds of thousands of relocations.
+						if (app()->isRelocationPresent(toRva(insn.address) + idx))
+						{
+							app()->removeRelocation(toRva(insn.address) + idx);
+							// Now generate the relocation ourself for the current
+							// instruction stream
+							makeRelocation(idx);
+						}
+					}
+
+				}
+			}
+
+
+			// Just embed the original code
+			Error err = cc->embed(insn.raw.data(), insn.raw.size());
+			if (err)
+				logger()->critical("Error during embed: {}", err);
+		}
+		else
+		{
+#ifdef PERSES_VERBOSE
+			// printf("> * Transforming 0x%llx (0x%x) - [ %s ]\n",
+			//	insn.address, _streamOffset,
+			//	Disassembler::instance()->format(insn.address, &insn.decoded, insn.operands).c_str());
+#endif
+		}
+
+	UpdateStream:
+		_streamOffset += insn.decoded.length;
+	}
+
+	// Finish up the rest of the JCC offsets in case we have branches at the end.
+	for (auto& [offset, label] : jccOffsets)
+	{
+		if (offset == _streamOffset && !label.second)
+		{
+			Label& asmLabel = label.first;
+			cc->bind(asmLabel);
+		}
+	}
+
+	cc->finalize();
+
+	// Resolve all label offsets
+	for (auto& jte : _jumpTables)
+	{
+		// Ensure it's a bound label
+		if (jte.label.id() != Globals::kInvalidId)
+		{
+			jte.newOffset = this->_code.labelOffset(jte.label);
+		}
+	}
+
+	size_t relocImmIdx { 0ull };
+
+	// Build a relocation table to pass to the linker.
+	for (auto& le : this->_code.labelEntries())
+	{
+		if (!le || !le->hasName())
+			continue;
+
+		if (le->id() == Globals::kInvalidId)
+		{
+			PERSES_THROW("Relocation label was added but never binded!");
+		}
+
+		// Named labels were created by makeRelocation(); pair them back up
+		// with their recorded metadata, in creation order.
+		RelocGenEntry& relocInfo = _relocEntryList[relocImmIdx++];
+
+		u32 relocOffset = (u32)le->offset() + relocInfo.roffset;
+		this->_relocs.emplace_back(
+			BitSize == PERSES_64BIT ? pepp::REL_BASED_DIR64 : pepp::REL_BASED_HIGHLOW,
+			relocOffset,
+			le->offset(),
+			relocInfo.length,
+			relocInfo.absolute);
+	}
+
+	// Link the code, build PE sections, fix jump tables, relocs., RIP relative instructions ..
+	if (app())
+	{
+		app()->linkCode(rtn, this->_code, this->_relocs, this->_jumpTables);
+	}
+
+	Section* section = this->_code.sectionById(0);
+	CodeBuffer& buf = section->buffer();
+
+	// Uncomment to get raw binary files of the mutated code.
+#ifdef PERSES_DEBUGGABLE
+	//pepp::io::File outbin("bin/compiler_out.bin", pepp::io::kFileOutput | pepp::io::kFileBinary);
+	//outbin.Write(buf.data(), buf.size());
+#endif
+
+	return buf;
+}
+
+// x86 (32-bit) only: rewrites `push dword ptr [disp32]` and `push imm32`
+// into sequences that carry the operand encrypted at compile time and
+// decrypt it at runtime. Relocated operands are stored as RVAs and rebased
+// via a generated relocation (DLLs) or the PEB image base (EXEs).
+// Returns false when the instruction is left for the generic embed path.
+MUT_TEMPLATE bool MutationLightSchema::handlePush(instruction_t* insn)
+{
+	// This obfuscation is only present on x86 (32bit), not x64.
+
+	assembler::x86::Assembler* cc = this->getCompiler();
+
+	if (app() && app()->getEnvironment().is64Bit())
+		return false;
+
+	// Only full 32-bit push operands are handled.
+	if (insn->getOperand(0)->size != 32)
+		return false;
+
+	if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_MEMORY))
+	{
+		// e.g push offset (opcode: FF 35)
+		if (insn->operands[0].mem.disp.has_displacement)
+		{
+			// Only plain ds:[disp32] forms -- no base/index register.
+			if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
+				return false;
+
+			if (insn->operands[0].mem.segment != ZYDIS_REGISTER_DS)
+				return false;
+
+			// The disp32 sits at offset 2 of the FF 35 encoding.
+			bool isRelocatable = app()->isRelocationPresent((insn->address - app()->getBaseAddress()) + 2);
+
+			if (isRelocatable)
+			{
+				// We need to remove/ignore the relocation so that our code doesn't break when the PE ldr. attempts to
+				// process the reloc. directory.
+				app()->removeRelocation((insn->address - app()->getBaseAddress()) + 2);
+			}
+
+			// Encrypt the target RVA at compile time: xor with a random key,
+			// then byteswap. The emitted code inverts this in reverse order.
+			u32 value = toRva((u32)insn->operands[0].mem.disp.value);
+			u32 key = util::genRandInteger();
+
+			value ^= key;
+			value = _byteswap_ulong(value);
+
+			// Reserve the slot the original push would have written.
+			cc->sub(x86::regs::esp, sizeof(u32));
+
+			// Preserve EAX
+			cc->push(x86::regs::eax);
+
+			// Decrypt: bswap, then xor.
+			cc->mov(x86::regs::eax, value);
+			cc->bswap(x86::regs::eax);
+			cc->xor_(x86::regs::eax, key);
+			// Load into previous stack alloc.
+			cc->xchg(x86::dword_ptr(x86::regs::esp, 4), x86::regs::eax);
+
+			if (app()->getImage().isDllOrSystemFile())
+			{
+				// DLLs: emit a real relocation over the imm of `mov eax, base`.
+				makeRelocation(0x1);
+				cc->mov(x86::regs::eax, app()->getBaseAddress());
+			}
+			else
+			{
+				// EXEs: read the live image base from the PEB (PEB+0x8 on x86).
+				fetchPeb(x86::regs::eax);
+				cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax, 8));
+			}
+
+			// Translate rva to va.
+			cc->add(x86::dword_ptr(x86::regs::esp, 4), x86::regs::eax);
+			// Dereference the VA and leave the loaded value in the reserved slot.
+			cc->xchg(x86::dword_ptr(x86::regs::esp, 4), x86::regs::eax);
+			cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax));
+			cc->xchg(x86::dword_ptr(x86::regs::esp, 4), x86::regs::eax);
+
+			cc->pop(x86::regs::eax);
+
+			return true;
+		}
+
+		return false;
+	}
+
+	if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_IMMEDIATE))
+	{
+		// Only handle opcode 0x68
+		if (insn->decoded.opcode != 0x68)
+			return false;
+
+		if (insn->operands[0].imm.value.u == 0)
+			return false;
+
+		// push imm32 encodes as { 68 imm32 }; the imm sits at offset 1.
+		bool isRelocatable = app()->isRelocationPresent((insn->address - app()->getBaseAddress()) + 1);
+
+		// Simple constant encryption, but enough to make decompilers generate some funky output.
+		if (isRelocatable)
+		{
+			// We need to remove/ignore the relocation so that our code doesn't break when the PE ldr. attempts to
+			// process the reloc. directory.
+			app()->removeRelocation((insn->address - app()->getBaseAddress()) + 1);
+
+			// Encrypt the RVA: xor with a random key, then rotate left.
+			u32 rva = toRva(insn->operands[0].imm.value.u);
+			u32 key = util::genRandInteger();
+			u32 rots = util::genRandInteger(2, 14);
+			u32 crypt = _rotl(rva ^ key, rots);
+
+			cc->push(crypt);
+			cc->pushfd();	// ror/xor/add below clobber EFLAGS.
+			cc->ror(x86::dword_ptr(x86::regs::esp, 4), rots);
+			cc->xor_(x86::dword_ptr(x86::regs::esp, 4), key);
+
+			cc->push(x86::regs::eax);
+
+			if (app()->getImage().isDllOrSystemFile())
+			{
+				// Add a relocation at this offset
+				// TODO: Figure out how to do this properly with asmjit!?
+				// For now, we just generate a dummy label, then we add 1 to the offset. mov eax, imm encoding is {A1 ..bytes..}
+				makeRelocation(0x1);
+				cc->mov(x86::regs::eax, app()->getBaseAddress());
+			}
+			else
+			{
+				// We can use the PEB to determine the active image base
+				fetchPeb(x86::regs::eax);
+				cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax, 8));
+			}
+
+			// rva -> va in the pushed slot.
+			cc->add(x86::dword_ptr(x86::regs::esp, 8), x86::regs::eax);
+
+			cc->pop(x86::regs::eax);
+			cc->popfd();
+		}
+		else
+		{
+			// Non-relocated immediate: same xor+rotate scheme, decrypted via EAX.
+			u32 key = util::genRandInteger();
+			u32 rots = util::genRandInteger(2, 14);
+			u32 crypt = _rotl(insn->operands[0].imm.value.u ^ key, rots);
+
+			cc->push(crypt);
+			cc->push(x86::regs::eax);
+			cc->pushfd();
+			cc->xchg(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 8));
+			cc->ror(x86::regs::eax, rots);
+			cc->xor_(x86::regs::eax, key);
+			cc->xchg(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 8));
+			cc->popfd();
+			cc->pop(x86::regs::eax);
+		}
+
+		return true;
+	}
+
+
+	return false;
+}
+
+MUT_TEMPLATE bool perses::MutationLightSchema::handleMov(instruction_t* insn)
+{
+ /*
+ ZydisInstructionSegments segs;
+ ZydisGetInstructionSegments(&insn->decoded, &segs);
+
+ for (int i = 0; i < segs.count; ++i)
+ {
+ auto seg = segs.segments[i];
+
+ printf("* Segment\n");
+ printf("\t* Type: %d\n", seg.type);
+ printf("\t* Size: %d\n", seg.size);
+ printf("\t* Offset: 0x%x\n", seg.offset);
+ }
+ */
+
+ // NOTE: MOV does not affect RFLAGs, so preserve it
+ //
+
+ if (insn->decoded.operand_count_visible < 2)
+ return false;
+
+ assembler::x86::Assembler* cc = this->getCompiler();
+ assembler::x86::Gp stackReg;
+
+ constexpr bool isx64 = BitSize == PERSES_64BIT;
+ bool isDll = false;
+
+ if (app())
+ {
+ isDll = app()->getImage().isDllOrSystemFile();
+ }
+
+ if constexpr (isx64)
+ stackReg = x86::regs::rsp;
+ else
+ stackReg = x86::regs::esp;
+
+ if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_REGISTER))
+ {
+ bool isImm = insn->isOperandType(1, ZYDIS_OPERAND_TYPE_IMMEDIATE);
+ bool isMem = insn->isOperandType(1, ZYDIS_OPERAND_TYPE_MEMORY);
+
+ auto dst = x86util::getAsmRegAny(insn->operands[0].reg.value);
+
+ if (isImm)
+ {
+ size_t offset = insn->getFirstSegmentOffset(ZYDIS_INSTR_SEGMENT_IMMEDIATE);
+ bool isReloc = false;
+
+ // Remove any relocation if needed
+ if (app() && app()->isRelocationPresent(toRva(insn->address) + offset))
+ {
+ app()->removeRelocation(toRva(insn->address) + offset);
+ isReloc = true;
+ }
+
+ if (isReloc)
+ {
+ if constexpr (isx64) // -- unlikely case.
+ {
+ u32 rva = toRva(insn->operands[1].imm.value.u);
+ u32 key = util::genRandInteger();
+ u32 rots = util::genRandInteger(1, 31);
+
+ x86::Reg dstReg(dst);
+ x86::Gpd dstDw = dst.r32();
+
+ rva ^= key;
+ rva = _rotl(rva, rots);
+ rva = ~rva;
+ rva = _byteswap_ulong(rva);
+
+ cc->mov(dstDw, rva);
+ cc->bswap(dstDw);
+ cc->not_(dstDw);
+ cc->ror(dstDw, rots);
+ cc->xor_(dstDw, key);
+ cc->push(dst);
+
+ if (app()->getImage().isDllOrSystemFile())
+ {
+ cc->push(x86::regs::rax);
+ makeRelocation(0x2);
+ cc->mov(x86::regs::rax, app()->getBaseAddress());
+ cc->add(x86::qword_ptr(stackReg, 8), x86::regs::rax);
+ cc->pop(x86::regs::rax);
+ }
+ else
+ {
+ cc->push(x86::regs::rax);
+ fetchPeb(x86::regs::rax);
+ cc->mov(x86::regs::rax, x86::qword_ptr(x86::regs::rax, 0x10));
+ cc->add(x86::qword_ptr(stackReg, 8), x86::regs::rax);
+ cc->pop(x86::regs::rax);
+ }
+
+ cc->pop(dst);
+ return true;
+ }
+ else
+ {
+ u32 rva = toRva((u32)insn->operands[1].imm.value.u);
+ u32 key = util::genRandInteger();
+ u32 rots = util::genRandInteger(1, 20);
+
+ rva = ~rva;
+ rva ^= key;
+
+ if (rots >= 10)
+ rva = _rotl(rva, rots);
+ else
+ rva = _rotr(rva, rots);
+
+ cc->pushfd();
+ // Generate a relocation for the base address
+ makeRelocation(0x1);
+ cc->push(app()->getBaseAddress());
+ cc->push(rva);
+
+ if (rots >= 10)
+ cc->ror(x86::dword_ptr(stackReg), rots);
+ else
+ cc->rol(x86::dword_ptr(stackReg), rots);
+
+ cc->xor_(x86::dword_ptr(stackReg), key);
+ cc->not_(x86::dword_ptr(stackReg));
+
+ // Load into dst reg.
+ cc->pop(dst);
+ // Translate rva to va
+ cc->add(dst, x86::dword_ptr(stackReg));
+ // Clean up
+ cc->add(stackReg, sizeof(u32));
+
+ cc->popfd();
+
+ return true;
+ }
+ }
+ else
+ {
+ cc->pushfd();
+ // Generate a different algorithm dependent on size
+ // growing in complexity respectively to size
+ switch (insn->operands[1].size)
+ {
+ case PERSES_BYTESTOBITS(sizeof(u8)):
+ {
+ u8 imm = (u8)insn->operands[1].imm.value.u;
+ imm = _rotl8(imm, 3);
+
+ cc->mov(dst, imm);
+ cc->ror(dst, 3);
+ break;
+ }
+ case PERSES_BYTESTOBITS(sizeof(u16)):
+ {
+ u16 imm = (u16)insn->operands[1].imm.value.u;
+ u16 key = util::genRandInteger();
+
+ imm = ~imm;
+ imm ^= key;
+
+ cc->mov(dst, imm);
+ cc->xor_(dst, key);
+ cc->not_(dst);
+ break;
+ }
+ // 32bit+ immediates will get 2 stage loads,
+ // where the upper and lower portion are decrypted
+ // separately then concatenated
+ case PERSES_BYTESTOBITS(sizeof(u32)):
+ {
+ // Temporary registers for use.
+ const static x86::Gpd _r32ss[] =
+ {
+ x86::regs::ebx,
+ x86::regs::ecx,
+ x86::regs::edx
+ };
+
+ x86::Gp tmpGp = x86::regs::eax;
+ x86::Reg tmpGpReg(tmpGp);
+
+ if (dst == x86::regs::eax ||
+ dst == x86::regs::rax)
+ {
+ tmpGp = _r32ss[rand() % ASMJIT_ARRAY_SIZE(_r32ss)];
+ tmpGpReg = tmpGp;
+ }
+
+ x86::Gpw tmpGpW = x86::gpw(tmpGpReg.id());
+ x86::GpbHi tmpGpHi = x86::gpb_hi(tmpGpReg.id());
+ x86::GpbLo tmpGpLo = x86::gpb_lo(tmpGpReg.id());
+
+ u16 upper = ((u32)insn->operands[1].imm.value.u) >> 16;
+ u16 lower = ((u32)insn->operands[1].imm.value.u) & 0xffff;
+
+ //printf("*** LOWER: 0x%x **\n", lower);
+ //printf("*** UPPER: 0x%x **\n", upper);
+
+ u16 keyUp = util::genRandInteger();
+ u16 keyLow = util::genRandInteger();
+
+ if (upper != 0)
+ {
+ upper = ~upper;
+ upper ^= keyUp;
+ upper = _byteswap_ushort(upper);
+ }
+
+ lower = _byteswap_ushort(lower);
+ lower = ~lower;
+ lower ^= keyLow;
+
+ // Load lower portion
+ cc->sub(stackReg, sizeof(u16));
+ cc->mov(x86::word_ptr(stackReg), lower);
+
+
+ // Load and decrypt the upper 16 bits
+ x86::Reg dstReg(dst);
+ x86::Gpw dstW = x86::gpw(dstReg.id());
+ x86::Gpd dstDw = x86::gpd(dstReg.id());
+
+ cc->mov(dstDw, upper);
+
+ if (upper != 0)
+ {
+ // EDI/ESI/EBP aren't registers with 16bit subsections that can be used
+ // to swap the bytes. I use the easy way out and transfer control to
+ // a temp register where the subsections needed are present.
+ bool unsupported = false;
+
+ x86::GpbHi dstHi = x86::gpb_hi(dstReg.id());
+ x86::GpbLo dstLo = x86::gpb_lo(dstReg.id());
+
+ // TODO: Do something better
+ if (dstReg == x86::regs::edi ||
+ dstReg == x86::regs::esi ||
+ dstReg == x86::regs::ebp ||
+ isx64 /*temporary*/)
+ {
+ unsupported = true;
+
+ cc->push(tmpGp);
+ cc->mov(tmpGp, dstDw);
+
+ dstHi = tmpGpHi;
+ dstLo = tmpGpLo;
+ }
+
+ cc->xchg(dstHi, dstLo);
+ if (unsupported)
+ cc->xchg(tmpGp, dstDw);
+ cc->xor_(dstW, keyUp);
+ cc->not_(dstW);
+ cc->shl(dstDw, 16);
+
+ if (unsupported)
+ {
+ cc->pop(tmpGp);
+ }
+ }
+
+
+ // Decrypt the lower portion
+ constexpr size_t stackOffset = isx64 ? sizeof(u64) : sizeof(u32);
+
+ cc->push(tmpGp);
+ cc->xor_(tmpGp, tmpGp);
+ cc->mov(tmpGpW, x86::word_ptr(stackReg, stackOffset));
+ cc->xor_(tmpGpW, keyLow);
+ cc->not_(tmpGpW);
+ cc->xchg(tmpGpLo, tmpGpHi);
+ cc->xchg(tmpGpW, x86::word_ptr(stackReg, stackOffset));
+ cc->pop(tmpGp);
+
+ // OR the two values
+ cc->mov(dstW, x86::word_ptr(stackReg));
+
+ // Clean up
+ cc->add(stackReg, sizeof(u16));
+
+ // Copy the lower 32bits to the upper 32
+ if (dst.size() == sizeof(u64))
+ {
+ cc->push(dst);
+ cc->shl(x86::qword_ptr(stackReg), 32);
+ cc->or_(x86::qword_ptr(stackReg), dst);
+ cc->pop(dst);
+ }
+
+ break;
+ }
+ case PERSES_BYTESTOBITS(sizeof(u64)):
+ {
+ u32 upper = ((u64)insn->operands[1].imm.value.u) >> 32;
+ u32 lower = ((u64)insn->operands[1].imm.value.u) & 0xffffffff;
+
+ // Get reg. info
+ x86::Reg dstReg(dst);
+ x86::Gpd dstDw = x86::gpd(dstReg.id());
+
+ u32 keyUp = util::genRandInteger();
+ u32 keyLow = util::genRandInteger();
+
+ lower ^= keyLow;
+ lower = ~lower;
+ lower = _byteswap_ulong(lower);
+ lower = _rotl(lower, keyUp & 31);
+
+ if (upper != 0)
+ {
+ upper ^= keyUp;
+ upper = _rotr(upper, keyLow & 31);
+ upper = _byteswap_ulong(upper);
+ upper = ~upper;
+ }
+
+ cc->xor_(dst, dst);
+
+ // Load and decrypt lower portion, and abuse xchg to generate aids pseudocode.
+ cc->sub(stackReg, sizeof(u32));
+ cc->mov(x86::dword_ptr(stackReg), dstDw);
+ cc->sub(stackReg, sizeof(u32));
+ cc->mov(x86::dword_ptr(stackReg), lower);
+ cc->ror(x86::dword_ptr(stackReg), keyUp & 31);
+ cc->xchg(x86::dword_ptr(stackReg), dstDw);
+ cc->bswap(dstDw);
+ cc->not_(dstDw);
+ cc->xor_(dstDw, keyLow);
+ cc->xchg(x86::dword_ptr(stackReg), dstDw);
+
+ // Decrypt upper
+ if (upper != 0)
+ {
+ cc->mov(dst, upper);
+ cc->not_(dstDw);
+ cc->bswap(dstDw);
+ cc->rol(dstDw, keyLow & 31);
+ cc->xor_(dstDw, keyUp);
+ cc->shl(dst, 32);
+ cc->or_(dst, x86::qword_ptr(stackReg));
+ }
+ else
+ {
+ cc->mov(dstDw, x86::dword_ptr(stackReg));
+ }
+
+ // Clean up
+ cc->add(stackReg, sizeof(u32));
+ cc->add(stackReg, sizeof(u32));
+
+ break;
+ }
+
+ }
+
+
+ cc->popfd();
+ return true;
+ }
+ }
+
+ if (isMem)
+ {
+ // Handle jump tables.
+ if constexpr (isx64)
+ {
+ if (recoverJumpTable(insn))
+ return true;
+ }
+
+ if (insn->operands[1].mem.segment != ZYDIS_REGISTER_DS)
+ {
+ return false;
+ }
+
+ if (insn->operands[1].mem.disp.has_displacement)
+ {
+ if constexpr (isx64)
+ {
+ if (insn->operands[1].mem.base != ZYDIS_REGISTER_RIP)
+ return false;
+ if (insn->operands[1].mem.index != ZYDIS_REGISTER_NONE)
+ return false;
+ }
+ else
+ {
+ if (insn->operands[1].mem.base != ZYDIS_REGISTER_NONE)
+ return false;
+ if (insn->operands[1].mem.index != ZYDIS_REGISTER_NONE)
+ return false;
+
+ if (insn->operands[0].size != 32)
+ return false;
+ }
+
+ if constexpr (!isx64)
+ {
+ size_t offset = insn->getFirstSegmentOffset(ZYDIS_INSTR_SEGMENT_DISPLACEMENT);
+ bool isReloc = false;
+
+ // Remove any relocation if needed
+ if (app() && app()->isRelocationPresent(toRva(insn->address) + offset))
+ {
+ app()->removeRelocation(toRva(insn->address) + offset);
+ isReloc = true;
+ }
+
+ u32 rva = isReloc ? toRva(insn->operands[1].mem.disp.value) : insn->operands[1].mem.disp.value;
+
+ u16 upper = (u16)(rva >> 16);
+ u16 lower = (u16)(rva & 0xffff);
+
+ // Temporary registers for use.
+ const static x86::Gpd _r32ss[] =
+ {
+ x86::regs::ebx,
+ x86::regs::ecx,
+ x86::regs::edx
+ };
+
+ x86::Gp tmpGp = x86::regs::eax;
+ x86::Reg tmpGpReg(tmpGp);
+
+ while (dst == tmpGp.r32())
+ {
+ tmpGp = _r32ss[util::genRandInteger(0, ASMJIT_ARRAY_SIZE(_r32ss))];
+ tmpGpReg = tmpGp;
+ }
+
+ x86::Gpw tmpGpW = x86::gpw(tmpGpReg.id());
+ x86::GpbHi tmpGpHi = x86::gpb_hi(tmpGpReg.id());
+ x86::GpbLo tmpGpLo = x86::gpb_lo(tmpGpReg.id());
+
+ //printf("*** LOWER: 0x%x **\n", lower);
+ //printf("*** UPPER: 0x%x **\n", upper);
+
+ u16 keyUp = util::genRandInteger();
+ u16 keyLow = util::genRandInteger();
+
+ if (upper != 0)
+ {
+ upper = ~upper;
+ upper ^= keyUp;
+ upper = _byteswap_ushort(upper);
+ }
+
+ lower = _byteswap_ushort(lower);
+ lower = ~lower;
+ lower ^= keyLow;
+
+ cc->pushfd();
+
+ // Load lower portion
+ cc->sub(stackReg, sizeof(u16));
+ cc->mov(x86::word_ptr(stackReg), lower);
+
+
+ // Load and decrypt the upper 16 bits
+ x86::Reg dstReg(dst);
+ x86::Gpw dstW = x86::gpw(dstReg.id());
+ x86::Gpd dstDw = x86::gpd(dstReg.id());
+
+ cc->mov(dstDw, upper);
+
+ if (upper != 0)
+ {
+ // EDI/ESI/EBP aren't registers with 16bit subsections that can be used
+ // to swap the bytes. I use the easy way out and transfer control to
+ // a temp register where the subsections needed are present.
+ bool unsupported = false;
+
+ x86::GpbHi dstHi = x86::gpb_hi(dstReg.id());
+ x86::GpbLo dstLo = x86::gpb_lo(dstReg.id());
+
+ // TODO: Do something better
+ if (dstReg == x86::regs::edi ||
+ dstReg == x86::regs::esi ||
+ dstReg == x86::regs::ebp)
+ {
+ unsupported = true;
+
+ cc->push(tmpGp);
+ cc->mov(tmpGp, dstDw);
+
+ dstHi = tmpGpHi;
+ dstLo = tmpGpLo;
+ }
+
+ // BSWAP does not work on 8bit operands
+ cc->xchg(dstHi, dstLo);
+ if (unsupported)
+ cc->xchg(tmpGp, dstDw);
+ cc->xor_(dstW, keyUp);
+ cc->not_(dstW);
+ cc->shl(dstDw, 16);
+
+ if (unsupported)
+ {
+ cc->pop(tmpGp);
+ }
+ }
+
+
+ // Decrypt the lower portion
+ constexpr size_t stackOffset = sizeof(u32);
+
+ cc->push(tmpGp);
+ cc->xor_(tmpGp, tmpGp);
+ cc->mov(tmpGpW, x86::word_ptr(stackReg, stackOffset));
+ cc->xor_(tmpGpW, keyLow);
+ cc->not_(tmpGpW);
+ cc->xchg(tmpGpLo, tmpGpHi);
+ cc->xchg(tmpGpW, x86::word_ptr(stackReg, stackOffset));
+ cc->pop(tmpGp);
+
+ // OR the two values
+ cc->mov(dstW, x86::word_ptr(stackReg));
+
+ // Clean up
+ cc->add(stackReg, sizeof(u16));
+
+ if (isReloc)
+ {
+ cc->push(tmpGp);
+
+ if (isDll)
+ {
+ // Relocate base address
+ makeRelocation(0x1);
+ cc->mov(tmpGp, app()->getBaseAddress());
+ }
+ else
+ {
+ fetchPeb(tmpGp);
+ cc->mov(tmpGp, x86::dword_ptr(tmpGp, 8));
+ }
+
+ cc->xchg(x86::dword_ptr(stackReg), tmpGp);
+ cc->add(dst, x86::dword_ptr(stackReg));
+ cc->xchg(x86::dword_ptr(stackReg), tmpGp);
+ cc->pop(tmpGp);
+ }
+
+ cc->push(x86::dword_ptr(dst));
+ cc->pop(dst);
+
+ cc->popfd();
+
+ return true;
+ }
+ else
+ {
+ u64 absolute = Disassembler::instance()->calcAbsolute(&insn->decoded, &insn->operands[1], insn->address);
+ u32 rva = toRva(absolute);
+ u32 key = util::genRandInteger();
+ u32 rots = util::genRandInteger(1, 31);
+
+ x86::Reg dstReg(dst);
+ x86::Gpd dstDw = x86::gpd(dstReg.id());
+ x86::Gpq dstQw = x86::gpq(dstReg.id());
+
+ rva ^= key;
+ rva = _rotl(rva, rots);
+ rva = ~rva;
+ rva = _byteswap_ulong(rva);
+
+ cc->mov(dstDw, rva);
+ cc->bswap(dstDw);
+ cc->not_(dstDw);
+ cc->ror(dstDw, rots);
+ cc->xor_(dstDw, key);
+ cc->push(dst);
+
+ if (app()->getImage().isDllOrSystemFile())
+ {
+ cc->push(x86::regs::rax);
+ makeRelocation(0x2);
+ cc->mov(x86::regs::rax, app()->getBaseAddress());
+ cc->add(x86::qword_ptr(stackReg, 8), x86::regs::rax);
+ cc->pop(x86::regs::rax);
+ }
+ else
+ {
+ cc->push(x86::regs::rax);
+ fetchPeb(x86::regs::rax);
+ cc->mov(x86::regs::rax, x86::qword_ptr(x86::regs::rax, 0x10));
+ cc->add(x86::qword_ptr(stackReg, 8), x86::regs::rax);
+ cc->pop(x86::regs::rax);
+ }
+
+ cc->pop(dst);
+ cc->mov(dst, x86::ptr(dstQw, 0, dstReg.size()));
+ return true;
+ }
+ }
+ else
+ {
+ return false;
+ }
+ }
+ }
+
+ if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_MEMORY))
+ {
+ if (insn->operands[0].mem.segment != ZYDIS_REGISTER_DS)
+ return false;
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_IMMEDIATE))
+ {
+ if (isx64 && insn->operands[1].size == 32)
+ {
+ if (insn->operands[0].mem.base != ZYDIS_REGISTER_RIP)
+ return false;
+
+ u64 dst = Disassembler::instance()->calcAbsolute(insn);
+ u32 dstRva = toRva(dst);
+ u32 imm = insn->operands[1].imm.value.u;
+
+ u32 dstKey = util::genRandInteger();
+ u32 dstKey2 = util::genRandInteger();
+
+ u32 immKey = util::genRandInteger();
+ u32 immKey2 = util::genRandInteger();
+ u32 rots = util::genRandInteger(1, 31);
+
+ imm ^= immKey;
+ imm = ~imm;
+ imm ^= immKey2;
+ imm = _rotl(imm, rots);
+
+ dstRva ^= dstKey;
+ dstRva = _byteswap_ulong(dstRva);
+ dstRva ^= dstKey2;
+ dstRva = ~dstRva;
+
+ auto _rax = x86::regs::rax;
+
+ cc->push(_rax);
+ cc->sub(stackReg, sizeof(u64));
+
+ cc->mov(x86::regs::eax, imm);
+ cc->ror(x86::regs::eax, rots);
+ cc->xor_(x86::regs::eax, immKey2);
+ cc->not_(x86::regs::eax);
+ cc->xor_(x86::regs::eax, immKey);
+ cc->xchg(x86::qword_ptr(stackReg), _rax);
+
+ // Decrypt RVA
+ cc->mov(x86::regs::eax, dstRva);
+ cc->not_(x86::regs::eax);
+ cc->xor_(x86::regs::eax, dstKey2);
+ cc->bswap(x86::regs::eax);
+ cc->xor_(x86::regs::eax, dstKey);
+
+ cc->push(_rax);
+
+ if (app()->getImage().isDllOrSystemFile())
+ {
+ // Load image base and relocate manually
+ makeRelocation(0x2);
+ cc->movabs(_rax, app()->getBaseAddress());
+ }
+ else
+ {
+ // Use PEB to resolve current base address
+ fetchPeb(_rax);
+ cc->mov(_rax, x86::qword_ptr(_rax, 0x10));
+ }
+
+ cc->add(x86::qword_ptr(stackReg), _rax);
+ cc->pop(_rax);
+
+ // rax now holds the pointer to load the immediate into.
+ cc->push(x86::regs::rcx);
+ cc->xchg(x86::regs::rcx, x86::qword_ptr(stackReg, sizeof(u64)));
+ cc->mov(x86::qword_ptr(_rax), x86::regs::rcx);
+ cc->pop(x86::regs::rcx);
+ cc->add(stackReg, sizeof(u64));
+ cc->pop(_rax);
+
+ return true;
+ }
+ else
+ {
+ if (insn->operands[0].mem.disp.has_displacement &&
+ insn->operands[1].size == 32)
+ {
+ if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
+ return false;
+ if (insn->operands[0].mem.index != ZYDIS_REGISTER_NONE)
+ return false;
+
+ ZydisInstructionSegments segs;
+ ZydisGetInstructionSegments(&insn->decoded, &segs);
+ bool isReloc = false;
+ bool isImmReloc = false;
+
+ for (int i = 0; i < segs.count; ++i)
+ {
+ auto seg = segs.segments[i];
+
+ if (seg.type != ZYDIS_INSTR_SEGMENT_DISPLACEMENT &&
+ seg.type != ZYDIS_INSTR_SEGMENT_IMMEDIATE)
+ continue;
+
+ if (app()->isRelocationPresent(toRva(insn->address) + seg.offset))
+ {
+ app()->removeRelocation(toRva(insn->address) + seg.offset);
+
+ if (seg.type == ZYDIS_INSTR_SEGMENT_DISPLACEMENT)
+ isReloc = true;
+ else
+ isImmReloc = true;
+ }
+ }
+
+ u32 val = isReloc ? toRva(insn->operands[0].mem.disp.value) : insn->operands[0].mem.disp.value;
+ u32 imm = isImmReloc ? toRva(insn->operands[1].imm.value.u) : insn->operands[1].imm.value.u;
+ u32 valKey = util::genRandInteger();
+ u32 immKey = util::genRandInteger();
+
+ imm = ~imm;
+ imm ^= immKey;
+ imm = _byteswap_ulong(imm);
+
+ val -= (valKey >> 1);
+ val = ~val;
+ val = _rotl(val, 8);
+
+ cc->pushfd();
+
+ cc->push(val);
+ cc->ror(x86::dword_ptr(stackReg), 8);
+ cc->not_(x86::dword_ptr(stackReg));
+ cc->add(x86::dword_ptr(stackReg), valKey >> 1);
+ // Preserve EAX
+ cc->push(x86::regs::eax);
+
+ if (isReloc)
+ {
+ if (isDll)
+ {
+ // Relocate base address
+ makeRelocation(0x1);
+ cc->mov(x86::regs::eax, app()->getBaseAddress());
+ cc->add(x86::dword_ptr(stackReg, 4), x86::regs::eax);
+ }
+ else
+ {
+ fetchPeb(x86::regs::eax);
+ cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax, 8));
+ cc->add(x86::dword_ptr(stackReg, 4), x86::regs::eax);
+ }
+ }
+
+ if (isImmReloc)
+ cc->push(x86::regs::eax);
+
+ cc->mov(x86::regs::eax, imm);
+ cc->bswap(x86::regs::eax);
+ cc->xor_(x86::regs::eax, immKey);
+ cc->not_(x86::regs::eax);
+
+ if (isImmReloc)
+ {
+ cc->add(x86::dword_ptr(stackReg), x86::regs::eax);
+ cc->pop(x86::regs::eax);
+ }
+
+ // EAX now holds the decrypted imm.
+ cc->push(x86::regs::ebx);
+ cc->mov(x86::regs::ebx, x86::dword_ptr(stackReg, 8));
+ cc->push(x86::regs::eax);
+ cc->pop(x86::dword_ptr(x86::regs::ebx));
+ cc->pop(x86::regs::ebx);
+ cc->pop(x86::regs::eax);
+
+ cc->add(x86::regs::esp, sizeof(u32));
+
+ cc->popfd();
+
+ return true;
+ }
+ }
+ }
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_REGISTER))
+ {
+ // Segment registers not handled yet
+ if (insn->operands[1].reg.value >= ZYDIS_REGISTER_ES &&
+ insn->operands[1].reg.value <= ZYDIS_REGISTER_GS)
+ {
+ return false;
+ }
+
+
+ // Only 32bit operands
+ if (insn->operands[0].mem.disp.has_displacement &&
+ insn->operands[1].size == 32)
+ {
+ if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
+ return false;
+ if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
+ return false;
+ if (insn->operands[0].mem.index != ZYDIS_REGISTER_NONE)
+ return false;
+
+ size_t offset = insn->getFirstSegmentOffset(ZYDIS_INSTR_SEGMENT_DISPLACEMENT);
+ bool isReloc = false;
+
+ // Remove any relocation if needed
+ if (app() && app()->isRelocationPresent(toRva(insn->address) + offset))
+ {
+ app()->removeRelocation(toRva(insn->address) + offset);
+ isReloc = true;
+ }
+
+ u32 val = toRva(insn->operands[0].mem.disp.value);
+ u32 valKey = util::genRandInteger();
+ auto src = x86util::getAsmRegAny(insn->operands[1].reg.value);
+
+ val += (valKey >> 1);
+ val = ~val;
+ val = _rotl(val, 8);
+
+ cc->pushfd();
+ // Push base
+ makeRelocation(0x1);
+ cc->push(app()->getBaseAddress());
+
+ cc->push(val);
+ cc->ror(x86::dword_ptr(stackReg), 8);
+ cc->not_(x86::dword_ptr(stackReg));
+ cc->sub(x86::dword_ptr(stackReg), valKey >> 1);
+
+ // Preserve
+ cc->push(x86::regs::ebp);
+
+ cc->mov(x86::regs::ebp, x86::dword_ptr(stackReg, 4));
+ // Resolve RVA
+ cc->add(x86::regs::ebp, x86::dword_ptr(stackReg, 8));
+ // Load
+ cc->mov(x86::dword_ptr(x86::regs::ebp), src);
+
+ // Cleanup
+ cc->pop(x86::regs::ebp);
+ cc->add(stackReg, sizeof(u64));
+
+ cc->popfd();
+
+ return true;
+ }
+ }
+ }
+
+
+ return false;
+}
+
+MUT_TEMPLATE bool perses::MutationLightSchema::handleXor(instruction_t* insn)
+{
+ //return false;
+
+ if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_REGISTER) &&
+ (insn->operands[1].size == 32 || insn->operands[1].size == 64))
+ {
+ auto dst = x86util::getAsmRegAny(insn->operands[0].reg.value);
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_REGISTER))
+ {
+ auto val = x86util::getAsmRegAny(insn->operands[1].reg.value);
+
+ if (dst == val)
+ return false;
+
+ genXor(dst, val);
+ return true;
+ }
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_IMMEDIATE))
+ {
+ auto val = insn->operands[1].imm.value.u;
+
+ // Play it safe for potential relocated immediates
+ // We can handle these, but for now, ignore.
+ if constexpr (BitSize == PERSES_32BIT)
+ {
+ if (val >= app()->getBaseAddress() &&
+ val <= (app()->getBaseAddress() + app()->getImage().getPEHdr().calcSizeOfImage()))
+ {
+ return false;
+ }
+ }
+
+ genXorImm(dst, val);
+ return true;
+ }
+
+ //printf("***** Generated XOR *****\n");
+
+ return false;
+ }
+
+
+ return false;
+}
+
+MUT_TEMPLATE bool perses::MutationLightSchema::handleAdd(instruction_t* insn)
+{
+ if (insn->isOperandType(0, ZYDIS_OPERAND_TYPE_REGISTER) &&
+ insn->operands[1].size == 32)
+ {
+ if (insn->operands[0].reg.value == ZYDIS_REGISTER_ESP ||
+ insn->operands[0].reg.value == ZYDIS_REGISTER_RSP)
+ return false;
+
+ auto dst = x86util::getAsmRegAny(insn->operands[0].reg.value);
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_REGISTER))
+ {
+ auto val = x86util::getAsmRegAny(insn->operands[1].reg.value);
+
+ if (dst == val)
+ return false;
+
+ genAdd(dst, val);
+ return true;
+ }
+
+ if (insn->isOperandType(1, ZYDIS_OPERAND_TYPE_IMMEDIATE))
+ {
+ auto val = insn->operands[1].imm.value.u;
+
+ // Play it safe for potential relocated immediates
+ // We can handle these, but for now, ignore.
+ if constexpr (BitSize == PERSES_32BIT)
+ {
+ if (val >= app()->getBaseAddress() &&
+ val <= (app()->getBaseAddress() + app()->getImage().getPEHdr().calcSizeOfImage()))
+ {
+ return false;
+ }
+ }
+
+
+ genAddImm(dst, val);
+ return true;
+ }
+
+ //printf("Generated ADD\n");
+
+ return false;
+ }
+
+ return false;
+}
+
MUT_TEMPLATE bool perses::MutationLightSchema::handleRelInstruction(instruction_t* insn)
{
	// Rewrites relative control-transfer instructions (CALL/JMP with an
	// immediate or memory operand) into an obfuscated push/ret sequence:
	// the continuation address and an encrypted target RVA are staged on
	// the stack, decrypted and rebased at runtime, and control transfers
	// via RET (preferably a cached RET gadget) instead of a direct branch.
	// Returns true when a replacement was emitted, false to keep the
	// original instruction.
	bool isImm = insn->isOperandType(0, ZYDIS_OPERAND_TYPE_IMMEDIATE);
	bool isMem = insn->isOperandType(0, ZYDIS_OPERAND_TYPE_MEMORY);


	assembler::x86::Assembler* cc = this->getCompiler();
	// Absolute destination address computed from the relative displacement.
	u64 absolute = Disassembler::instance()->calcAbsolute(insn);

	if (isImm)
	{
		// DO NOT process imm JMPs if they lye in our routine's range.
		if (absolute > _rtnBegin && absolute < _rtnEnd)
		{
			// I've never seen this happen but I'll get around to it if it does.
			if (insn->isMnemonic(ZYDIS_MNEMONIC_CALL))
			{
				// NOTE: the PERSES_THROW below is intentionally unreachable;
				// it only documents the unimplemented in-range CALL case.
				return false;
				PERSES_THROW("CALL found in the routine's range, implement functionality!");
			}

#ifdef PERSES_VERBOSE
			// printf("******** SKIPPING RELATIVE IN ROUTINE RANGE **********\n");
#endif
			return false;
		}

		// UNCOMMENT IF NEEDED TO REMOVE CALL/JMP OBFUSCATION
		if (insn->decoded.opcode == 0xe8)
		{
			// cc->call(absolute);
			// return true;
		}

		if (insn->decoded.opcode == 0xe9)
		{
			// cc->jmp(absolute);
			// return true;
		}
	}

	//return false;

	if (!isImm && !isMem)
		return false;

	if (insn->isMnemonic(ZYDIS_MNEMONIC_CALL))
	{
		if (isImm)
		{
			if constexpr (BitSize == PERSES_64BIT)
			{
				// --- x64 CALL imm: encrypt the target RVA at protect time.
				u64 callDst = absolute;
				u64 cryptDst = toRva(callDst);

				u32 key = util::genRandInteger();
				u32 key2 = util::genRandInteger();
				u32 rots = util::genRandInteger(1, 48);

				cryptDst = _byteswap_uint64(cryptDst);
				cryptDst ^= key2;
				cryptDst = _rotr64(cryptDst, rots);
				cryptDst ^= key;
				cryptDst = ~cryptDst;

				Label label = cc->newLabel();

				// Stage the return address (label right after this stub) in
				// the upper stack slot, preserving R15 around the LEA.
				cc->sub(x86::regs::rsp, 8);
				cc->push(x86::regs::r15);
				cc->lea(x86::regs::r15, x86::ptr(label));
				cc->mov(x86::qword_ptr(x86::regs::rsp, 8), x86::regs::r15);
				cc->pop(x86::regs::r15);

				// Reserve a scratch slot for the decrypted RVA.
				cc->sub(x86::regs::rsp, 8);
				cc->push(x86::regs::r15);

				// Decrypt the RVA
				cc->movabs(x86::regs::r15, cryptDst);
				cc->not_(x86::regs::r15);
				cc->mov(x86::qword_ptr(x86::regs::rsp, 8), key);
				cc->xor_(x86::regs::r15, x86::qword_ptr(x86::regs::rsp, 8));
				cc->rol(x86::regs::r15, rots);
				cc->mov(x86::qword_ptr(x86::regs::rsp, 8), key2);
				cc->xor_(x86::regs::r15, x86::qword_ptr(x86::regs::rsp, 8));
				cc->bswap(x86::regs::r15);
				cc->xchg(x86::regs::r15, x86::qword_ptr(x86::regs::rsp, 8));

				// NOTE(review): the PEB path below is disabled by the
				// hardcoded `true`; the manual-relocation branch is always
				// taken — confirm this is intentional.
				if (true)// app()->getImage().isDllOrSystemFile())
				{
					// Load image base and relocate manually
					makeRelocation(0x2);
					cc->movabs(x86::regs::r15, app()->getBaseAddress());
				}
				else
				{
					// Use PEB to resolve current base address
					fetchPeb(x86::regs::r15);
					cc->mov(x86::regs::r15, x86::qword_ptr(x86::regs::r15, 0x10));
				}

				cc->add(x86::qword_ptr(x86::regs::rsp, 8), x86::regs::r15);

				// Restore R15.
				cc->pop(x86::regs::r15);


				// Jump to a random RET if possible
				if (!gs_retGadgets.empty())
					cc->jmp(gs_retGadgets[rand() % gs_retGadgets.size()]);
				else
					cc->ret();

				cc->bind(label);
				return true;
			}
			else
			{
				// --- x86 CALL imm: same scheme with 32-bit transforms.
				u32 callDst = (u32)absolute;
				u32 cryptDst = toRva(callDst);

				u32 key = util::genRandInteger();
				u32 key2 = util::genRandInteger();
				u32 rots = util::genRandInteger(1, 18);

				cryptDst = _byteswap_ulong(cryptDst);
				cryptDst ^= key2;
				cryptDst = _rotl(cryptDst, rots);
				cryptDst ^= key;
				cryptDst = ~cryptDst;

				Label label = cc->newLabel();

				// Stage the (relocated) return address, then the encrypted
				// RVA; decrypt it in place under a saved EFLAGS.
				cc->sub(x86::regs::esp, sizeof(u32));
				cc->push(x86::regs::eax);
				makeRelocation(0x2);
				cc->lea(x86::regs::eax, x86::dword_ptr(label));
				cc->mov(x86::dword_ptr(x86::regs::esp, 4), x86::regs::eax);
				cc->pop(x86::regs::eax);
				cc->push(cryptDst);
				cc->pushfd();
				cc->not_(x86::dword_ptr(x86::regs::esp, 4));
				cc->xor_(x86::dword_ptr(x86::regs::esp, 4), key);
				cc->ror(x86::dword_ptr(x86::regs::esp, 4), rots);
				cc->xor_(x86::dword_ptr(x86::regs::esp, 4), key2);
				cc->popfd();
				cc->push(x86::regs::eax);
				cc->xchg(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 4));
				cc->bswap(x86::regs::eax);
				cc->xchg(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 4));
				cc->pop(x86::regs::eax);
				makeRelocation(0x3);
				cc->add(x86::dword_ptr(x86::regs::esp), app()->getBaseAddress());

				// Jump to a random RET if possible
				if (!gs_retGadgets.empty())
					cc->jmp((u32)gs_retGadgets[rand() % gs_retGadgets.size()]);
				else
					cc->ret();

				cc->bind(label);

				return true;
			}
		}


		if (isMem)
		{
			if constexpr (BitSize == PERSES_64BIT)
			{
				// --- x64 CALL [rip+disp]: the loaded pointer holds the real
				// target, so decrypt the RVA and then dereference it.
				if (insn->operands[0].mem.disp.has_displacement)
				{
					if (insn->operands[0].mem.base != ZYDIS_REGISTER_RIP)
						return false;

					u64 callDst = absolute;
					u64 cryptDst = toRva(callDst);

					u32 key = util::genRandInteger();
					u32 key2 = util::genRandInteger();
					u32 rots = util::genRandInteger(1, 48);

					cryptDst = _byteswap_uint64(cryptDst);
					cryptDst ^= key2;
					cryptDst = _rotr64(cryptDst, rots);
					cryptDst ^= key;
					cryptDst = ~cryptDst;

					Label label = cc->newLabel();

					cc->lea(x86::regs::rax, x86::ptr(label));
					cc->push(x86::regs::rax);
					cc->sub(x86::regs::rsp, 8);
					cc->movabs(x86::regs::rax, cryptDst);
					// Decrypt the RVA
					cc->not_(x86::regs::rax);
					cc->mov(x86::qword_ptr(x86::regs::rsp), key);
					cc->xor_(x86::regs::rax, x86::qword_ptr(x86::regs::rsp));
					cc->rol(x86::regs::rax, rots);
					cc->mov(x86::qword_ptr(x86::regs::rsp), key2);
					cc->xor_(x86::regs::rax, x86::qword_ptr(x86::regs::rsp));
					cc->bswap(x86::regs::rax);
					cc->xchg(x86::regs::rax, x86::qword_ptr(x86::regs::rsp));

					if (app()->getImage().isDllOrSystemFile())
					{
						// Load image base and relocate manually
						makeRelocation(0x2);
						cc->movabs(x86::regs::rax, app()->getBaseAddress());
					}
					else
					{
						// Use PEB to resolve current base address
						fetchPeb(x86::regs::rax);
						cc->mov(x86::regs::rax, x86::qword_ptr(x86::regs::rax, 0x10));
					}

					// Rebase the RVA, then replace the stack slot with the
					// value loaded from the pointer (the real call target).
					cc->add(x86::qword_ptr(x86::regs::rsp), x86::regs::rax);
					cc->xchg(x86::qword_ptr(x86::regs::rsp), x86::regs::rax);
					cc->mov(x86::regs::rax, x86::qword_ptr(x86::regs::rax));
					cc->xchg(x86::qword_ptr(x86::regs::rsp), x86::regs::rax);

					// Jump to a random RET if possible
					if (!gs_retGadgets.empty())
						cc->jmp(gs_retGadgets[rand() % gs_retGadgets.size()]);
					else
						cc->ret();

					cc->bind(label);
					return true;
				}
			}
			else
			{
				// printf("absolute: 0x%x\n", (u32)absolute);

				// --- x86 CALL [disp32] ---
				if (insn->operands[0].mem.disp.has_displacement)
				{
					if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
						return false;

					u32 offset = insn->getFirstSegmentOffset(ZYDIS_INSTR_SEGMENT_DISPLACEMENT);
					bool isRelocatable = app()->isRelocationPresent((insn->address - app()->getBaseAddress()) + offset);

					if (isRelocatable)
					{
						// We need to remove/ignore the relocation so that our code doesn't break when the PE ldr. attempts to
						// process the reloc. directory.
						app()->removeRelocation((insn->address - app()->getBaseAddress()) + offset);
					}

					u32 disp = insn->operands[0].mem.disp.value;
					u32 rva = toRva(disp);
					u32 key = util::genRandInteger();
					Label label = cc->newLabel();

					rva = ~rva;
					rva ^= key;

					// Generate junk code randomly
					bool j = false;
					if ((key & 0xf) < 7)
						cc->lahf();
					else
						j = true;

					// Generate relocation for the return address
					makeRelocation(0x2);
					cc->lea(x86::regs::eax, x86::dword_ptr(label));
					cc->push(x86::regs::eax);


					cc->push(rva);
					cc->xor_(x86::dword_ptr(x86::regs::esp), key);
					cc->not_(x86::dword_ptr(x86::regs::esp));

					if (app()->getImage().isDllOrSystemFile())
					{
						makeRelocation(0x1);
						cc->mov(x86::regs::eax, app()->getBaseAddress());
					}
					else
					{
						fetchPeb(x86::regs::eax);
						cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax, 8));
					}

					// Add the base address, and generate a relocation for runtime
					cc->add(x86::dword_ptr(x86::regs::esp), x86::regs::eax);
					cc->xchg(x86::dword_ptr(x86::regs::esp), x86::regs::eax);
					cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax));
					cc->xchg(x86::dword_ptr(x86::regs::esp), x86::regs::eax);

					// Generate junk code randomly
					if (j)
						cc->sahf();

					// Jump to a random RET if possible
					if (!gs_retGadgets.empty())
						cc->jmp((u32)gs_retGadgets[rand() % gs_retGadgets.size()]);
					else
						cc->ret();
					cc->bind(label);

					return true;
				}
			}
		}
	}

	// NOTE(review): this branch uses `BitSize == 64` while the rest of the
	// file compares against PERSES_64BIT — confirm the macro equals 64 so
	// both spellings agree.
	if (insn->isMnemonic(ZYDIS_MNEMONIC_JMP) && isImm && insn->decoded.opcode == 0xE9)
	{
		if constexpr (BitSize == 64)
		{
			// --- x64 JMP rel32 ---
			// NOTE(review): unlike the CALL paths, the RVA is pushed in
			// plaintext here (no encryption) — confirm this is intentional.
			u64 callDst = (u64)absolute;
			u64 cryptDst = callDst - app()->getBaseAddress();


			cc->sub(x86::regs::rsp, sizeof(u64));
			cc->mov(x86::qword_ptr(x86::regs::rsp), cryptDst);

			cc->push(x86::regs::rax);
			// Resolve RVA
			makeRelocation(0x2);
			cc->mov(x86::regs::rax, app()->getBaseAddress());
			cc->add(x86::qword_ptr(x86::regs::rsp, 8), x86::regs::rax);
			cc->pop(x86::regs::rax);

			// Jump to a random RET if possible
			if (!gs_retGadgets.empty())
				cc->jmp(gs_retGadgets[rand() % gs_retGadgets.size()]);
			else
				cc->ret();

			return true;
		}
		else
		{
			// --- x86 JMP rel32: encrypted RVA, decrypted in place on the stack.
			u32 callDst = (u32)absolute;
			u32 cryptDst = callDst - app()->getBaseAddress();

			u32 key = util::genRandInteger();
			u32 key2 = util::genRandInteger();
			u32 rots = util::genRandInteger(1, 18);
			u32 rots2 = util::genRandInteger(3, 12);

			cryptDst = _rotl(cryptDst, rots);
			cryptDst ^= key;
			cryptDst = _rotr(cryptDst, rots2);
			cryptDst ^= key2;

			cc->push(cryptDst);
			cc->xor_(x86::dword_ptr(x86::regs::esp), key2);
			cc->rol(x86::dword_ptr(x86::regs::esp), rots2);
			cc->xor_(x86::dword_ptr(x86::regs::esp), key);
			cc->ror(x86::dword_ptr(x86::regs::esp), rots);

			cc->push(x86::regs::eax);
			// Resolve RVA
			makeRelocation(0x1);
			cc->mov(x86::regs::eax, app()->getBaseAddress());
			// NOTE(review): qword_ptr on a 32-bit stack looks unintended —
			// should this be dword_ptr(esp, 4)? Verify the emitted encoding.
			cc->add(x86::qword_ptr(x86::regs::esp, 4), x86::regs::eax);
			cc->pop(x86::regs::eax);


			// Jump to a random RET if possible
			if (!gs_retGadgets.empty())
				cc->jmp((u32)gs_retGadgets[rand() % gs_retGadgets.size()]);
			else
				cc->ret();

			return true;
		}
	}
	// this->getCompiler()->call(static_cast(absolute));

	return false;
}
+
+MUT_TEMPLATE X86BinaryApplication* MutationLightSchema::app()
+{
+ return (X86BinaryApplication*)_app;
+}
+
+MUT_TEMPLATE void perses::MutationLightSchema::makeRelocation(int offset, bool relative, u64 absolute)
+{
+ static int relocIdx = 0;
+
+ Label reloc = this->getCompiler()->newNamedLabel(fmt::format("RELOC_{}", relocIdx++).c_str());
+
+ this->getCompiler()->bind(reloc);
+
+ if (!relative)
+ _relocEntryList.emplace_back(0, offset, 0, 0ull);
+ else
+ _relocEntryList.emplace_back(_streamOffset, offset, _currentInstruction->decoded.length, absolute);
+}
+
+MUT_TEMPLATE u32 MutationLightSchema::toRva(uptr address)
+{
+ return (address - app()->getBaseAddress());
+}
+
MUT_TEMPLATE void MutationLightSchema::fetchPeb(x86::Gp dst)
{
	// Emits an obfuscated sequence that leaves a TEB-derived pointer in `dst`.
	// The segment-relative offset is encrypted at protect time (xor/bswap/rot)
	// and the inverse transforms are emitted so the plain offset never appears
	// as an immediate in the output stream.
	assembler::x86::Assembler* cc = this->getCompiler();

	if constexpr (BitSize == PERSES_64BIT)
	{
		// x64: gs:[0x60] — the PEB pointer inside the TEB.
		u64 imm = 0x60;
		// NOTE(review): rots may be 64; both _rotr64 and the emitted ROL
		// treat a full-width rotate as identity, so the two sides stay
		// consistent — confirm against the intrinsic's documentation.
		u32 rots = util::genRandInteger(1, 64);

		// Compile-time encryption: bswap then rotate right.
		imm = _byteswap_uint64(imm);
		imm = _rotr64(imm, rots);

		// Runtime decryption: rotate left then bswap (inverse order).
		cc->mov(dst, imm);
		cc->rol(dst, rots);
		cc->bswap(dst);

		assembler::x86::Mem peb(x86::qword_ptr(dst));
		peb.setSegment(x86::regs::gs);

		cc->mov(dst, peb);
	}
	else
	{
		// x86: fs-relative load.
		// NOTE(review): offsetof(NT_TIB, Self) is 0x18, i.e. the TEB
		// self-pointer — not fs:[0x30], which is the PEB. Callers then read
		// [dst+8] expecting PEB->ImageBaseAddress; verify this offset is
		// what the 32-bit path actually intends.
		u32 imm = offsetof(NT_TIB, Self);
		u32 key = util::genRandInteger();
		bool rot = (key & 1) != 0;

		// Compile-time encryption: xor, bswap, optional rotate.
		imm ^= key;
		imm = _byteswap_ulong(imm);

		if (rot) imm = _rotr(imm, key & 0xf);

		// Runtime decryption mirrors the transforms in reverse order.
		cc->mov(dst, imm);
		if (rot) cc->rol(dst, key & 0xf);
		cc->bswap(dst);
		cc->xor_(dst, key);


		assembler::x86::Mem peb(x86::dword_ptr(dst));
		peb.setSegment(x86::regs::fs);

		cc->mov(dst, peb);
	}
}
+
MUT_TEMPLATE bool perses::MutationLightSchema::recoverJumpTable(instruction_t* insn)
{
	// Detects compiler-generated jump tables (switch dispatch) reached by the
	// current instruction and re-emits the table access as an obfuscated
	// sequence, while recording the table entries in _jumpTables so they can
	// be fixed up later. Returns true when a replacement was emitted.
	if (!app())
		return false;

	x86::Assembler* cc = this->getCompiler();
	assembler::x86::Gp stackReg;


	if constexpr (BitSize == PERSES_64BIT)
	{
		stackReg = x86::regs::rsp;
	}
	else
	{
		stackReg = x86::regs::esp;
	}

	// NOTE: This was only tested on MSVC (VS2022), so this may have to be tweaked
	// to support the output of different compilers.

	int jumpTableSize = 0;

	//
	// Try to find jump table size, note that there can be another jump table right next to another individual jump table,
	// this is a first resort to calculating it, the last resort is in x86BinaryApplication::inquireJumpTable.
	auto it = std::find_if(_rtn->begin(), _rtn->end(), [insn](instruction_t& i) { return insn->address == i.address; });
	if (it != _rtn->end())
	{
		int inCount = 0;

		// Walk backwards (up to 10 instructions) looking for the bounds
		// check `cmp <reg>, imm` that MSVC emits before an indexed jump;
		// its immediate + 1 is the entry count.
		// NOTE(review): the loop condition stops before examining
		// _rtn->begin() itself — confirm that's acceptable here.
		for (auto i = it; i != _rtn->begin(); --i)
		{
			if (inCount > 10)
				break;

			if (i->isMnemonic(ZYDIS_MNEMONIC_CMP))
			{
				if (i->isOperandType(1, ZYDIS_OPERAND_TYPE_IMMEDIATE))
				{
					jumpTableSize = i->operands[1].imm.value.u + 1;
					break;
				}
			}

			++inCount;
		}
	}

	if constexpr (BitSize == PERSES_32BIT)
	{
		// x86 shape: `jmp [disp32 + base + index*scale]` directly through the table.
		if (!insn->isOperandType(0, ZYDIS_OPERAND_TYPE_MEMORY) || !insn->operands[0].mem.disp.has_displacement)
			return false;

		std::vector entries { };
		if (!app()->inquireJumpTable(insn, _rtnBegin, _rtnEnd, jumpTableSize, entries))
			return false;


		// Sanity check. This may need to be changed - personally, I've never seen a jump table with less than 6 entries
		//if (entries.size() >= 4)
		{
			logger()->info("[JUMP TABLE] Handling potential jump table with {} entries.", entries.size());
			_jumpTables.insert(_jumpTables.end(), entries.begin(), entries.end());
		}

		// Remove existing relocations
		u32 offset = insn->getFirstSegmentOffset(ZYDIS_INSTR_SEGMENT_DISPLACEMENT);
		bool isRelocatable = app()->isRelocationPresent((insn->address - app()->getBaseAddress()) + offset);

		if (isRelocatable)
		{
			// We need to remove/ignore the relocation so that our code doesn't break when the PE ldr. attempts to
			// process the reloc. directory.
			app()->removeRelocation((insn->address - app()->getBaseAddress()) + offset);
		}

		//x86::Mem mem;
		//if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
		//{
		//	x86::Gp base = x86util::getAsmRegAny(insn->operands[0].mem.base);
		//	x86::Gp index = x86util::getAsmRegAny(insn->operands[0].mem.index);
		//	mem.setBase(base);
		//	mem.setIndex(index);
		//	mem.setShift(insn->operands[0].mem.scale >> 1);
		//}
		//else
		//{
		//	return false;
		//}
		//cc->jmp(mem);
		//return true;

		// JMP encryption
		u64 dst = insn->operands[0].mem.disp.value;

		u32 key = util::genRandInteger();
		u32 swap = toRva(dst);

		// Compile-time encryption of the table RVA.
		swap ^= key;
		swap = _byteswap_ulong(swap);

		// Reserve a slot (random filler) for the computed target, then save
		// flags and EAX.
		cc->push(util::genRandInteger());
		cc->pushfd();
		cc->push(x86::regs::eax);

		// Decrypt the table RVA into the reserved slot.
		cc->mov(x86::regs::eax, swap);
		cc->bswap(x86::regs::eax);
		cc->xor_(x86::regs::eax, key);
		cc->mov(x86::dword_ptr(x86::regs::esp, 8), x86::regs::eax);
		cc->pop(x86::regs::eax);

		// Translate RVA to VA
		makeRelocation(0x4);
		cc->add(x86::dword_ptr(x86::regs::esp, 4), app()->getBaseAddress());

		// Fold in the base register of the original addressing mode, if any.
		if (insn->operands[0].mem.base != ZYDIS_REGISTER_NONE)
		{
			x86::Gp base = x86util::getAsmRegAny(insn->operands[0].mem.base);
			cc->add(x86::dword_ptr(x86::regs::esp, 4), base);
		}

		// Fold in the scaled index.
		if (insn->operands[0].mem.index != ZYDIS_REGISTER_NONE)
		{
			x86::Gp index = x86util::getAsmRegAny(insn->operands[0].mem.index);

			if (insn->operands[0].mem.scale == 4)
			{
				// The slot offset compensates for the extra push of `index`.
				cc->push(index);
				cc->shl(index, 2);
				cc->add(x86::dword_ptr(x86::regs::esp, 4+index.size()), index);
				cc->pop(index);
			}
			else
			{
				// NOTE(review): scales other than 0 and 4 fall through with
				// no index contribution — confirm they cannot occur here.
				if (insn->operands[0].mem.scale == 0)
					cc->add(x86::dword_ptr(x86::regs::esp, 4), index);
			}
		}

		// Load the memory offset
		cc->push(x86::regs::eax);
		cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 8));
		cc->mov(x86::regs::eax, x86::dword_ptr(x86::regs::eax));
		cc->xchg(x86::regs::eax, x86::dword_ptr(x86::regs::esp, 8));
		cc->pop(x86::regs::eax);
		cc->popfd();
		// Jump to a random RET if possible
		if (!gs_retGadgets.empty())
			cc->jmp((u32)gs_retGadgets[rand() % gs_retGadgets.size()]);
		else
			cc->ret();

		return true;
	}
	else
	{
		// x64 shape: `mov reg, [base + index*scale + disp]` table-entry load.
		std::vector entries { };
		if (!app()->inquireJumpTable(insn, _rtnBegin, _rtnEnd, jumpTableSize, entries))
			return false;

		u32 scale = (u32)insn->operands[1].mem.scale;

		if (!entries.empty())
		{
			logger()->info("[JUMP TABLE] Handling potential jump table with {} entries.", entries.size());
			_jumpTables.insert(_jumpTables.end(), entries.begin(), entries.end());
		}

		ZydisRegister base = insn->operands[1].mem.base;
		ZydisRegister index = insn->operands[1].mem.index;
		x86::Gp dstReg = x86util::getAsmRegAny(insn->operands[0].reg.value);
		x86::Gpd dstRegDw = x86::gpd(dstReg.id());
		x86::Gpq dstRegQw = x86::gpq(dstReg.id());
		x86::Gp baseReg = x86util::getAsmRegAny(base);
		x86::Gp indexReg = x86util::getAsmRegAny(index);

		u32 val = insn->operands[1].mem.disp.value;
		u32 key = util::genRandInteger();
		// NOTE(review): key2 is generated but never used in this branch.
		u32 key2 = util::genRandInteger();

		// Compile-time encryption of the displacement.
		val ^= key;
		val = _byteswap_ulong(val);
		val = ~val;

		cc->push(dstReg);

		// Decrypt the displacement in dst, add the table base register, and
		// park the table address in the spilled slot.
		cc->mov(dstReg, val);
		cc->not_(dstRegDw);
		cc->bswap(dstRegDw);
		cc->xor_(dstRegDw, key);
		cc->add(dstRegQw, baseReg);
		cc->xchg(dstRegQw, x86::qword_ptr(stackReg));

		if (dstRegQw != indexReg)
			cc->mov(dstRegQw, indexReg);

		if (scale != 4)
		{
			PERSES_THROW("Unexpected scale in MutationLightSchema::recoverJumpTable.");
			return false;
		}

		// Apply the scale
		// NOTE(review): when dstReg aliases indexReg, this SHL mutates the
		// live index register and it is never restored — verify callers.
		cc->shl(dstReg, 2);
		cc->add(x86::qword_ptr(stackReg), dstRegQw);
		cc->pop(dstReg);
		cc->mov(dstReg, x86::qword_ptr(dstRegQw));

		return true;
	}

	return false;
}
+
+MUT_TEMPLATE void MutationLightSchema::writeJcc(ZydisDecodedInstruction* instr, assembler::Label& lbl)
+{
+ switch (instr->mnemonic)
+ {
+ case ZYDIS_MNEMONIC_JNBE:
+ this->getCompiler()->jnbe(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JB:
+ this->getCompiler()->jb(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JBE:
+ this->getCompiler()->jbe(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JECXZ:
+ this->getCompiler()->jecxz(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JL:
+ this->getCompiler()->jl(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JLE:
+ this->getCompiler()->jle(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNB:
+ this->getCompiler()->jnb(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNL:
+ this->getCompiler()->jnl(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNLE:
+ this->getCompiler()->jnle(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNO:
+ this->getCompiler()->jno(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNP:
+ this->getCompiler()->jnp(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNS:
+ this->getCompiler()->jns(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JNZ:
+ this->getCompiler()->jnz(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JO:
+ this->getCompiler()->jo(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JP:
+ this->getCompiler()->jp(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JS:
+ this->getCompiler()->js(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JZ:
+ this->getCompiler()->jz(lbl);
+ break;
+ case ZYDIS_MNEMONIC_JMP:
+ this->getCompiler()->jmp(lbl);
+ break;
+ case ZYDIS_MNEMONIC_CALL:
+ this->getCompiler()->call(lbl);
+ break;
+ default:
+ PERSES_THROW("Unknown JCC mnemonic passed into writeJcc.");
+ }
+}
+
MUT_TEMPLATE void MutationLightSchema::genXor(assembler::x86::Gp dst, assembler::x86::Gp val)
{
	// x ^ y = (x & ~y) | (~x & y)
	//
	// Emits an obfuscated register-register XOR using the identity above,
	// staging the (~x & y) term in a spilled stack slot. `val` is saved and
	// restored around its in-place NOT, so only `dst` (and flags) change.
	// Falls back to a plain XOR when the operands alias each other or the
	// stack pointer.

	assembler::x86::Assembler* cc = this->getCompiler();
	assembler::x86::Gp stackReg;

	constexpr bool isx64 = BitSize == PERSES_64BIT;
	bool isDll = false;

	// NOTE(review): isDll is computed but never used in this function.
	if (app())
	{
		isDll = app()->getImage().isDllOrSystemFile();
	}

	if constexpr (isx64)
	{
		stackReg = x86::regs::rsp;
	}
	else
	{
		stackReg = x86::regs::esp;
	}

	if (dst == val || dst == stackReg || val == stackReg)
	{
		cc->xor_(dst, val);
		return;
	}

	// This can be done a million times better
	// Spill x so the stack slot can hold the (~x & y) term.
	cc->push(dst);

	if constexpr (isx64)
	{
		// NOTE(review): push/qword_ptr here assume 64-bit operands; confirm
		// 32-bit registers cannot reach this path on x64.
		cc->not_(x86::qword_ptr(stackReg));
		cc->and_(x86::qword_ptr(stackReg), val);

		// Compute (x & ~y) in dst, preserving val.
		cc->push(val);
		cc->not_(val);
		cc->and_(dst, val);
		cc->pop(val);
		cc->or_(dst, x86::qword_ptr(stackReg));
		cc->add(stackReg, sizeof(u64));
	}
	else
	{
		cc->not_(x86::dword_ptr(stackReg));
		cc->and_(x86::dword_ptr(stackReg), val);

		// Compute (x & ~y) in dst, preserving val.
		cc->push(val);
		cc->not_(val);
		cc->and_(dst, val);
		cc->pop(val);
		cc->or_(dst, x86::dword_ptr(stackReg));
		cc->add(stackReg, dst.size());
	}
}
+
MUT_TEMPLATE void MutationLightSchema::genXorImm(assembler::x86::Gp dst, u32 val)
{
	// x ^ y = (x & ~y) | (~x & y)
	//
	// Register-immediate variant of genXor: builds both halves of the
	// identity using stack slots for the saved x and the immediate y.

	assembler::x86::Assembler* cc = this->getCompiler();
	assembler::x86::Gp stackReg;

	constexpr bool isx64 = BitSize == PERSES_64BIT;

	if constexpr (isx64)
		stackReg = x86::regs::rsp;
	else
		stackReg = x86::regs::esp;

	if constexpr (isx64)
	{
		// Layout after the setup: [rsp] = y (imm), [rsp+8] = saved x.
		cc->push(dst);
		cc->sub(stackReg, sizeof(u64));
		cc->mov(x86::qword_ptr(stackReg), val);

		// (~x & y)
		cc->not_(dst);
		cc->and_(dst, val);

		// restore dst.
		cc->xchg(x86::ptr(stackReg, sizeof(u64), dst.size()), dst);

		// (x & ~y)
		// NOTE(review): this NOT flips only the low dword of the 64-bit
		// slot, whose upper half came from a sign-extended imm32 store
		// above — verify the upper 32 bits are correct for 64-bit dst.
		cc->not_(x86::dword_ptr(stackReg));
		cc->and_(dst, x86::ptr(stackReg));

		// OR op.
		cc->or_(dst, x86::ptr(stackReg, sizeof(u64)));

		cc->add(stackReg, sizeof(u64) << 1);
	}
	else
	{
		// 32-bit layout: [esp] = x (pushed dst), [esp+size] = y (pushed imm).
		cc->push(val);
		cc->push(dst);
		cc->not_(x86::ptr(stackReg, 0, dst.size()));
		cc->and_(x86::ptr(stackReg, 0, dst.size()), val);
		cc->not_(x86::ptr(stackReg, dst.size(), 4));
		cc->and_(dst, x86::ptr(stackReg, dst.size(), 4));
		cc->or_(dst, x86::ptr(stackReg, 0, 4));
		cc->add(stackReg, 4 + dst.size());
	}
}
+
MUT_TEMPLATE void perses::MutationLightSchema::genAdd(assembler::x86::Gp dst, assembler::x86::Gp val)
{
	// (x + y) = (x - (~y)) - 1
	//
	// Emits an obfuscated register-register ADD via the two's-complement
	// identity above. `val` is saved/restored around its in-place NOT, so
	// only `dst` (and the flags) are modified overall.

	assembler::x86::Assembler* cc = this->getCompiler();

	assembler::x86::Gp stackReg;
	constexpr bool isx64 = BitSize == PERSES_64BIT;

	if constexpr (isx64)
		stackReg = x86::regs::rsp;
	else
		stackReg = x86::regs::esp;

	// Fall back to a plain ADD when the destination is the stack pointer or
	// the operand widths differ.
	if (dst == stackReg || dst.size() != val.size())
	{
		cc->add(dst, val);
		return;
	}

	cc->push(val);
	cc->not_(val);
	cc->sub(dst, val);
	cc->pop(val);
	cc->sub(dst, 1);
}
+
+MUT_TEMPLATE void MutationLightSchema::genAddImm(assembler::x86::Gp dst, u32 val)
+{
+ assembler::x86::Assembler* cc = this->getCompiler();
+ assembler::x86::Gp stackReg;
+
+ constexpr bool isx64 = BitSize == PERSES_64BIT;
+
+ if constexpr (isx64)
+ stackReg = x86::regs::rsp;
+ else
+ stackReg = x86::regs::esp;
+
+ if (dst == stackReg || dst.size() != sizeof(u32))
+ {
+ cc->add(dst, val);
+ return;
+ }
+
+ // x + y = not(not(x) - y)
+ cc->not_(dst);
+ cc->sub(dst, val);
+ cc->not_(dst);
+}
+
+// Explicit templates.
+template void perses::buildKnownRetGadgets(X86BinaryApplication* app);
+template void perses::buildKnownRetGadgets(X86BinaryApplication* app);
+
+template
+void perses::buildKnownRetGadgets(X86BinaryApplication* app)
+{
+ if (app && gs_retGadgets.empty())
+ {
+ if (app)
+ {
+ pepp::SectionHeader* xsec = nullptr;
+
+ // Find first executable section
+ for (int i = 0; i < app->getImage().getNumberOfSections(); ++i)
+ {
+ if (app->getImage().getSectionHdr(i).getCharacteristics() & pepp::SCN_MEM_EXECUTE)
+ {
+ xsec = &app->getImage().getSectionHdr(i);
+ break;
+ }
+ }
+
+ if (xsec)
+ {
+ // Find all locations in the executable section that have the RET opcode.
+ auto offsets = app->getImage().findBinarySequence(xsec, "c3");
+
+ // Translate all gadgets to raw addresses with the default base address
+ for (auto& offset : offsets)
+ {
+ offset = app->getImage().getPEHdr().offsetToRva(offset);
+ u64 gadget = offset + app->getBaseAddress();
+
+ gs_retGadgets.push_back(gadget);
+
+ // Build up to 0x1000 gadgets, you can remove this line to allow more
+ if (gs_retGadgets.size() > 0x1000)
+ break;
+ }
+
+ // NOTE: We build this list of RET's so we can jmp to them in the instruction stream instead of placing a `cc->ret()`.
+ // This can potentially make a analyzer place a Label where there shouldn't be and break the corresponding disassembly.
+ }
+ }
+ }
+
+}
+
std::vector perses::getKnownRetGadgets()
{
	// Returns a copy of the module-wide RET gadget cache populated by
	// buildKnownRetGadgets(); empty if no gadgets were collected.
	return gs_retGadgets;
}
diff --git a/src/mutationlight.hpp b/src/mutationlight.hpp
new file mode 100644
index 0000000..7cfcd4b
--- /dev/null
+++ b/src/mutationlight.hpp
@@ -0,0 +1,48 @@
#pragma once

namespace perses
{
	// Lightweight mutation schema: rewrites individual x86/x64 instructions
	// (PUSH/MOV/XOR/ADD and relative control flow) into obfuscated but
	// semantically equivalent sequences emitted through asmjit.
	template
	class MutationLightSchema : public ProtectionSchema
	{
	public:
		// Runs the mutation pass over `rtn` and returns the generated code buffer.
		perses::assembler::CodeBuffer applyTransforms(Routine* rtn) override;
		// Per-mnemonic handlers; each returns true when it emitted a
		// replacement sequence, false to keep the original instruction.
		bool handlePush(instruction_t* insn);
		bool handleMov(instruction_t* insn);
		bool handleXor(instruction_t* insn);
		bool handleAdd(instruction_t* insn);
		bool handleRelInstruction(instruction_t* insn);
		// Concrete application instance this schema is protecting.
		X86BinaryApplication* app();
	protected:
		// Binds a named label at the current emit position and records a
		// pending relocation entry (see RelocGenEntry below).
		void makeRelocation(int offset, bool relative = false, u64 absolute = 0ull);
		// Converts a virtual address to an RVA against the preferred base.
		u32 toRva(uptr address);
		// Emits an obfuscated sequence leaving a TEB/PEB-derived pointer in `dst`.
		void fetchPeb(assembler::x86::Gp dst);
		// Detects compiler-generated jump tables and re-emits the access.
		bool recoverJumpTable(instruction_t* insn);
		// Re-emits the branch carried by `instr`, retargeted at `lbl`.
		void writeJcc(ZydisDecodedInstruction* instr, assembler::Label& lbl);
		// Obfuscated ALU expansions used by the handlers above.
		void genXor(assembler::x86::Gp dst, assembler::x86::Gp val);
		void genXorImm(assembler::x86::Gp dst, u32 val);
		void genAdd(assembler::x86::Gp dst, assembler::x86::Gp val);
		void genAddImm(assembler::x86::Gp dst, u32 val);
	private:
		// One pending relocation produced by makeRelocation().
		struct RelocGenEntry
		{
			u16 ioffset;   // stream offset of the relocated instruction
			u16 roffset;   // offset within the instruction where the reloc applies
			u16 length;    // encoded length of the originating instruction
			u64 absolute;  // absolute target for relative entries
		};

		// Upper half of each entry stores the associated instruction's length
		// Lower half will store the offset the relocation should happen.
		std::vector _relocEntryList;
		// Instruction currently being transformed (set by the driver loop).
		instruction_t* _currentInstruction;
		// Address range of the routine being mutated.
		uptr _rtnBegin, _rtnEnd;
		// Jump-table entries collected by recoverJumpTable for later fixups.
		std::vector _jumpTables;
		// Current offset into the emitted instruction stream.
		u32 _streamOffset;
		// Routine currently being transformed.
		Routine *_rtn;
	};

	// Scans the first executable section for RET (0xC3) gadgets and caches
	// them module-wide for use in place of literal `ret` emission.
	template
	void buildKnownRetGadgets(X86BinaryApplication* app);
	// Returns a copy of the cached RET gadget list.
	std::vector getKnownRetGadgets();
}
\ No newline at end of file
diff --git a/src/perses.cpp b/src/perses.cpp
new file mode 100644
index 0000000..d0f6a31
--- /dev/null
+++ b/src/perses.cpp
@@ -0,0 +1,177 @@
+#include "perses.hpp"
+
+#include <argparse/argparse.hpp>
+
+#include <memory>
+
+// Drives one full protection run over `app` according to the parsed CLI
+// arguments: links an optional .MAP/list file, queues routines (by address,
+// symbol, or marker scan), applies the mutation transforms, compiles the
+// result, and reports elapsed time. Takes ownership of `app` and releases
+// it on every exit path.
+// NOTE(review): template/argument types were stripped by extraction and
+// reconstructed — confirm `BitSize` naming against upstream.
+template<int BitSize>
+void createApplication(perses::X86BinaryApplication<BitSize>* app, argparse::ArgumentParser& args)
+{
+	// RAII ownership replaces the original `goto Delete` + raw `delete`,
+	// so `app` is freed on all paths, including exceptions.
+	std::unique_ptr<perses::X86BinaryApplication<BitSize>> owned(app);
+
+	spdlog::stopwatch sw;
+
+	if (args.is_used("--rets"))
+		perses::buildKnownRetGadgets<BitSize>(app);
+
+	if (auto param = args.present("--map"))
+	{
+		std::filesystem::path path = *param;
+
+		if (!std::filesystem::exists(path))
+		{
+			logger()->critical("Unable to parse map file, non existent!");
+			return;
+		}
+
+		// IDA Pro maps are distinguished purely by their ".ida" extension.
+		if (path.extension().string() == ".ida")
+			app->linkMapFile(perses::MapFileType::kIDAPro, *param);
+		else
+			app->linkMapFile(perses::MapFileType::kMSVC, *param);
+	}
+
+	if (auto param = args.present("--list"))
+	{
+		std::filesystem::path path = *param;
+
+		if (!std::filesystem::exists(path))
+		{
+			logger()->critical("Unable to parse list file, non existent!");
+			return;
+		}
+
+		app->parseFunctionList(*param);
+	}
+
+	if (auto param = args.present<std::vector<std::string>>("-a"))
+	{
+		for (auto& uaddr : *param)
+		{
+			// Addresses are hex; strtoull yields 0 on parse failure, which
+			// is silently skipped.
+			perses::u64 addr = strtoull(uaddr.c_str(), nullptr, 16);
+			if (addr > 0)
+				app->addRoutineByAddress(addr, PERSES_MARKER_MUTATION);
+		}
+	}
+
+	if (auto param = args.present<std::vector<std::string>>("-s"))
+	{
+		// Symbol lookup requires a previously linked .MAP file.
+		if (!app->hasMapFile())
+		{
+			logger()->critical("Unable to use symbols argument without a linked .MAP file!");
+			return;
+		}
+
+		for (auto& sym : *param)
+			app->addRoutineBySymbol(sym, PERSES_MARKER_MUTATION);
+	}
+
+	if (args.is_used("--scan"))
+		app->scanForMarkers();
+
+	if (app->getRoutines().empty())
+	{
+		logger()->critical("Unable to mutate: no routines in queue.");
+		return;
+	}
+
+	app->transformRoutines();
+	app->compile();
+
+	logger()->info("Mutated {} routines.", app->getRoutines().size());
+
+	// Elapsed-time report. Integer div/mod replaces the original lossy
+	// float round-trip (numMinutes - (int)numMinutes) for the remainder.
+	const auto numSeconds = std::chrono::duration_cast<std::chrono::seconds>(sw.elapsed()).count();
+
+	if (numSeconds > 60)
+		logger()->info("It took {} minutes and {} seconds to complete this operation.", numSeconds / 60, numSeconds % 60);
+	else
+		logger()->info("It took {} seconds to complete this operation.", numSeconds);
+}
+
+
+// CLI entry point: registers arguments, validates the input file, and hands
+// off to createApplication() with a 32- or 64-bit application instance.
+// Returns 0 on success, nonzero on usage or input errors.
+int main(int argc, char* argv[])
+{
+	argparse::ArgumentParser args("PERSES");
+
+	args.add_argument("-f", "--file")
+		.help("Input file path.")
+		.required();
+	args.add_argument("-x64")
+		.help("Required for X64 PE files.")
+		.default_value(false)
+		.implicit_value(true);
+	args.add_argument("-a", "--address")
+		.help("Address(es) to mutate")
+		.remaining();
+	args.add_argument("-s", "--symbol")
+		.help("Symbol(s) to mutate (requires .MAP)")
+		.remaining();
+	args.add_argument("--list")
+		.help("Parsable function list (NOTE: all entries in the list will be added).");
+	args.add_argument("--map")
+		.help("Parsable map file (NOTE: IDA Pro .MAP files must have their extension named as \".ida\").");
+	args.add_argument("--rets")
+		.help("Use RET gadgets.")
+		.default_value(false)
+		.implicit_value(true);
+	args.add_argument("--scan")
+		.help("Scan for protection markers.")
+		.default_value(false)
+		.implicit_value(true);
+
+	if (argc <= 1)
+	{
+		args.print_help();
+		return 1;
+	}
+
+	try {
+		args.parse_args(argc, argv);
+	}
+	catch (const std::runtime_error& err) {
+		// Surface the parse error before the usage text instead of
+		// silently discarding it.
+		printf("%s\n\n", err.what());
+		args.print_help();
+		return 1;
+	}
+
+	logger()->debug("PERSES Code Protection Engine");
+
+	std::string filepath = args.get<std::string>("-f");
+
+	if (!std::filesystem::exists(filepath))
+	{
+		logger()->critical("Unable to find file: {}.", filepath);
+		// A missing input file is an error; don't report success to the shell.
+		return 1;
+	}
+
+	// NOTE(review): the bitness template arguments were stripped by
+	// extraction; plain 64/32 assumed — confirm against upstream.
+	if (args.get<bool>("-x64"))
+	{
+		createApplication(new perses::X86BinaryApplication<64>(filepath), args);
+	}
+	else
+	{
+		createApplication(new perses::X86BinaryApplication<32>(filepath), args);
+	}
+
+	return 0;
+}
+
+// Lazily-created global console logger: "console" color sink, debug level,
+// "[PERSES] ..." pattern. Subsequent calls return the same instance.
+// NOTE(review): the template argument of the return/local type was stripped
+// by extraction; spdlog factories return std::shared_ptr<spdlog::logger>.
+std::shared_ptr<spdlog::logger> logger()
+{
+	static std::shared_ptr<spdlog::logger> log = nullptr;
+
+	if (!log)
+	{
+		log = spdlog::stdout_color_mt("console");
+		log->set_level(spdlog::level::debug);
+		log->set_pattern("[%^PERSES%$] %v");
+
+		// Make spdlog's internal failures visible instead of silent.
+		spdlog::set_error_handler([](const std::string& msg) { printf("*** LOG ERROR: %s ***\n", msg.c_str()); });
+	}
+
+	return log;
+}
\ No newline at end of file
diff --git a/src/perses.hpp b/src/perses.hpp
new file mode 100644
index 0000000..68d873b
--- /dev/null
+++ b/src/perses.hpp
@@ -0,0 +1,36 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include