added mm code (copy virtual memory, map page, address translation)

merge-requests/1/head
_xeroxz 4 years ago
parent 7b952d72d7
commit 51f47aa52e

@@ -30,7 +30,7 @@ auto drv_entry(PDRIVER_OBJECT driver_object, PUNICODE_STRING registry_path) -> N
            MmGetVirtualForPhysical(current_pml4));

    // vmxroot will have the same "address space" as the current one being executed in...
-   memcpy(&mm::pml4[255], &kernel_pml4[255], sizeof(mm::pml4e) * 255);
+   memcpy(&mm::pml4[256], &kernel_pml4[256], sizeof(mm::pml4e) * 256);

    // setup mapping ptes to be present, writeable, executable, and user supervisor false...
    for (auto idx = 0u; idx < 254; ++idx)
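
Note: the copy now starts at index 256 because each pml4 slot spans 512gb of virtual address space and entry 256 is the first slot of the canonical kernel half (bit 47 set), so mirroring entries 256..511 hands vmxroot the entire kernel mapping. A quick sanity check on that index, assuming the u64 types from hv_types.hpp:

    static_assert(((0xffff800000000000ull >> 39) & 0x1ff) == 256,
        "lowest kernel-half address selects pml4 slot 256");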

mm.cpp

@@ -4,12 +4,79 @@ namespace mm
{
    auto translate(virt_addr_t virt_addr) -> u64
    {
        virt_addr_t cursor{ vmxroot_pml4 };
        if (!reinterpret_cast<ppml4e>(cursor.value)[virt_addr.pml4_index].present)
            return {};

        cursor.pt_index = virt_addr.pml4_index;
        if (!reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index].present)
            return {};

        // handle 1gb large page...
        if (reinterpret_cast<ppdpte>(cursor.value)[virt_addr.pdpt_index].page_size)
            return (reinterpret_cast<ppdpte>(cursor.value)
                [virt_addr.pdpt_index].pfn << 12) + virt_addr.offset_1gb;

        cursor.pd_index = virt_addr.pml4_index;
        cursor.pt_index = virt_addr.pdpt_index;
        if (!reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index].present)
            return {};

        // handle 2mb large page...
        if (reinterpret_cast<ppde>(cursor.value)[virt_addr.pd_index].page_size)
            return (reinterpret_cast<ppde>(cursor.value)
                [virt_addr.pd_index].pfn << 12) + virt_addr.offset_2mb;

        cursor.pdpt_index = virt_addr.pml4_index;
        cursor.pd_index = virt_addr.pdpt_index;
        cursor.pt_index = virt_addr.pd_index;
        if (!reinterpret_cast<ppte>(cursor.value)[virt_addr.pt_index].present)
            return {};

        return (reinterpret_cast<ppte>(cursor.value)
            [virt_addr.pt_index].pfn << 12) + virt_addr.offset_4kb;
    }
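
    // note: the walk above leans on the self-referencing pml4 slot; cursor
    // starts at vmxroot_pml4 (all four indices == PML4_SELF_REF), and each
    // assignment shifts the target's indices one slot lower, so the same
    // window exposes the next table level (pml4 -> pdpt -> pd -> pt)
    // without mapping anything new...
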
    auto translate(virt_addr_t virt_addr, u64 pml4_phys, map_type type) -> u64
    {
        const auto pml4 =
            reinterpret_cast<ppml4e>(
                map_page(pml4_phys, type));

        if (!pml4[virt_addr.pml4_index].present)
            return {};

        const auto pdpt =
            reinterpret_cast<ppdpte>(
                map_page(pml4[virt_addr
                    .pml4_index].pfn << 12, type));

        if (!pdpt[virt_addr.pdpt_index].present)
            return {};

        if (pdpt[virt_addr.pdpt_index].page_size)
            return (pdpt[virt_addr.pdpt_index].pfn << 12) + virt_addr.offset_1gb;

        const auto pd =
            reinterpret_cast<ppde>(
                map_page(pdpt[virt_addr
                    .pdpt_index].pfn << 12, type));

        if (!pd[virt_addr.pd_index].present)
            return {};

        if (pd[virt_addr.pd_index].page_size)
            return (pd[virt_addr.pd_index].pfn << 12) + virt_addr.offset_2mb;

        const auto pt =
            reinterpret_cast<ppte>(
                map_page(pd[virt_addr
                    .pd_index].pfn << 12, type));

        if (!pt[virt_addr.pt_index].present)
            return {};

        return (pt[virt_addr.pt_index].pfn << 12) + virt_addr.offset_4kb;
    }
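
    // note: unlike the self-ref walk above, this overload translates against
    // a foreign dirbase (pml4_phys) by mapping each table's physical page
    // into vmxroot with map_page before reading it...
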
    auto map_page(u64 phys_addr, map_type type) -> u64
@@ -27,7 +94,56 @@ namespace mm
                [result.pt_index].pfn = phys_addr >> 12;

        __invlpg(result.value);
-       result.offset = virt_addr_t{ (void*)phys_addr }.offset;
+       result.offset_4kb = virt_addr_t{ (void*)phys_addr }.offset_4kb;
        return reinterpret_cast<u64>(result.value);
    }
    auto map_virt(u64 dirbase, u64 virt_addr, map_type map_type) -> u64
    {
        const auto phys_addr =
            translate(virt_addr_t{ (void*)
                virt_addr }, dirbase, map_type);

        if (!phys_addr)
            return {};

        return map_page(phys_addr, map_type);
    }
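
    // note: the mapping presumably stays valid only until the next map_virt /
    // map_page call with the same map_type, since each type appears to reuse
    // a fixed pte slot in the vmxroot mapping range...
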
    auto copy_virt(u64 dirbase_src, u64 virt_src, u64 dirbase_dest, u64 virt_dest, u64 size) -> bool
    {
        while (size)
        {
            auto dest_size = PAGE_SIZE - virt_addr_t{ (void*)virt_dest }.offset_4kb;
            if (size < dest_size)
                dest_size = size;

            auto src_size = PAGE_SIZE - virt_addr_t{ (void*)virt_src }.offset_4kb;
            if (size < src_size)
                src_size = size;

            const auto mapped_src =
                reinterpret_cast<void*>(
                    map_virt(dirbase_src, virt_src, map_type::src));

            if (!mapped_src)
                return false;

            const auto mapped_dest =
                reinterpret_cast<void*>(
                    map_virt(dirbase_dest, virt_dest, map_type::dest));

            if (!mapped_dest)
                return false;

            // copy directly between the two pages...
            auto current_size = min(dest_size, src_size);
            memcpy(mapped_dest, mapped_src, current_size);

            virt_src += current_size;
            virt_dest += current_size;
            size -= current_size;
        }
        return true;
    }
}
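
A minimal usage sketch (the dirbase/address names here are hypothetical, and both ranges must already be paged in, per the header comment below):

    // copy a page of memory between two address spaces from vmxroot,
    // without ever switching cr3...
    if (!mm::copy_virt(src_dirbase, src_virt, dest_dirbase, dest_virt, 0x1000))
        ; // a page in either range was not present...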

mm.hpp

@@ -1,7 +1,7 @@
#pragma once
#include "hv_types.hpp"
-#define PML4_SELF_REF 254
+#define PML4_SELF_REF 255
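// index 255 is the last slot of the lower canonical half (bit 47 clear), so
// the self-ref window sits just below the kernel entries (256..511) that
// drv_entry now mirrors from the live pml4...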
#pragma section(".pml4", read, write)
namespace mm
@@ -11,13 +11,31 @@ namespace mm
        void* value;

        struct
        {
-           u64 offset : 12;
+           u64 offset_4kb : 12;
            u64 pt_index : 9;
            u64 pd_index : 9;
            u64 pdpt_index : 9;
            u64 pml4_index : 9;
            u64 reserved : 16;
        };

        struct
        {
            u64 offset_2mb : 21;
            u64 pd_index : 9;
            u64 pdpt_index : 9;
            u64 pml4_index : 9;
            u64 reserved : 16;
        };

        struct
        {
            u64 offset_1gb : 30;
            u64 pdpt_index : 9;
            u64 pml4_index : 9;
            u64 reserved : 16;
        };
    } virt_addr_t, * pvirt_addr_t;
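
    // the three anonymous views above overlap: offset_2mb covers pt_index +
    // offset_4kb (9 + 12 = 21 bits) and offset_1gb covers pd_index + pt_index +
    // offset_4kb (9 + 9 + 12 = 30 bits), matching 2mb and 1gb page granularity...
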
    typedef union _pml4e
@@ -107,7 +125,7 @@ namespace mm
    } pte, * ppte;

    enum class map_type { dest, src };
-   inline const ppml4e vmxroot_pml4 = reinterpret_cast<ppml4e>(0x7f0000000000);
+   inline const ppml4e vmxroot_pml4 = reinterpret_cast<ppml4e>(0x7fbfdfeff000);
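    // == (255ull << 39) | (255ull << 30) | (255ull << 21) | (255ull << 12):
    // the one virtual address whose pml4/pdpt/pd/pt indices all equal
    // PML4_SELF_REF, so the pml4 page itself shows through at this address...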
    // make sure this is 4kb aligned or you are going to be meeting allah...
    __declspec(allocate(".pml4"))
@@ -121,4 +139,11 @@ namespace mm
    // map a page into vmxroot address space...
    auto map_page(u64 phys_addr, map_type type) -> u64;

    // map a page (4kb) from another address space into vmxroot...
    auto map_virt(u64 dirbase, u64 virt_addr, map_type map_type) -> u64;

    // copy virtual memory without changing cr3... this maps the physical memory into vmxroot
    // address space and copies the memory directly between the physical pages... the memory must be paged in...
    auto copy_virt(u64 dirbase_src, u64 virt_src, u64 dirbase_dest, u64 virt_dest, u64 size) -> bool;
}