#include "mem_ctx.hpp" namespace nasa { mem_ctx::mem_ctx(kernel_ctx& krnl_ctx, DWORD pid) : k_ctx(&krnl_ctx), dirbase(get_dirbase(krnl_ctx, pid)), pid(pid) { // // allocate a pdpt // this->new_pdpt.second = reinterpret_cast( VirtualAlloc( NULL, PAGE_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE )); PAGE_IN(this->new_pdpt.second, PAGE_SIZE); // // get page table entries for new pdpt // pt_entries new_pdpt_entries; hyperspace_entries( new_pdpt_entries, new_pdpt.second ); this->new_pdpt.first = reinterpret_cast(new_pdpt_entries.pt.second.pfn << 12); // // make a new pml4e that points to our new pdpt. // new_pdpt_entries.pml4.second.pfn = new_pdpt_entries.pt.second.pfn; // // set the pml4e to point to the new pdpt // set_pml4e(reinterpret_cast<::ppml4e>(get_dirbase()) + PML4E_INDEX, new_pdpt_entries.pml4.second, true); // // make a new pd // this->new_pd.second = reinterpret_cast( VirtualAlloc( NULL, PAGE_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE )); PAGE_IN(this->new_pd.second, PAGE_SIZE); // // get paging table entries for pd // pt_entries new_pd_entries; hyperspace_entries( new_pd_entries, this->new_pd.second ); this->new_pd.first = reinterpret_cast(new_pd_entries.pt.second.pfn << 12); // // make a new pt // this->new_pt.second = reinterpret_cast( VirtualAlloc( NULL, PAGE_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE )); PAGE_IN(this->new_pt.second, PAGE_SIZE); // // get paging table entries for pt // pt_entries new_pt_entries; hyperspace_entries( new_pt_entries, this->new_pt.second ); this->new_pt.first = reinterpret_cast(new_pt_entries.pt.second.pfn << 12); } mem_ctx::~mem_ctx() { // // remove pml4e // pml4e null_value{ NULL }; set_pml4e(reinterpret_cast<::ppml4e>(get_dirbase()) + PML4E_INDEX, null_value, true); } void* mem_ctx::set_page(void* addr) { // // table entry change. 
	void* mem_ctx::set_page(void* addr)
	{
		//
		// table entry change.
		//
		{
			++pte_index;
			if (pte_index >= 511)
			{
				++pde_index;
				pte_index = 0;
			}

			if (pde_index >= 511)
			{
				++pdpte_index;
				pde_index = 0;
			}

			if (pdpte_index >= 511)
				pdpte_index = 0;
		}

		pdpte new_pdpte = { NULL };
		new_pdpte.present = true;
		new_pdpte.rw = true;
		new_pdpte.pfn = reinterpret_cast<std::uintptr_t>(new_pd.first) >> 12;
		new_pdpte.user_supervisor = true;
		new_pdpte.accessed = true;

		//
		// set pdpte entry
		//
		*reinterpret_cast<ppdpte>(new_pdpt.second + pdpte_index) = new_pdpte;

		pde new_pde = { NULL };
		new_pde.present = true;
		new_pde.rw = true;
		new_pde.pfn = reinterpret_cast<std::uintptr_t>(new_pt.first) >> 12;
		new_pde.user_supervisor = true;
		new_pde.accessed = true;

		//
		// set pde entry
		//
		*reinterpret_cast<ppde>(new_pd.second + pde_index) = new_pde;

		pte new_pte = { NULL };
		new_pte.present = true;
		new_pte.rw = true;
		new_pte.pfn = reinterpret_cast<std::uintptr_t>(addr) >> 12;
		new_pte.user_supervisor = true;
		new_pte.accessed = true;

		//
		// set pte entry
		//
		*reinterpret_cast<ppte>(new_pt.second + pte_index) = new_pte;

		//
		// set page offset
		//
		this->page_offset = virt_addr_t{ addr }.offset;
		return get_page();
	}

	void* mem_ctx::get_page() const
	{
		//
		// builds a new address given the state of all table indexes
		//
		virt_addr_t new_addr;
		new_addr.pml4_index = PML4E_INDEX;
		new_addr.pdpt_index = this->pdpte_index;
		new_addr.pd_index = this->pde_index;
		new_addr.pt_index = this->pte_index;
		new_addr.offset = this->page_offset;
		return new_addr.value;
	}

	void* mem_ctx::get_dirbase(kernel_ctx& k_ctx, DWORD pid)
	{
		if (!pid)
			return NULL;

		const auto peproc = reinterpret_cast<std::uint64_t>(k_ctx.get_peprocess(pid));
		if (!peproc)
			return NULL;

		pte dirbase = k_ctx.rkm<pte>(
			reinterpret_cast<void*>(peproc + 0x28)
		);

		return reinterpret_cast<void*>(dirbase.pfn << 12);
	}

	bool mem_ctx::hyperspace_entries(pt_entries& entries, void* addr)
	{
		if (!addr || !dirbase)
			return false;

		virt_addr_t virt_addr{ addr };

		entries.pml4.first = reinterpret_cast<ppml4e>(dirbase) + virt_addr.pml4_index;
		entries.pml4.second = k_ctx->rkm<pml4e>(
			k_ctx->get_virtual(entries.pml4.first));

		if (!entries.pml4.second.value)
			return false;

		entries.pdpt.first = reinterpret_cast<ppdpte>(entries.pml4.second.pfn << 12) + virt_addr.pdpt_index;
		entries.pdpt.second = k_ctx->rkm<pdpte>(
			k_ctx->get_virtual(entries.pdpt.first));

		if (!entries.pdpt.second.value)
			return false;

		entries.pd.first = reinterpret_cast<ppde>(entries.pdpt.second.pfn << 12) + virt_addr.pd_index;
		entries.pd.second = k_ctx->rkm<pde>(
			k_ctx->get_virtual(entries.pd.first));

		// if it's a 2mb page
		if (entries.pd.second.page_size)
		{
			memcpy(&entries.pt.second, &entries.pd.second, sizeof(pte));
			entries.pt.first = reinterpret_cast<ppte>(entries.pd.second.value);
			return true;
		}

		entries.pt.first = reinterpret_cast<ppte>(entries.pd.second.pfn << 12) + virt_addr.pt_index;
		entries.pt.second = k_ctx->rkm<pte>(
			k_ctx->get_virtual(entries.pt.first));

		if (!entries.pt.second.value)
			return false;

		return true;
	}
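
	//
	// note: the get_/set_ helpers below all follow the same pattern: resolve the
	// entry either via hyperspace_entries() (reading the target's tables through
	// kernel virtual mappings with rkm/wkm) or via a physical walk through
	// virt_to_phys()/read_phys()/write_phys(), depending on use_hyperspace.
	//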
	std::pair<ppte, pte> mem_ctx::get_pte(void* addr, bool use_hyperspace)
	{
		if (!dirbase || !addr)
			return {};

		pt_entries entries;
		if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
			return { entries.pt.first, entries.pt.second };

		return {};
	}

	void mem_ctx::set_pte(void* addr, const ::pte& pte, bool use_hyperspace)
	{
		if (!dirbase || !addr)
			return;

		if (use_hyperspace)
			k_ctx->wkm(k_ctx->get_virtual(addr), pte);
		else
			write_phys(addr, pte);
	}

	std::pair<ppde, pde> mem_ctx::get_pde(void* addr, bool use_hyperspace)
	{
		if (!dirbase || !addr)
			return {};

		pt_entries entries;
		if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
			return { entries.pd.first, entries.pd.second };

		return {};
	}

	void mem_ctx::set_pde(void* addr, const ::pde& pde, bool use_hyperspace)
	{
		if (!this->dirbase || !addr)
			return;

		if (use_hyperspace)
			k_ctx->wkm(k_ctx->get_virtual(addr), pde);
		else
			write_phys(addr, pde);
	}

	std::pair<ppdpte, pdpte> mem_ctx::get_pdpte(void* addr, bool use_hyperspace)
	{
		if (!dirbase || !addr)
			return {};

		pt_entries entries;
		if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
			return { entries.pdpt.first, entries.pdpt.second };

		return {};
	}

	void mem_ctx::set_pdpte(void* addr, const ::pdpte& pdpte, bool use_hyperspace)
	{
		if (!this->dirbase || !addr)
			return;

		if (use_hyperspace)
			k_ctx->wkm(k_ctx->get_virtual(addr), pdpte);
		else
			write_phys(addr, pdpte);
	}

	std::pair<ppml4e, pml4e> mem_ctx::get_pml4e(void* addr, bool use_hyperspace)
	{
		if (!this->dirbase || !addr)
			return {};

		pt_entries entries;
		if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
			return { entries.pml4.first, entries.pml4.second };

		return {};
	}

	void mem_ctx::set_pml4e(void* addr, const ::pml4e& pml4e, bool use_hyperspace)
	{
		if (!this->dirbase || !addr)
			return;

		if (use_hyperspace)
			k_ctx->wkm(k_ctx->get_virtual(addr), pml4e);
		else
			write_phys(addr, pml4e);
	}

	std::pair<void*, void*> mem_ctx::read_virtual(void* buffer, void* addr, std::size_t size)
	{
		if (!buffer || !addr || !size || !dirbase)
			return {};

		virt_addr_t virt_addr{ addr };
		if (size <= PAGE_SIZE - virt_addr.offset)
		{
			pt_entries entries;
			read_phys(
				buffer,
				virt_to_phys(entries, addr),
				size
			);

			return {
				reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(buffer) + size),
				reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(addr) + size)
			};
		}
		else
		{
			// cut remainder
			const auto [new_buffer_addr, new_addr] = read_virtual(
				buffer,
				addr,
				PAGE_SIZE - virt_addr.offset
			);

			// forward work load
			return read_virtual(
				new_buffer_addr,
				new_addr,
				size - (PAGE_SIZE - virt_addr.offset)
			);
		}
	}

	std::pair<void*, void*> mem_ctx::write_virtual(void* buffer, void* addr, std::size_t size)
	{
		if (!buffer || !addr || !size || !dirbase)
			return {};

		virt_addr_t virt_addr{ addr };
		if (size <= PAGE_SIZE - virt_addr.offset)
		{
			pt_entries entries;
			write_phys(
				buffer,
				virt_to_phys(entries, addr),
				size
			);

			return {
				reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(buffer) + size),
				reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(addr) + size)
			};
		}
		else
		{
			// cut remainder
			const auto [new_buffer_addr, new_addr] = write_virtual(
				buffer,
				addr,
				PAGE_SIZE - virt_addr.offset
			);

			// forward work load
			return write_virtual(
				new_buffer_addr,
				new_addr,
				size - (PAGE_SIZE - virt_addr.offset)
			);
		}
	}

	void mem_ctx::read_phys(void* buffer, void* addr, std::size_t size)
	{
		if (!buffer || !addr || !size)
			return;

		const auto temp_page = set_page(addr);
		if (temp_page)
			memcpy(buffer, temp_page, size);
	}

	void mem_ctx::write_phys(void* buffer, void* addr, std::size_t size)
	{
		if (!buffer || !addr || !size)
			return;

		const auto temp_page = set_page(addr);
		if (temp_page)
			memcpy(temp_page, buffer, size);
	}
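
	//
	// note: virt_to_phys() below walks the target's paging structures purely
	// through physical reads: pml4e -> pdpte -> pde -> pte, then returns the
	// final physical address with the original page offset applied. unlike
	// hyperspace_entries(), it does not special-case 2mb (large) pages.
	//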
	void* mem_ctx::virt_to_phys(pt_entries& entries, void* addr)
	{
		if (!addr || !this->dirbase)
			return {};

		const virt_addr_t virt_addr{ addr };

		//
		// traverse paging tables
		//
		auto pml4e = read_phys<::pml4e>(
			reinterpret_cast<ppml4e>(this->dirbase) + virt_addr.pml4_index);

		entries.pml4.first = reinterpret_cast<ppml4e>(this->dirbase) + virt_addr.pml4_index;
		entries.pml4.second = pml4e;

		if (!pml4e.value)
			return NULL;

		auto pdpte = read_phys<::pdpte>(
			reinterpret_cast<ppdpte>(pml4e.pfn << 12) + virt_addr.pdpt_index);

		entries.pdpt.first = reinterpret_cast<ppdpte>(pml4e.pfn << 12) + virt_addr.pdpt_index;
		entries.pdpt.second = pdpte;

		if (!pdpte.value)
			return NULL;

		auto pde = read_phys<::pde>(
			reinterpret_cast<ppde>(pdpte.pfn << 12) + virt_addr.pd_index);

		entries.pd.first = reinterpret_cast<ppde>(pdpte.pfn << 12) + virt_addr.pd_index;
		entries.pd.second = pde;

		if (!pde.value)
			return NULL;

		auto pte = read_phys<::pte>(
			reinterpret_cast<ppte>(pde.pfn << 12) + virt_addr.pt_index);

		entries.pt.first = reinterpret_cast<ppte>(pde.pfn << 12) + virt_addr.pt_index;
		entries.pt.second = pte;

		if (!pte.value)
			return NULL;

		return reinterpret_cast<void*>((pte.pfn << 12) + virt_addr.offset);
	}

	unsigned mem_ctx::get_pid() const
	{
		return pid;
	}

	void* mem_ctx::get_dirbase() const
	{
		return dirbase;
	}

	pml4e mem_ctx::operator[](std::uint16_t pml4_idx)
	{
		return read_phys<::pml4e>(reinterpret_cast<ppml4e>(this->dirbase) + pml4_idx);
	}

	pdpte mem_ctx::operator[](const std::pair<std::uint16_t, std::uint16_t>& entry_idx)
	{
		const auto pml4_entry = this->operator[](entry_idx.first);
		return read_phys<::pdpte>(reinterpret_cast<ppdpte>(pml4_entry.pfn << 12) + entry_idx.second);
	}

	pde mem_ctx::operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx)
	{
		const auto pdpt_entry = this->operator[]({ std::get<0>(entry_idx), std::get<1>(entry_idx) });
		return read_phys<::pde>(reinterpret_cast<ppde>(pdpt_entry.pfn << 12) + std::get<2>(entry_idx));
	}

	pte mem_ctx::operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx)
	{
		const auto pd_entry = this->operator[]({ std::get<0>(entry_idx), std::get<1>(entry_idx), std::get<2>(entry_idx) });
		return read_phys<::pte>(reinterpret_cast<ppte>(pd_entry.pfn << 12) + std::get<3>(entry_idx));
	}
}
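
//
// usage sketch (not from the original file, shown only as an illustration):
// how a caller might pair kernel_ctx with mem_ctx to read another process's
// memory. the kernel_ctx setup and the `target_pid`/`remote_addr` values are
// placeholders, not APIs defined in this translation unit.
//
//     nasa::kernel_ctx k_ctx( /* driver / physical-memory handle setup */ );
//     nasa::mem_ctx target( k_ctx, target_pid );
//
//     std::uint64_t value{};
//     target.read_virtual( &value, remote_addr, sizeof value );
//
//     // or inspect the translation for a given virtual address:
//     const auto [pte_addr, pte_value] = target.get_pte( remote_addr, true );
//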