fixed stability issues for both AMD & Intel systems...

branch: merge-requests/5/head · tag: v1.1
xerox · 4 years ago
parent ce7e10bb46
commit 8e3878583e

@@ -1,5 +1,4 @@
 #include "mem_ctx.hpp"
-#include <cassert>
 namespace physmeme
 {
@@ -9,88 +8,153 @@ namespace physmeme
        dirbase(get_dirbase(krnl_ctx, pid)),
        pid(pid)
    {
-       genesis_page.first = VirtualAlloc(
-           NULL,
-           PAGE_SIZE,
-           MEM_COMMIT | MEM_RESERVE,
-           PAGE_READWRITE
-       );
-
-       //
-       // page in the page, do not remove this makes the entries.
-       //
-       *(std::uintptr_t*)genesis_page.first = 0xC0FFEE;
-
-       //
-       // get the ppte and pte of the page we allocated
-       //
-       auto [page_ppte, page_pte] = get_pte(genesis_page.first, true);
-       genesis_page.second = page_pte;
-
-       //
-       // allocate a page that will get the mapping of the first pages PT
-       //
-       genesis_cursor.first = reinterpret_cast<::ppte>(
-           VirtualAlloc(
-               NULL,
-               0x1000,
-               MEM_COMMIT | MEM_RESERVE,
-               PAGE_READWRITE
-           ));
-
-       //
-       // page it in
-       //
-       *(std::uintptr_t*)genesis_cursor.first = 0xC0FFEE;
-
-       //
-       // get ppte and pte of the cursor page.
-       //
-       auto [cursor_ppte, cursor_pte] = get_pte(genesis_cursor.first, true);
-       genesis_cursor.second = cursor_pte;
-
-       //
-       // change the page to the PT of the first page we allocated.
-       //
-       cursor_pte.pfn = reinterpret_cast<std::uint64_t>(page_ppte) >> 12;
-       set_pte(genesis_cursor.first, cursor_pte, true);
-
-       //
-       // change the offset of genesis cursor page to genesis pages pt_index since the page is now a PT
-       // WARNING: pointer arithmetic, do not add pt_index * 8
-       //
-       genesis_cursor.first += +virt_addr_t{ genesis_page.first }.pt_index;
-       FLUSH_TLB;
+       // find an empty pml4e...
+       for (auto idx = 100u; idx > 0u; --idx)
+           if (!k_ctx->rkm<pml4e>(k_ctx->get_virtual((reinterpret_cast<::ppml4e>(get_dirbase()) + idx))).present)
+               this->pml4e_index = idx;
+
+       // allocate a pdpt
+       this->new_pdpt.second =
+           reinterpret_cast<ppdpte>(
+               VirtualAlloc(
+                   NULL,
+                   PAGE_SIZE,
+                   MEM_COMMIT | MEM_RESERVE,
+                   PAGE_READWRITE
+               ));
+       PAGE_IN(this->new_pdpt.second, PAGE_SIZE);
+
+       // get page table entries for new pdpt
+       pt_entries new_pdpt_entries;
+       hyperspace_entries(new_pdpt_entries, new_pdpt.second);
+       this->new_pdpt.first = reinterpret_cast<ppdpte>(new_pdpt_entries.pt.second.pfn << 12);
+
+       // make a new pml4e that points to our new pdpt.
+       new_pdpt_entries.pml4.second.pfn = new_pdpt_entries.pt.second.pfn;
+
+       // set the pml4e to point to the new pdpt
+       set_pml4e(reinterpret_cast<::ppml4e>(get_dirbase()) + this->pml4e_index, new_pdpt_entries.pml4.second, true);
+
+       // make a new pd
+       this->new_pd.second =
+           reinterpret_cast<ppde>(
+               VirtualAlloc(
+                   NULL,
+                   PAGE_SIZE,
+                   MEM_COMMIT | MEM_RESERVE,
+                   PAGE_READWRITE
+               ));
+       PAGE_IN(this->new_pd.second, PAGE_SIZE);
+
+       //
+       // get paging table entries for pd
+       //
+       pt_entries new_pd_entries;
+       hyperspace_entries(
+           new_pd_entries,
+           this->new_pd.second
+       );
+       this->new_pd.first = reinterpret_cast<ppde>(new_pd_entries.pt.second.pfn << 12);
+
+       //
+       // make a new pt
+       //
+       this->new_pt.second =
+           reinterpret_cast<ppte>(
+               VirtualAlloc(
+                   NULL,
+                   PAGE_SIZE,
+                   MEM_COMMIT | MEM_RESERVE,
+                   PAGE_READWRITE
+               ));
+       PAGE_IN(this->new_pt.second, PAGE_SIZE);
+
+       // get paging table entries for pt
+       pt_entries new_pt_entries;
+       hyperspace_entries(new_pt_entries, this->new_pt.second);
+       this->new_pt.first = reinterpret_cast<ppte>(new_pt_entries.pt.second.pfn << 12);
    }
 
    mem_ctx::~mem_ctx()
    {
-       set_pte(genesis_page.first, genesis_page.second, true);
-       set_pte(genesis_cursor.first, genesis_cursor.second, true);
+       // remove pml4e
+       pml4e null_value{ NULL };
+       set_pml4e(reinterpret_cast<::ppml4e>(get_dirbase()) + this->pml4e_index, null_value, true);
    }
 
    void* mem_ctx::set_page(void* addr)
    {
+       // table entry change.
+       {
+           ++pte_index;
+           if (pte_index >= 511)
+           {
+               ++pde_index;
+               pte_index = 0;
+           }
+           if (pde_index >= 511)
+           {
+               ++pdpte_index;
+               pde_index = 0;
+           }
+           if (pdpte_index >= 511)
+               pdpte_index = 0;
+       }
+
+       pdpte new_pdpte = { NULL };
+       new_pdpte.present = true;
+       new_pdpte.rw = true;
+       new_pdpte.pfn = reinterpret_cast<std::uintptr_t>(new_pd.first) >> 12;
+       new_pdpte.user_supervisor = true;
+       new_pdpte.accessed = true;
+
+       // set pdpte entry
+       *reinterpret_cast<pdpte*>(new_pdpt.second + pdpte_index) = new_pdpte;
+
+       pde new_pde = { NULL };
+       new_pde.present = true;
+       new_pde.rw = true;
+       new_pde.pfn = reinterpret_cast<std::uintptr_t>(new_pt.first) >> 12;
+       new_pde.user_supervisor = true;
+       new_pde.accessed = true;
+
+       // set pde entry
+       *reinterpret_cast<pde*>(new_pd.second + pde_index) = new_pde;
+
+       pte new_pte = { NULL };
+       new_pte.present = true;
+       new_pte.rw = true;
+       new_pte.pfn = reinterpret_cast<std::uintptr_t>(addr) >> 12;
+       new_pte.user_supervisor = true;
+       new_pte.accessed = true;
+
+       // set pte entry
+       *reinterpret_cast<pte*>(new_pt.second + pte_index) = new_pte;
+
+       // set page offset
        this->page_offset = virt_addr_t{ addr }.offset;
-       this->genesis_cursor.first->pfn = reinterpret_cast<uint64_t>(addr) >> 12;
-       FLUSH_TLB;
        return get_page();
    }
 
    void* mem_ctx::get_page() const
    {
-       return reinterpret_cast<void*>(
-           reinterpret_cast<std::uint64_t>(
-               this->genesis_page.first) + this->page_offset);
+       // builds a new address given the state of all table indexes
+       virt_addr_t new_addr;
+       new_addr.pml4_index = this->pml4e_index;
+       new_addr.pdpt_index = this->pdpte_index;
+       new_addr.pd_index = this->pde_index;
+       new_addr.pt_index = this->pte_index;
+       new_addr.offset = this->page_offset;
+       return new_addr.value;
    }
 
    void* mem_ctx::get_dirbase(kernel_ctx& k_ctx, DWORD pid)
    {
        const auto peproc =
-           reinterpret_cast<std::uint64_t>(
-               k_ctx.get_peprocess(pid));
+           reinterpret_cast<std::uint64_t>(k_ctx.get_peprocess(pid));
+       if (!peproc) return NULL;
 
        pte dirbase = k_ctx.rkm<pte>(
            reinterpret_cast<void*>(peproc + 0x28));
@@ -150,12 +214,8 @@ namespace physmeme
            return {};
        pt_entries entries;
-       if (use_hyperspace ? hyperspace_entries(entries, addr) : virt_to_phys(entries, addr))
-       {
-           ::pte pte;
-           memcpy(&pte, &entries.pt.second, sizeof(pte));
-           return { entries.pt.first, pte };
-       }
+       if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
+           return { entries.pt.first, entries.pt.second };
        return {};
    }
@@ -164,16 +224,10 @@ namespace physmeme
        if (!dirbase || !addr)
            return;
-       pt_entries entries;
        if (use_hyperspace)
-           if (hyperspace_entries(entries, addr))
-               k_ctx->wkm(
-                   k_ctx->get_virtual(entries.pt.first),
-                   pte
-               );
+           k_ctx->wkm(k_ctx->get_virtual(addr), pte);
        else
-           if (virt_to_phys(entries, addr))
-               write_phys(entries.pt.first, pte);
+           write_phys(addr, pte);
    }
 
    std::pair<ppde, pde> mem_ctx::get_pde(void* addr, bool use_hyperspace)
@@ -182,34 +236,20 @@ namespace physmeme
            return {};
        pt_entries entries;
-       if (use_hyperspace ? hyperspace_entries(entries, addr) : virt_to_phys(entries, addr))
-       {
-           ::pde pde;
-           memcpy(
-               &pde,
-               &entries.pd.second,
-               sizeof(pde)
-           );
-           return { entries.pd.first, pde };
-       }
+       if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
+           return { entries.pd.first, entries.pd.second };
        return {};
    }
 
    void mem_ctx::set_pde(void* addr, const ::pde& pde, bool use_hyperspace)
    {
-       if (!dirbase || !addr)
+       if (!this->dirbase || !addr)
            return;
-       pt_entries entries;
        if (use_hyperspace)
-           if (hyperspace_entries(entries, addr))
-               k_ctx->wkm(
-                   k_ctx->get_virtual(entries.pd.first),
-                   pde
-               );
+           k_ctx->wkm(k_ctx->get_virtual(addr), pde);
        else
-           if (virt_to_phys(entries, addr))
-               write_phys(entries.pd.first, pde);
+           write_phys(addr, pde);
    }
 
    std::pair<ppdpte, pdpte> mem_ctx::get_pdpte(void* addr, bool use_hyperspace)
@@ -218,55 +258,42 @@ namespace physmeme
            return {};
        pt_entries entries;
-       if (use_hyperspace ? hyperspace_entries(entries, addr) : virt_to_phys(entries, addr))
+       if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
            return { entries.pdpt.first, entries.pdpt.second };
        return {};
    }
 
    void mem_ctx::set_pdpte(void* addr, const ::pdpte& pdpte, bool use_hyperspace)
    {
-       if (!dirbase || !addr)
+       if (!this->dirbase || !addr)
            return;
-       pt_entries entries;
        if (use_hyperspace)
-           if (hyperspace_entries(entries, addr))
-               k_ctx->wkm(
-                   k_ctx->get_virtual(entries.pdpt.first),
-                   pdpte
-               );
+           k_ctx->wkm(k_ctx->get_virtual(addr), pdpte);
        else
-           if (virt_to_phys(entries, addr))
-               write_phys(entries.pdpt.first, pdpte);
+           write_phys(addr, pdpte);
    }
 
    std::pair<ppml4e, pml4e> mem_ctx::get_pml4e(void* addr, bool use_hyperspace)
    {
-       if (!dirbase || !addr)
-           return { {}, {} };
+       if (!this->dirbase || !addr)
+           return {};
        pt_entries entries;
-       if (use_hyperspace ? hyperspace_entries(entries, addr) : virt_to_phys(entries, addr))
-           return { entries.pml4.first, entries.pml4.second};
-
-       return { {}, {} };
+       if (use_hyperspace ? hyperspace_entries(entries, addr) : (bool)virt_to_phys(entries, addr))
+           return { entries.pml4.first, entries.pml4.second };
+       return {};
    }
 
    void mem_ctx::set_pml4e(void* addr, const ::pml4e& pml4e, bool use_hyperspace)
    {
-       if (!dirbase || !addr)
+       if (!this->dirbase || !addr)
            return;
-       pt_entries entries;
        if (use_hyperspace)
-           if (hyperspace_entries(entries, addr))
-               k_ctx->wkm(
-                   k_ctx->get_virtual(entries.pml4.first),
-                   pml4e
-               );
+           k_ctx->wkm(k_ctx->get_virtual(addr), pml4e);
        else
-           if (virt_to_phys(entries, addr))
-               write_phys(entries.pml4.first, pml4e);
+           write_phys(addr, pml4e);
    }
 
    std::pair<void*, void*> mem_ctx::read_virtual(void* buffer, void* addr, std::size_t size)
@@ -278,12 +305,15 @@ namespace physmeme
        if (size <= PAGE_SIZE - virt_addr.offset)
        {
            pt_entries entries;
-           read_phys(
+           read_phys
+           (
                buffer,
                virt_to_phys(entries, addr),
                size
            );
-           return {
+
+           return
+           {
                reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(buffer) + size),
                reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(addr) + size)
            };
@@ -291,13 +321,16 @@ namespace physmeme
        else
        {
            // cut remainder
-           const auto [new_buffer_addr, new_addr] = read_virtual(
+           const auto [new_buffer_addr, new_addr] = read_virtual
+           (
                buffer,
                addr,
                PAGE_SIZE - virt_addr.offset
            );
+
            // forward work load
-           return read_virtual(
+           return read_virtual
+           (
                new_buffer_addr,
                new_addr,
                size - (PAGE_SIZE - virt_addr.offset)
@@ -314,13 +347,15 @@ namespace physmeme
        if (size <= PAGE_SIZE - virt_addr.offset)
        {
            pt_entries entries;
-           write_phys(
+           write_phys
+           (
                buffer,
                virt_to_phys(entries, addr),
                size
            );
-           return {
+           return
+           {
                reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(buffer) + size),
                reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(addr) + size)
            };
@@ -328,14 +363,16 @@ namespace physmeme
        else
        {
            // cut remainder
-           const auto [new_buffer_addr, new_addr] = write_virtual(
+           const auto [new_buffer_addr, new_addr] = write_virtual
+           (
                buffer,
                addr,
                PAGE_SIZE - virt_addr.offset
            );
            // forward work load
-           return write_virtual(
+           return write_virtual
+           (
                new_buffer_addr,
                new_addr,
                size - (PAGE_SIZE - virt_addr.offset)
@@ -348,7 +385,7 @@ namespace physmeme
        if (!buffer || !addr || !size)
            return;
-       auto temp_page = set_page(addr);
+       const auto temp_page = set_page(addr);
        if (temp_page)
            memcpy(buffer, temp_page, size);
    }
@@ -358,25 +395,24 @@ namespace physmeme
        if (!buffer || !addr || !size)
            return;
-       auto temp_page = set_page(addr);
+       const auto temp_page = set_page(addr);
        if (temp_page)
            memcpy(temp_page, buffer, size);
    }
 
    void* mem_ctx::virt_to_phys(pt_entries& entries, void* addr)
    {
-       if (!addr || !dirbase)
+       if (!addr || !this->dirbase)
            return {};
 
-       virt_addr_t virt_addr{ addr };
+       const virt_addr_t virt_addr{ addr };
 
        //
        // traverse paging tables
        //
        auto pml4e = read_phys<::pml4e>(
-           reinterpret_cast<ppml4e>(dirbase) + virt_addr.pml4_index);
+           reinterpret_cast<ppml4e>(this->dirbase) + virt_addr.pml4_index);
 
-       entries.pml4.first = reinterpret_cast<ppml4e>(dirbase) + virt_addr.pml4_index;
+       entries.pml4.first = reinterpret_cast<ppml4e>(this->dirbase) + virt_addr.pml4_index;
        entries.pml4.second = pml4e;
 
        if (!pml4e.value)
@@ -421,4 +457,27 @@ namespace physmeme
    {
        return dirbase;
    }
+
+   pml4e mem_ctx::operator[](std::uint16_t pml4_idx)
+   {
+       return read_phys<::pml4e>(reinterpret_cast<ppml4e>(this->dirbase) + pml4_idx);
+   }
+
+   pdpte mem_ctx::operator[](const std::pair<std::uint16_t, std::uint16_t>& entry_idx)
+   {
+       const auto pml4_entry = this->operator[](entry_idx.first);
+       return read_phys<::pdpte>(reinterpret_cast<ppdpte>(pml4_entry.pfn << 12) + entry_idx.second);
+   }
+
+   pde mem_ctx::operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx)
+   {
+       const auto pdpt_entry = this->operator[]({ std::get<0>(entry_idx), std::get<1>(entry_idx) });
+       return read_phys<::pde>(reinterpret_cast<ppde>(pdpt_entry.pfn << 12) + std::get<2>(entry_idx));
+   }
+
+   pte mem_ctx::operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx)
+   {
+       const auto pd_entry = this->operator[]({ std::get<0>(entry_idx), std::get<1>(entry_idx), std::get<2>(entry_idx) });
+       return read_phys<::pte>(reinterpret_cast<ppte>(pd_entry.pfn << 12) + std::get<3>(entry_idx));
+   }
 }
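A note on the rework above, for orientation: instead of remapping a single "genesis" PTE in place and spinning until a context switch flushes the TLB, the constructor now claims an apparently unused PML4 slot (scanning down from index 100) and owns an entire PDPT → PD → PT chain, and set_page advances to a fresh PTE slot on every call, so the window's virtual address has not been cached by the TLB yet. The sketch below is illustrative only and not part of the commit; the virt_addr_t union here is a hypothetical stand-in for the project's own type, showing how get_page assembles a canonical address out of the four table indexes plus the page offset.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the project's virt_addr_t union: an x64
// virtual address split into its paging-structure indexes. The anonymous
// struct and the type-punning read through `value` mirror the project's
// own idiom (accepted by MSVC, GCC, and Clang).
union virt_addr_t
{
    void* value;
    struct
    {
        std::uint64_t offset     : 12; // byte offset within the 4KB page
        std::uint64_t pt_index   : 9;  // index into the page table
        std::uint64_t pd_index   : 9;  // index into the page directory
        std::uint64_t pdpt_index : 9;  // index into the PDPT
        std::uint64_t pml4_index : 9;  // index into the PML4
        std::uint64_t reserved   : 16; // canonical sign-extension bits
    };
};

int main()
{
    // e.g. the constructor picked pml4e_index = 100 and set_page's
    // cycling currently sits on pte slot 5, with a 0x123 byte offset
    virt_addr_t hyperspace{};
    hyperspace.pml4_index = 100;
    hyperspace.pdpt_index = 0;
    hyperspace.pd_index   = 0;
    hyperspace.pt_index   = 5;
    hyperspace.offset     = 0x123;

    // assembled pointer is 0x320000005123: (100 << 39) | (5 << 12) | 0x123
    std::printf("hyperspace window at %p\n", hyperspace.value);
}

Since the scan stays below PML4 index 256, bit 47 of the assembled address is clear and no sign extension is needed, so the window lives in the user half of the address space.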

@@ -2,9 +2,7 @@
 #include "../util/nt.hpp"
 #include "../kernel_ctx/kernel_ctx.h"
 
-// context switch causes the TLB to be flushed...
-#define FLUSH_TLB while(!SwitchToThread()) continue;
+#define PAGE_IN(addr, size) memset(addr, NULL, size)
 
 struct pt_entries
 {
    std::pair<ppml4e, pml4e> pml4;
@@ -17,8 +15,9 @@ namespace physmeme
 {
    class mem_ctx
    {
+       friend class mapper_ctx;
    public:
-       explicit mem_ctx(kernel_ctx& k_ctx, DWORD pid);
+       explicit mem_ctx(kernel_ctx& k_ctx, DWORD pid = GetCurrentProcessId());
        ~mem_ctx();
 
        //
@@ -57,8 +56,7 @@ namespace physmeme
        template <class T>
        T read_phys(void* addr)
        {
-           if (!addr)
-               return {};
+           if (!addr) return {};
            T buffer;
            read_phys((void*)&buffer, addr, sizeof(T));
            return buffer;
@@ -67,8 +65,7 @@ namespace physmeme
        template <class T>
        void write_phys(void* addr, const T& data)
        {
-           if (!addr)
-               return;
+           if (!addr) return;
            write_phys((void*)&data, addr, sizeof(T));
        }
@@ -78,8 +75,7 @@ namespace physmeme
        template <class T>
        T read_virtual(void* addr)
        {
-           if (!addr)
-               return {};
+           if (!addr) return {};
            T buffer;
            read_virtual((void*)&buffer, addr, sizeof(T));
            return buffer;
@@ -102,19 +98,26 @@ namespace physmeme
        void* set_page(void* addr);
        void* get_page() const;
        unsigned get_pid() const;
+
+       kernel_ctx* k_ctx;
+
+       pml4e operator[](std::uint16_t pml4_idx);
+       pdpte operator[](const std::pair<std::uint16_t, std::uint16_t>& entry_idx);
+       pde operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx);
+       pte operator[](const std::tuple<std::uint16_t, std::uint16_t, std::uint16_t, std::uint16_t>& entry_idx);
    private:
        //
        // given an address fill pt entries with physical addresses and entry values.
        //
        bool hyperspace_entries(pt_entries& entries, void* addr);
-       std::pair<void*, pte> genesis_page;
-       std::pair<ppte, pte> genesis_cursor;
+
        void* dirbase;
-       kernel_ctx* k_ctx;
+       std::uint16_t pml4e_index, pdpte_index, pde_index, pte_index, page_offset;
+
+       /// first == physical
+       /// second == virtual
+       std::pair<ppdpte, ppdpte> new_pdpt;
+       std::pair<ppde, ppde> new_pd;
+       std::pair<ppte, ppte> new_pt;
        unsigned pid;
-       unsigned short page_offset;
    };
 }
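On the macro swap in this header: the removed FLUSH_TLB spun on SwitchToThread on the theory that a context switch flushes stale TLB entries, while the new PAGE_IN simply memsets a freshly VirtualAlloc'd region so that demand paging backs every page with a physical frame, and therefore a valid PTE with a real pfn, before hyperspace_entries walks it. A minimal sketch of that idea, separate from the commit itself:

#include <windows.h>
#include <cstdio>
#include <cstring>

int main()
{
    // Reserve and commit one page. Windows backs committed memory lazily
    // (demand-zero), so no physical frame exists until the first access.
    void* page = VirtualAlloc(nullptr, 0x1000, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (!page)
        return 1;

    // Equivalent of the commit's PAGE_IN(addr, size): touching every byte
    // forces a physical frame (and a valid PTE with a real pfn) to exist,
    // which is what the hyperspace_entries() lookups rely on afterwards.
    std::memset(page, 0, 0x1000);

    std::printf("paged-in region at %p\n", page);
    VirtualFree(page, 0, MEM_RELEASE);
    return 0;
}

Without the touch, a committed page can remain demand-zero with no frame assigned, so there would be no pfn for the constructor to repurpose for its new PDPT, PD, and PT.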

@@ -265,7 +265,7 @@ typedef union _pml4e
    struct
    {
        ULONG64 present : 1;          // Must be 1, region invalid if 0.
-       ULONG64 ReadWrite : 1;        // If 0, writes not allowed.
+       ULONG64 rw : 1;               // If 0, writes not allowed.
        ULONG64 user_supervisor : 1;  // If 0, user-mode accesses not allowed.
        ULONG64 PageWriteThrough : 1; // Determines the memory type used to access PDPT.
        ULONG64 page_cache : 1;       // Determines the memory type used to access PDPT.
@@ -309,11 +309,11 @@ typedef union _pde
    struct
    {
        ULONG64 present : 1;          // Must be 1, region invalid if 0.
-       ULONG64 ReadWrite : 1;        // If 0, writes not allowed.
+       ULONG64 rw : 1;               // If 0, writes not allowed.
        ULONG64 user_supervisor : 1;  // If 0, user-mode accesses not allowed.
        ULONG64 PageWriteThrough : 1; // Determines the memory type used to access PT.
        ULONG64 page_cache : 1;       // Determines the memory type used to access PT.
-       ULONG64 Accessed : 1;         // If 0, this entry has not been used for translation.
+       ULONG64 accessed : 1;         // If 0, this entry has not been used for translation.
        ULONG64 Ignored1 : 1;
        ULONG64 page_size : 1;        // If 1, this entry maps a 2MB page.
        ULONG64 Ignored2 : 4;
@@ -331,7 +331,7 @@ typedef union _pte
    struct
    {
        ULONG64 present : 1;          // Must be 1, region invalid if 0.
-       ULONG64 ReadWrite : 1;        // If 0, writes not allowed.
+       ULONG64 rw : 1;               // If 0, writes not allowed.
        ULONG64 user_supervisor : 1;  // If 0, user-mode accesses not allowed.
        ULONG64 PageWriteThrough : 1; // Determines the memory type used to access the memory.
        ULONG64 page_cache : 1;       // Determines the memory type used to access the memory.
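The renames above (ReadWrite to rw, Accessed to accessed) match the field names the new set_page code assigns (new_pte.rw, new_pte.accessed, and so on). As a reminder of how these entries are consumed, here is a self-contained model of the index arithmetic behind virt_to_phys and the new operator[] overloads; it is illustrative only and reads no real page tables.

#include <cstdint>
#include <cstdio>

// Illustrative model of the 4-level walk performed by virt_to_phys() and
// the new operator[] overloads: each level's pfn, shifted left by 12, is
// the physical base of the next table, and the next 9-bit index selects
// an 8-byte entry inside it. Real code must read each entry from physical
// memory to learn the pfn for the next step.
static std::uint64_t entry_address(std::uint64_t table_phys_base, std::uint64_t index)
{
    return table_phys_base + index * 8;
}

int main()
{
    const std::uint64_t dirbase = 0x1ad000;            // example cr3 value (physical)
    const std::uint64_t virt    = 0x00007ff612345678;  // example user-mode address

    const std::uint64_t pml4_index = (virt >> 39) & 0x1ff;
    const std::uint64_t pdpt_index = (virt >> 30) & 0x1ff;
    const std::uint64_t pd_index   = (virt >> 21) & 0x1ff;
    const std::uint64_t pt_index   = (virt >> 12) & 0x1ff;
    const std::uint64_t offset     =  virt        & 0xfff;

    // first hop: the pml4e lives at dirbase + pml4_index * 8
    std::printf("pml4e at %#llx\n",
        static_cast<unsigned long long>(entry_address(dirbase, pml4_index)));

    // subsequent hops (comments only, since physical memory is not readable here):
    //   pdpte at (pml4e.pfn << 12) + pdpt_index * 8
    //   pde   at (pdpte.pfn << 12) + pd_index   * 8
    //   pte   at (pde.pfn   << 12) + pt_index   * 8
    //   final physical address = (pte.pfn << 12) + offset
    std::printf("indexes: %llu %llu %llu %llu, offset %#llx\n",
        static_cast<unsigned long long>(pml4_index),
        static_cast<unsigned long long>(pdpt_index),
        static_cast<unsigned long long>(pd_index),
        static_cast<unsigned long long>(pt_index),
        static_cast<unsigned long long>(offset));
}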

Binary file not shown.