#include <pch.h>
#include "Emu/Utility/System/Memory/VirtualMemory.h"
#include "SysDeps/Endian.h"

using namespace std;
using namespace Onikiri;
using namespace EmulatorUtility;

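// Enables the one-entry software TLB that caches the most recent page-table
// lookup. When this macro is left undefined, TLB::Lookup() below misses
// unconditionally and every translation falls back to the hash map.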
#define ENABLED_EMULATOR_UTILITY_TLB 1

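// Enables the block-wise fast paths in TargetMemset()/MemCopyToHost()/
// MemCopyToTarget(), which use memset()/memcpy() on whole page blocks
// instead of issuing one emulated access per byte.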
#define ENABLED_VIRTUAL_MEMORY_FAST_HELPER 1

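// Initial bucket count of the page-table hash map (m_map).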
static const int PAGE_TABLE_MAP_INITIAL_BUCKET_COUNT = 16;

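// TLB: a one-entry translation cache sitting in front of the page-table
// hash map. It holds the entry of the most recently translated page;
// m_addrMask clears the in-page offset bits so that any address within the
// cached page hits.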
TLB::TLB( int offsetBits )
{
    memset( &m_body, 0, sizeof(m_body) );
    m_addr  = 0;
    m_valid = false;

    m_offsetBits = offsetBits;
    m_addrMask   = shttl::mask( m_offsetBits, 64 - m_offsetBits );
}

TLB::~TLB()
{
}

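// Returns true and fills *entry if addr falls within the cached page.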
bool TLB::Lookup( u64 addr, PageTableEntry* entry ) const
{
#ifndef ENABLED_EMULATOR_UTILITY_TLB
    return false;
#endif

    if( m_valid && m_addr == ( addr & m_addrMask ) ){
        *entry = m_body;
        return true;
    }

    return false;
}

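// Caches the entry for the page containing addr, replacing the previous one.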
void TLB::Write( u64 addr, const PageTableEntry& entry )
{
    m_addr  = addr & m_addrMask;
    m_body  = entry;
    m_valid = true;
}

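// Invalidates the cached entry. Called whenever the page table changes.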
void TLB::Flush()
{
    m_valid = false;
}

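// PageTable: maps page-aligned target addresses to reference-counted
// physical pages. Logical entries live in a hash map keyed by the
// page-aligned address; PhysicalMemoryPage records are allocated from a
// fixed-size pool.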
PageTable::PageTable( int offsetBits ) :
    m_map(
        PAGE_TABLE_MAP_INITIAL_BUCKET_COUNT,
        AddrHash( offsetBits )
    ),
    m_offsetBits( offsetBits ),
    m_offsetMask( ~(u64)0 << offsetBits ),
    m_tlb( offsetBits ),
    m_phyPagePool( sizeof(PhysicalMemoryPage) )
{
}

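// Drops one reference per logical page; a physical page record is returned
// to the pool when its last reference disappears.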
PageTable::~PageTable()
{
    for( map_type::iterator i = m_map.begin(); i != m_map.end(); ++i ){
        PageTableEntry& logPage = i->second;
        PhysicalMemoryPage* phyPage = logPage.phyPage;
        phyPage->refCount--;
        if( phyPage->refCount == 0 ){
            m_phyPagePool.free( phyPage );
        }
    }
    m_map.clear();
}

int PageTable::GetOffsetBits() const
{
    return m_offsetBits;
}

u64 PageTable::GetOffsetMask() const
{
    return m_offsetMask;
}

size_t PageTable::GetPageSize() const
{
    return (size_t)1 << m_offsetBits;
}

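// Translates a target address to a host pointer. Tries the one-entry TLB
// first, then the hash map (refilling the TLB on a hit); throws if the
// page is not mapped.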
void* PageTable::TargetToHost( u64 targetAddr )
{
    PageTableEntry entry;
    if( m_tlb.Lookup( targetAddr, &entry ) ){
        return static_cast<u8*>(entry.phyPage->ptr) + (targetAddr & ~m_offsetMask);
    }

    map_type::const_iterator e = m_map.find( targetAddr & m_offsetMask );

    if( e == m_map.end() ){
        THROW_RUNTIME_ERROR("An unmapped page was referenced.");
        return 0;
    }
    else{
        entry = e->second;
        m_tlb.Write( targetAddr, entry );
        return static_cast<u8*>(entry.phyPage->ptr) + (targetAddr & ~m_offsetMask);
    }
}

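// Maps the page containing targetAddr to hostAddr with a fresh physical
// page record (refCount = 1). The TLB is flushed because the table changed.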
void PageTable::AddMap( u64 targetAddr, u8* hostAddr, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    PageTableEntry logPage;
    PhysicalMemoryPage* phyPage = (PhysicalMemoryPage*)m_phyPagePool.malloc();

    phyPage->ptr = hostAddr;
    phyPage->refCount = 1;

    logPage.phyPage = phyPage;
    logPage.attr = attr;

    m_map[ targetAddr & m_offsetMask ] = logPage;
    m_tlb.Flush();
}

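// Aliases the destination page to the source page's physical memory
// (incrementing its reference count), with its own attribute bits. This is
// the sharing half of the copy-on-write scheme implemented further below.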
void PageTable::CopyMap( u64 dstTargetAddr, u64 srcTargetAddr, VIRTUAL_MEMORY_ATTR_TYPE dstAttr )
{
    if( !IsMapped( srcTargetAddr ) ){
        THROW_RUNTIME_ERROR("Attempted to copy an unmapped page.");
    }

    map_type::iterator e = m_map.find( srcTargetAddr & m_offsetMask );
    e->second.phyPage->refCount++;

    PageTableEntry newEntry;
    newEntry.phyPage = e->second.phyPage;
    newEntry.attr = dstAttr;
    m_map[ dstTargetAddr & m_offsetMask ] = newEntry;

    m_tlb.Flush();
}

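// Fetches the entry for the page containing targetAddr; returns false if
// the page is unmapped. A hash-map hit refills the TLB.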
bool PageTable::GetMap( u64 targetAddr, PageTableEntry* page )
{
    if( m_tlb.Lookup( targetAddr, page ) ){
        return true;
    }

    map_type::const_iterator e = m_map.find( targetAddr & m_offsetMask );
    if( e == m_map.end() ){
        return false;
    }
    else{
        m_tlb.Write( targetAddr, e->second );
        *page = e->second;
        return true;
    }
}

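// Overwrites the entry for an already-mapped page; returns false if the
// page is unmapped.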
bool PageTable::SetMap( u64 targetAddr, const PageTableEntry& page )
{
    map_type::iterator e = m_map.find( targetAddr & m_offsetMask );
    if( e == m_map.end() ){
        return false;
    }
    else{
        m_tlb.Write( targetAddr, page );
        e->second = page;
        return true;
    }
}

int PageTable::GetPageReferenceCount( u64 targetAddr )
{
    PageTableEntry page;
    if( GetMap( targetAddr, &page ) ){
        return page.phyPage->refCount;
    }
    else{
        return 0;
    }
}

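// Unmaps the page containing targetAddr and decrements the physical page's
// reference count, freeing the record when it reaches zero. Returns the
// remaining reference count, or -1 if the page was not mapped.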
int PageTable::RemoveMap( u64 targetAddr )
{
    map_type::iterator e = m_map.find( targetAddr & m_offsetMask );

    int refCount = 0;
    if( e != m_map.end() ){
        PhysicalMemoryPage* phyPage = e->second.phyPage;
        phyPage->refCount--;
        refCount = phyPage->refCount;
        if( refCount == 0 ){
            m_phyPagePool.free( phyPage );
        }
        m_map.erase(e);
        m_tlb.Flush();
        return refCount;
    }
    return -1;
}

bool PageTable::IsMapped( u64 targetAddr ) const
{
    PageTableEntry entry;
    if( m_tlb.Lookup( targetAddr, &entry ) ){
        return true;
    }

    return ( m_map.find( targetAddr & m_offsetMask ) != m_map.end() );
}

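// Splits [addr, addr + size) at page boundaries and writes the resulting
// (address, size) blocks through the output iterator e; returns the block
// count. Only the first and last blocks may be smaller than a page.
//
// Worked example with 4 KiB pages (offsetBits = 12):
//   addr = 0x0ff0, size = 0x2000
//   -> (0x0ff0, 0x0010), (0x1000, 0x1000), (0x2000, 0x0ff0)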
template <typename Iter>
int VirtualMemory::SplitAtMapUnitBoundary( u64 addr, u64 size, Iter e ) const
{
    u64 unitSize = GetPageSize();
    u64 endAddr = addr + size;
    u64 offsetMask = m_pageTbl.GetOffsetMask();
    int nBlocks = 0;

    u64 firstSize = std::min( unitSize - (addr & ~offsetMask), endAddr - addr );
    *e++ = MemoryBlock( addr, firstSize );
    addr += firstSize;
    nBlocks++;

    while( addr < endAddr ){
        *e++ = MemoryBlock( addr, std::min( unitSize, endAddr - addr ) );
        addr += unitSize;
        nBlocks++;
    }

    return nBlocks;
}

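// VirtualMemory: the per-process target address space, built on PageTable.
// A minimal usage sketch (the surrounding SystemIF/MemAccess setup is
// assumed, not shown in this file):
//
//   VirtualMemory vm( pid, /* bigEndian = */ false, simSystem );
//   vm.AssignPhysicalMemory( 0x10000, 0x2000,
//                            VIRTUAL_MEMORY_ATTR_READ | VIRTUAL_MEMORY_ATTR_WRITE );
//   MemAccess access;
//   access.address.address = 0x10008;
//   access.size  = 8;
//   access.value = 42;
//   vm.WriteMemory( &access );   // access.result == MemAccess::MAR_SUCCESS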
VirtualMemory::VirtualMemory( int pid, bool bigEndian, SystemIF* simSystem ) :
    m_pageTbl( VIRTUAL_MEMORY_PAGE_SIZE_BITS ),
    m_pool( m_pageTbl.GetPageSize() ),
    m_simSystem( simSystem ),
    m_pid( pid ),
    m_bigEndian( bigEndian )
{
}

VirtualMemory::~VirtualMemory()
{
}

u64 VirtualMemory::GetPageSize() const
{
    return m_pageTbl.GetPageSize();
}

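// Emulates a load. Fails with an error code (not an exception) when the
// page is unmapped, when the access crosses a page boundary (the
// addr ^ (addr + size - 1) check), or when the page is not readable;
// otherwise reads 1/2/4/8 bytes with endian conversion and optional sign
// extension.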
void VirtualMemory::ReadMemory( MemAccess* access )
{
    u64 addr = access->address.address;
    PageTableEntry page;

    if( !m_pageTbl.GetMap( addr, &page ) ){
        access->result = MemAccess::MAR_READ_INVALID_ADDRESS;
        access->value = 0;
        return;
    }

    if( ((addr ^ (addr + access->size - 1)) >> VIRTUAL_MEMORY_PAGE_SIZE_BITS) != 0 ){
        access->result = MemAccess::MAR_READ_UNALIGNED_ADDRESS;
        access->value = 0;
        return;
    }

    if( !(page.attr & VIRTUAL_MEMORY_ATTR_READ) ){
        access->result = MemAccess::MAR_READ_NOT_READABLE;
        return;
    }

    void* ptr = m_pageTbl.TargetToHost( addr );
    u64 result;
    switch( access->size ){
    case 1:
        if( access->sign )
            result = (u64)(s64)EndianSpecifiedToHost( *static_cast<s8*>(ptr), m_bigEndian );
        else
            result = (u64)EndianSpecifiedToHost( *static_cast<u8*>(ptr), m_bigEndian );
        break;
    case 2:
        if( access->sign )
            result = (u64)(s64)EndianSpecifiedToHost( *static_cast<s16*>(ptr), m_bigEndian );
        else
            result = (u64)EndianSpecifiedToHost( *static_cast<u16*>(ptr), m_bigEndian );
        break;
    case 4:
        if( access->sign )
            result = (u64)(s64)EndianSpecifiedToHost( *static_cast<s32*>(ptr), m_bigEndian );
        else
            result = (u64)EndianSpecifiedToHost( *static_cast<u32*>(ptr), m_bigEndian );
        break;
    case 8:
        result = EndianSpecifiedToHost( *static_cast<u64*>(ptr), m_bigEndian );
        break;
    default:
        THROW_RUNTIME_ERROR("Invalid Read Size");
        result = 0;
    }

    access->result = MemAccess::MAR_SUCCESS;
    access->value = result;
}

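// Emulates a store. Performs the same unmapped / page-crossing / permission
// checks as ReadMemory(), then breaks any copy-on-write sharing before
// writing 1/2/4/8 bytes with endian conversion.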
void VirtualMemory::WriteMemory( MemAccess* access )
{
    PageTableEntry page;
    u64 addr = access->address.address;
    if( !m_pageTbl.GetMap( addr, &page ) ){
        access->result = MemAccess::MAR_WRITE_INVALID_ADDRESS;
        return;
    }

    if( ((addr ^ (addr + access->size - 1)) >> VIRTUAL_MEMORY_PAGE_SIZE_BITS) != 0 ){
        access->result = MemAccess::MAR_WRITE_UNALIGNED_ADDRESS;
        return;
    }

    if( !(page.attr & VIRTUAL_MEMORY_ATTR_WRITE) ){
        access->result = MemAccess::MAR_WRITE_NOT_WRITABLE;
        return;
    }

    if( CopyPageOnWrite( addr ) ){
        m_pageTbl.GetMap( addr, &page );
    }

    void* ptr = m_pageTbl.TargetToHost( addr );
    switch( access->size ){
    case 1:
        *static_cast<u8*>(ptr) = EndianHostToSpecified( (u8)access->value, m_bigEndian );
        break;
    case 2:
        *static_cast<u16*>(ptr) = EndianHostToSpecified( (u16)access->value, m_bigEndian );
        break;
    case 4:
        *static_cast<u32*>(ptr) = EndianHostToSpecified( (u32)access->value, m_bigEndian );
        break;
    case 8:
        *static_cast<u64*>(ptr) = EndianHostToSpecified( (u64)access->value, m_bigEndian );
        break;
    default:
        THROW_RUNTIME_ERROR("Invalid Write Size");
    }

    access->result = MemAccess::MAR_SUCCESS;
}

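// memset() over target memory. The fast path works page block by page
// block, breaking copy-on-write sharing once per block; the fallback
// writes one byte at a time through WriteMemory().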
void VirtualMemory::TargetMemset( u64 targetAddr, int value, u64 size )
{
#ifdef ENABLED_VIRTUAL_MEMORY_FAST_HELPER
    BlockArray blocks;
    SplitAtMapUnitBoundary( targetAddr, size, back_inserter(blocks) );

    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        CopyPageOnWrite( e->addr );
        memset( m_pageTbl.TargetToHost(e->addr), value, (size_t)e->size );
    }
#else
    for( u64 i = 0; i < size; i++ ){
        WriteMemory( targetAddr + i, 1, value );
    }
#endif
}

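// Copies target memory out to a host buffer, page block by page block on
// the fast path.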
void VirtualMemory::MemCopyToHost( void* dst, u64 src, u64 size )
{
#ifdef ENABLED_VIRTUAL_MEMORY_FAST_HELPER
    BlockArray blocks;
    SplitAtMapUnitBoundary( src, size, back_inserter(blocks) );

    u8* dst_u8 = static_cast<u8*>(dst);
    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        memcpy( dst_u8, m_pageTbl.TargetToHost(e->addr), (size_t)e->size );
        dst_u8 += e->size;
    }
#else
    u8* host = static_cast<u8*>(dst);
    u64 target = src;
    for( u64 i = 0; i < size; i++ ){
        host[i] = static_cast<u8>( ReadMemory( target + i, 1 ) );
    }
#endif
}

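// Copies a host buffer into target memory; on the fast path each touched
// page block has its copy-on-write sharing resolved first.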
void VirtualMemory::MemCopyToTarget( u64 dst, const void* src, u64 size )
{
#ifdef ENABLED_VIRTUAL_MEMORY_FAST_HELPER
    BlockArray blocks;
    SplitAtMapUnitBoundary( dst, size, back_inserter(blocks) );

    const u8* src_u8 = static_cast<const u8*>(src);
    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        CopyPageOnWrite( e->addr );
        memcpy( m_pageTbl.TargetToHost(e->addr), src_u8, (size_t)e->size );
        src_u8 += e->size;
    }
#else
    const u8* host = static_cast<const u8*>(src);
    u64 target = dst;
    for( u64 i = 0; i < size; i++ ){
        WriteMemory( target + i, 1, host[i] );
    }
#endif
}

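// Allocates a zero-filled host page for the page containing addr and maps
// it; throws if the page is already mapped. The ranged overload maps each
// page block of [addr, addr + size).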
void VirtualMemory::AssignPhysicalMemory( u64 addr, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    if( m_pageTbl.IsMapped(addr) ){
        THROW_RUNTIME_ERROR( "The specified target address is already mapped." );
        return;
    }

    void* mem = m_pool.malloc();
    if( !mem )
        THROW_RUNTIME_ERROR( "out of memory" );
    m_pageTbl.AddMap( addr, (u8*)mem, attr );
    memset( mem, 0, (size_t)GetPageSize() );

    m_simSystem->NotifyMemoryAllocation(
        Addr( m_pid, TID_INVALID, addr ),
        GetPageSize(),
        true
    );
}

void VirtualMemory::AssignPhysicalMemory( u64 addr, u64 size, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    BlockArray blocks;
    SplitAtMapUnitBoundary( addr, size, back_inserter(blocks) );

    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        AssignPhysicalMemory( e->addr, attr );
    }
}

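// Unmaps the page containing addr; the backing host memory is returned to
// the pool only when this was the last reference. Unmapped addresses are
// silently ignored.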
void VirtualMemory::FreePhysicalMemory( u64 addr )
{
    if( !m_pageTbl.IsMapped(addr) ){
        return;
    }

    PageTableEntry page;
    if( m_pageTbl.GetMap( addr, &page ) ){
        if( page.phyPage->refCount == 1 && page.phyPage->ptr ){
            m_simSystem->NotifyMemoryAllocation(
                Addr( m_pid, TID_INVALID, addr ),
                GetPageSize(),
                false
            );
            m_pool.free( page.phyPage->ptr );
        }
        m_pageTbl.RemoveMap( addr );
    }
}

void VirtualMemory::FreePhysicalMemory( u64 addr, u64 size )
{
    BlockArray blocks;
    SplitAtMapUnitBoundary( addr, size, back_inserter(blocks) );

    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        FreePhysicalMemory( e->addr );
    }
}

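// Aliases dstAddr's page to srcAddr's physical page (see CopyMap). Note
// that the ranged overload maps every destination page block to the single
// source page at srcAddr; this looks intended for replicating one page
// (e.g., a shared read-only page) across a region.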
void VirtualMemory::SetPhysicalMemoryMapping( u64 dstAddr, u64 srcAddr, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    if( !m_pageTbl.IsMapped( srcAddr ) ){
        THROW_RUNTIME_ERROR( "The specified source address is not mapped." );
    }

    if( m_pageTbl.IsMapped( dstAddr ) ){
        THROW_RUNTIME_ERROR( "The specified target address is already mapped." );
    }

    m_pageTbl.CopyMap( dstAddr, srcAddr, attr );
}

void VirtualMemory::SetPhysicalMemoryMapping( u64 dstAddr, u64 srcAddr, u64 size, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    BlockArray blocks;
    SplitAtMapUnitBoundary( dstAddr, size, back_inserter(blocks) );
    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        SetPhysicalMemoryMapping( e->addr, srcAddr, attr );
    }
}

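// Breaks sharing for the page containing addr: if its physical page is
// referenced by more than one mapping, the page is remapped to fresh
// memory and the old contents are copied in. No-op when the page is
// already private.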
void VirtualMemory::AssignAndCopyPhysicalMemory( u64 addr, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    PageTableEntry page;
    if( !m_pageTbl.GetMap( addr, &page ) ){
        THROW_RUNTIME_ERROR( "The specified address is not mapped." );
        return;
    }

    if( page.phyPage->refCount == 1 ){
        return;
    }

    m_pageTbl.RemoveMap( addr );
    AssignPhysicalMemory( addr, attr );
    MemCopyToTarget( addr & m_pageTbl.GetOffsetMask(), page.phyPage->ptr, GetPageSize() );
}

void VirtualMemory::AssignAndCopyPhysicalMemory( u64 addr, u64 size, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    BlockArray blocks;
    SplitAtMapUnitBoundary( addr, size, back_inserter(blocks) );
    for( BlockArray::iterator e = blocks.begin(); e != blocks.end(); ++e ){
        AssignAndCopyPhysicalMemory( e->addr, attr );
    }
}

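// Replaces the attribute bits of an already-mapped page; throws if the
// page is not mapped.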
void VirtualMemory::SetPageAttribute( u64 addr, VIRTUAL_MEMORY_ATTR_TYPE attr )
{
    PageTableEntry page;
    if( !m_pageTbl.GetMap( addr, &page ) ){
        THROW_RUNTIME_ERROR( "The specified address is not mapped." );
    }

    page.attr = attr;
    m_pageTbl.SetMap( addr, page );
}

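// Copy-on-write resolution: called before any write to target memory.
// Returns true when the page was shared and has just been replaced by a
// private copy, so callers must re-fetch the page-table entry (see
// WriteMemory()).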
bool VirtualMemory::CopyPageOnWrite( u64 addr )
{
    PageTableEntry page;
    if( !m_pageTbl.GetMap( addr, &page ) ){
        THROW_RUNTIME_ERROR( "The specified address is not mapped." );
        return false;
    }

    if( page.phyPage->refCount > 1 ){
        AssignAndCopyPhysicalMemory( addr, page.attr );
        return true;
    }
    return false;
}