diff --git a/src/mem/ruby/structures/TBETable.hh b/src/mem/ruby/structures/TBETable.hh --- a/src/mem/ruby/structures/TBETable.hh +++ b/src/mem/ruby/structures/TBETable.hh @@ -82,7 +82,6 @@ inline bool TBETable::isPresent(Addr address) const { - assert(address == makeLineAddress(address)); assert(m_map.size() <= m_number_of_TBEs); return static_cast(m_map.count(address)); } diff --git a/src/mem/ruby/structures/TimerTable.cc b/src/mem/ruby/structures/TimerTable.cc --- a/src/mem/ruby/structures/TimerTable.cc +++ b/src/mem/ruby/structures/TimerTable.cc @@ -64,7 +64,6 @@ void TimerTable::set(Addr address, Tick ready_time) { - assert(address == makeLineAddress(address)); assert(!m_map.count(address)); m_map[address] = ready_time; @@ -81,7 +80,6 @@ void TimerTable::unset(Addr address) { - assert(address == makeLineAddress(address)); assert(m_map.count(address)); m_map.erase(address); diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc --- a/src/mem/ruby/system/DMASequencer.cc +++ b/src/mem/ruby/system/DMASequencer.cc @@ -77,7 +77,8 @@ std::shared_ptr msg = std::make_shared(clockEdge()); msg->getPhysicalAddress() = paddr; - msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress()); + msg->getLineAddress() = + makeLineAddress(msg->getPhysicalAddress(), m_block_size_bits); msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD; int offset = paddr & m_data_block_mask; @@ -119,7 +120,8 @@ active_request.bytes_completed; assert((msg->getPhysicalAddress() & m_data_block_mask) == 0); - msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress()); + msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress(), + m_block_size_bits); msg->getType() = (active_request.write ? 
SequencerRequestType_ST : SequencerRequestType_LD); diff --git a/src/mem/ruby/system/GPUCoalescer.cc b/src/mem/ruby/system/GPUCoalescer.cc --- a/src/mem/ruby/system/GPUCoalescer.cc +++ b/src/mem/ruby/system/GPUCoalescer.cc @@ -232,7 +232,7 @@ RequestStatus GPUCoalescer::getRequestStatus(PacketPtr pkt, RubyRequestType request_type) { - Addr line_addr = makeLineAddress(pkt->getAddr()); + Addr line_addr = makeLineAddress(pkt->getAddr(), m_block_size_bits); if (!m_mandatory_q_ptr->areNSlotsAvailable(1, clockEdge())) { return RequestStatus_BufferFull; @@ -323,7 +323,7 @@ schedule(deadlockCheckEvent, m_deadlock_threshold + curTick()); } - Addr line_addr = makeLineAddress(pkt->getAddr()); + Addr line_addr = makeLineAddress(pkt->getAddr(), m_block_size_bits); if ((request_type == RubyRequestType_ST) || (request_type == RubyRequestType_ATOMIC) || (request_type == RubyRequestType_ATOMIC_RETURN) || @@ -390,7 +390,8 @@ assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size()); - Addr line_addr = makeLineAddress(srequest->pkt->getAddr()); + Addr line_addr = makeLineAddress(srequest->pkt->getAddr(), + m_block_size_bits); if ((srequest->m_type == RubyRequestType_ST) || (srequest->m_type == RubyRequestType_RMW_Read) || (srequest->m_type == RubyRequestType_RMW_Write) || @@ -486,10 +487,11 @@ Cycles firstResponseTime, bool isRegion) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); DPRINTF(GPUCoalescer, "write callback for address %#x\n", address); - assert(m_writeRequestTable.count(makeLineAddress(address))); + assert(m_writeRequestTable.count(makeLineAddress(address, + m_block_size_bits))); RequestTable::iterator i = m_writeRequestTable.find(address); assert(i != m_writeRequestTable.end()); @@ -569,8 +571,9 @@ Cycles firstResponseTime, bool isRegion) { - assert(address == makeLineAddress(address)); - assert(m_readRequestTable.count(makeLineAddress(address))); + assert(address == 
makeLineAddress(address, m_block_size_bits)); + assert(m_readRequestTable.count(makeLineAddress(address, + m_block_size_bits))); DPRINTF(GPUCoalescer, "read callback for address %#x\n", address); RequestTable::iterator i = m_readRequestTable.find(address); @@ -600,9 +603,9 @@ { PacketPtr pkt = srequest->pkt; Addr request_address = pkt->getAddr(); - Addr request_line_address = makeLineAddress(request_address); - - Addr offset_address = getOffset(request_address); + Addr request_line_address = makeLineAddress(request_address, + m_block_size_bits); + Addr offset_address = getOffset(request_address, m_block_size_bits); RubyRequestType type = srequest->m_type; @@ -630,7 +633,8 @@ assert(type == reqCoalescer[request_line_address][i].second[PrimaryType]); request_address = pkt->getAddr(); - request_line_address = makeLineAddress(pkt->getAddr()); + request_line_address = makeLineAddress(pkt->getAddr(), + m_block_size_bits); if (pkt->getPtr()) { if ((type == RubyRequestType_LD) || (type == RubyRequestType_ATOMIC) || @@ -835,7 +839,7 @@ if (status != RequestStatus_Ready) return status; - Addr line_addr = makeLineAddress(pkt->getAddr()); + Addr line_addr = makeLineAddress(pkt->getAddr(), m_block_size_bits); // Check if this request can be coalesced with previous // requests from this cycle. @@ -900,7 +904,7 @@ HSASegment accessSegment = reqSegmentToHSASegment(pkt->req); HSAScope accessScope = reqScopeToHSAScope(pkt->req); - Addr line_addr = makeLineAddress(pkt->getAddr()); + Addr line_addr = makeLineAddress(pkt->getAddr(), m_block_size_bits); // Creating WriteMask that records written bytes // and atomic operations. 
This enables partial writes @@ -949,7 +953,7 @@ } DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n", curTick(), m_version, "Coal", "Begin", "", "", - printAddress(msg->getPhysicalAddress()), + printAddress(msg->getPhysicalAddress(), m_block_size_bits), RubyRequestType_to_string(secondary_type)); fatal_if(secondary_type == RubyRequestType_IFETCH, @@ -1080,12 +1084,12 @@ MachineType mach, const DataBlock& data) { - assert(address == makeLineAddress(address)); - - Addr offset_address = getOffset(address); + Addr offset_address = getOffset(address, m_block_size_bits); + assert(address == makeLineAddress(address, m_block_size_bits)); DPRINTF(GPUCoalescer, "atomic callback for address %#x\n", address); - assert(m_writeRequestTable.count(makeLineAddress(address))); + assert(m_writeRequestTable.count(makeLineAddress(address, + m_block_size_bits))); RequestTable::iterator i = m_writeRequestTable.find(address); assert(i != m_writeRequestTable.end()); @@ -1106,7 +1110,8 @@ PacketPtr pkt = srequest->pkt; Addr request_address = pkt->getAddr(); - Addr request_line_address = makeLineAddress(pkt->getAddr()); + Addr request_line_address = makeLineAddress(pkt->getAddr(), + m_block_size_bits); int len = reqCoalescer[request_line_address].size(); std::vector mylist; @@ -1115,13 +1120,13 @@ assert(srequest->m_type == reqCoalescer[request_line_address][i].second[PrimaryType]); request_address = (pkt->getAddr()); - request_line_address = makeLineAddress(request_address); + request_line_address = makeLineAddress(request_address, + m_block_size_bits); if (pkt->getPtr() && srequest->m_type != RubyRequestType_ATOMIC_NO_RETURN) { /* atomics are done in memory, and return the data *before* the atomic op... 
*/ memcpy(pkt->getPtr(), - data.getData(offset_address, - pkt->getSize()), + data.getData(offset_address, pkt->getSize()), pkt->getSize()); } else { DPRINTF(MemoryAccess, @@ -1284,7 +1289,8 @@ DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n", curTick(), m_version, "Coal", success ? "Done" : "SC_Failed", "", "", - printAddress(srequest->pkt->getAddr()), total_lat); + printAddress(srequest->pkt->getAddr(), m_block_size_bits), + total_lat); } void diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc --- a/src/mem/ruby/system/RubyPort.cc +++ b/src/mem/ruby/system/RubyPort.cc @@ -256,10 +256,10 @@ curTick() + rs->clockPeriod()); return true; } + } - assert(getOffset(pkt->getAddr()) + pkt->getSize() <= - RubySystem::getBlockSizeBytes()); - } + assert(getOffset(pkt->getAddr(), ruby_port->m_block_size_bits) + + pkt->getSize() <= ruby_port->m_block_size_bytes); // Submit the ruby request RequestStatus requestStatus = ruby_port->makeRequest(pkt); @@ -320,7 +320,8 @@ } assert(pkt->getAddr() + pkt->getSize() <= - makeLineAddress(pkt->getAddr()) + RubySystem::getBlockSizeBytes()); + makeLineAddress(pkt->getAddr(), rp->m_block_size_bits) + + rp->m_block_size_bytes); if (access_backing_store) { // The attached physmem contains the official version of data. 
diff --git a/src/mem/ruby/system/RubySystem.cc b/src/mem/ruby/system/RubySystem.cc --- a/src/mem/ruby/system/RubySystem.cc +++ b/src/mem/ruby/system/RubySystem.cc @@ -399,7 +399,7 @@ RubySystem::functionalRead(PacketPtr pkt) { Addr address(pkt->getAddr()); - Addr line_address = makeLineAddress(address); + Addr line_address = makeLineAddress(address, m_block_size_bits); AccessPermission access_perm = AccessPermission_NotPresent; int num_controllers = m_abs_cntrl_vec.size(); @@ -482,7 +482,7 @@ RubySystem::functionalWrite(PacketPtr pkt) { Addr addr(pkt->getAddr()); - Addr line_addr = makeLineAddress(addr); + Addr line_addr = makeLineAddress(addr, m_block_size_bits); AccessPermission access_perm = AccessPermission_NotPresent; int num_controllers = m_abs_cntrl_vec.size(); diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc --- a/src/mem/ruby/system/Sequencer.cc +++ b/src/mem/ruby/system/Sequencer.cc @@ -172,7 +172,7 @@ schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold)); } - Addr line_addr = makeLineAddress(pkt->getAddr()); + Addr line_addr = makeLineAddress(pkt->getAddr(), m_block_size_bits); // Create a default entry, mapping the address to NULL, the cast is // there to make gcc 4.4 happy RequestTable::value_type default_entry(line_addr, @@ -352,8 +352,9 @@ const Cycles forwardRequestTime, const Cycles firstResponseTime) { - assert(address == makeLineAddress(address)); - assert(m_writeRequestTable.count(makeLineAddress(address))); + assert(address == makeLineAddress(address, m_block_size_bits)); + assert(m_writeRequestTable.count( + makeLineAddress(address, m_block_size_bits))); RequestTable::iterator i = m_writeRequestTable.find(address); assert(i != m_writeRequestTable.end()); @@ -399,8 +400,9 @@ Cycles forwardRequestTime, Cycles firstResponseTime) { - assert(address == makeLineAddress(address)); - assert(m_readRequestTable.count(makeLineAddress(address))); + assert(address == makeLineAddress(address, m_block_size_bits)); + 
assert(m_readRequestTable.count( + makeLineAddress(address, m_block_size_bits))); RequestTable::iterator i = m_readRequestTable.find(address); assert(i != m_readRequestTable.end()); @@ -430,7 +432,7 @@ PacketPtr pkt = srequest->pkt; Addr request_address(pkt->getAddr()); - Addr offset_address = getOffset(request_address); + Addr offset_address = getOffset(request_address, m_block_size_bits); RubyRequestType type = srequest->m_type; Cycles issued_time = srequest->issue_time; @@ -445,7 +447,8 @@ DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n", curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed", "", "", - printAddress(request_address), total_latency); + printAddress(request_address, m_block_size_bits), + total_latency); // update the data unless it is a non-data-carrying flush if (RubySystem::getWarmupEnabled()) { @@ -617,7 +620,7 @@ DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n", curTick(), m_version, "Seq", "Begin", "", "", - printAddress(msg->getPhysicalAddress()), + printAddress(msg->getPhysicalAddress(), m_block_size_bits), RubyRequestType_to_string(secondary_type)); // The Sequencer currently assesses instruction and data cache hit latency diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py --- a/src/mem/slicc/symbols/StateMachine.py +++ b/src/mem/slicc/symbols/StateMachine.py @@ -1231,7 +1231,8 @@ ${ident}_Event_to_string(event), ${ident}_State_to_string(state), ${ident}_State_to_string(next_state), - printAddress(addr), GET_TRANSITION_COMMENT()); + printAddress(addr, m_block_size_bits), + GET_TRANSITION_COMMENT()); CLEAR_TRANSITION_COMMENT(); ''') @@ -1255,7 +1256,7 @@ ${ident}_Event_to_string(event), ${ident}_State_to_string(state), ${ident}_State_to_string(next_state), - printAddress(addr), "Resource Stall"); + printAddress(addr, m_block_size_bits), "Resource Stall"); } else if (result == TransitionResult_ProtocolStall) { DPRINTF(RubyGenerated, "stalling\\n"); 
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\\n", @@ -1263,7 +1264,7 @@ ${ident}_Event_to_string(event), ${ident}_State_to_string(state), ${ident}_State_to_string(next_state), - printAddress(addr), "Protocol Stall"); + printAddress(addr, m_block_size_bits), "Protocol Stall"); } return result; diff --git a/src/mem/slicc/symbols/Type.py b/src/mem/slicc/symbols/Type.py --- a/src/mem/slicc/symbols/Type.py +++ b/src/mem/slicc/symbols/Type.py @@ -423,7 +423,8 @@ for dm in self.data_members.values(): if dm.type.c_ident == "Addr": code(''' -out << "${{dm.ident}} = " << printAddress(m_${{dm.ident}}) << " ";''') +out << "${{dm.ident}} = " << std::hex << "0x" << m_${{dm.ident}} + << std::dec;''') else: code('out << "${{dm.ident}} = " << m_${{dm.ident}} << " ";''') diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh --- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh +++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh @@ -100,8 +100,8 @@ inline bool testAndRead(Addr addr, DataBlock& blk, Packet *pkt, uint32_t block_size_bits) { - Addr pktLineAddr = makeLineAddress(pkt->getAddr()); - Addr lineAddr = makeLineAddress(addr); + Addr pktLineAddr = makeLineAddress(pkt->getAddr(), block_size_bits); + Addr lineAddr = makeLineAddress(addr, block_size_bits); if (pktLineAddr == lineAddr) { uint8_t *data = pkt->getPtr(); @@ -127,8 +127,8 @@ testAndReadMask(Addr addr, DataBlock& blk, WriteMask& mask, Packet *pkt, uint32_t block_size_bits) { - Addr pktLineAddr = makeLineAddress(pkt->getAddr()); - Addr lineAddr = makeLineAddress(addr); + Addr pktLineAddr = makeLineAddress(pkt->getAddr(), block_size_bits); + Addr lineAddr = makeLineAddress(addr, block_size_bits); if (pktLineAddr == lineAddr) { uint8_t *data = pkt->getPtr(); @@ -157,8 +157,8 @@ testAndWrite(Addr addr, DataBlock& blk, Packet *pkt, uint32_t block_size_bits) { - Addr pktLineAddr = makeLineAddress(pkt->getAddr()); - Addr lineAddr = makeLineAddress(addr); + 
Addr pktLineAddr = makeLineAddress(pkt->getAddr(), block_size_bits); + Addr lineAddr = makeLineAddress(addr, block_size_bits); if (pktLineAddr == lineAddr) { const uint8_t *data = pkt->getConstPtr(); diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc --- a/src/mem/ruby/structures/CacheMemory.cc +++ b/src/mem/ruby/structures/CacheMemory.cc @@ -102,7 +102,7 @@ int64_t CacheMemory::addressToCacheSet(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); return bitSelect(address, m_start_index_bit, m_start_index_bit + m_cache_num_set_bits - 1); } @@ -112,7 +112,7 @@ int CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const { - assert(tag == makeLineAddress(tag)); + assert(tag == makeLineAddress(tag, m_block_size_bits)); // search the set for the tags auto it = m_tag_index.find(tag); if (it != m_tag_index.end()) @@ -128,7 +128,7 @@ CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const { - assert(tag == makeLineAddress(tag)); + assert(tag == makeLineAddress(tag, m_block_size_bits)); // search the set for the tags auto it = m_tag_index.find(tag); if (it != m_tag_index.end()) @@ -163,7 +163,7 @@ CacheMemory::tryCacheAccess(Addr address, RubyRequestType type, DataBlock*& data_ptr) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); DPRINTF(RubyCache, "address: %#x\n", address); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); @@ -190,7 +190,7 @@ CacheMemory::testCacheAccess(Addr address, RubyRequestType type, DataBlock*& data_ptr) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); DPRINTF(RubyCache, "address: %#x\n", address); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); @@ -213,7 +213,7 @@ bool 
CacheMemory::isTagPresent(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); @@ -232,7 +232,7 @@ bool CacheMemory::cacheAvail(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); @@ -254,7 +254,7 @@ AbstractCacheEntry* CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); assert(!isTagPresent(address)); assert(cacheAvail(address)); DPRINTF(RubyCache, "address: %#x\n", address); @@ -294,7 +294,7 @@ void CacheMemory::deallocate(Addr address) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); assert(isTagPresent(address)); DPRINTF(RubyCache, "address: %#x\n", address); int64_t cacheSet = addressToCacheSet(address); @@ -310,7 +310,7 @@ Addr CacheMemory::cacheProbe(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); assert(!cacheAvail(address)); int64_t cacheSet = addressToCacheSet(address); @@ -322,7 +322,7 @@ AbstractCacheEntry* CacheMemory::lookup(Addr address) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); if (loc == -1) return NULL; @@ -333,7 +333,7 @@ const AbstractCacheEntry* CacheMemory::lookup(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); if (loc == -1) 
return NULL; @@ -457,7 +457,7 @@ CacheMemory::setLocked(Addr address, int context) { DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context); - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); assert(loc != -1); @@ -468,7 +468,7 @@ CacheMemory::clearLocked(Addr address) { DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address); - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); assert(loc != -1); @@ -478,7 +478,7 @@ bool CacheMemory::isLocked(Addr address, int context) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); int64_t cacheSet = addressToCacheSet(address); int loc = findTagInSet(cacheSet, address); assert(loc != -1); diff --git a/src/mem/ruby/structures/PerfectCacheMemory.hh b/src/mem/ruby/structures/PerfectCacheMemory.hh --- a/src/mem/ruby/structures/PerfectCacheMemory.hh +++ b/src/mem/ruby/structures/PerfectCacheMemory.hh @@ -113,7 +113,7 @@ inline bool PerfectCacheMemory::isTagPresent(Addr address) const { - return m_map.count(makeLineAddress(address)) > 0; + return m_map.count(makeLineAddress(address, m_block_size_bits)) > 0; } template @@ -132,7 +132,7 @@ PerfectCacheLineState line_state; line_state.m_permission = AccessPermission_Invalid; line_state.m_entry = ENTRY(); - m_map[makeLineAddress(address)] = line_state; + m_map[makeLineAddress(address, m_block_size_bits)] = line_state; } // deallocate entry @@ -140,7 +140,7 @@ inline void PerfectCacheMemory::deallocate(Addr address) { - m_map.erase(makeLineAddress(address)); + m_map.erase(makeLineAddress(address, m_block_size_bits)); } // Returns with the physical address of the conflicting cache line @@ -157,7 +157,7 @@ 
inline ENTRY* PerfectCacheMemory::lookup(Addr address) { - return &m_map[makeLineAddress(address)].m_entry; + return &m_map[makeLineAddress(address, m_block_size_bits)].m_entry; } // looks an address up in the cache @@ -165,14 +165,14 @@ inline const ENTRY* PerfectCacheMemory::lookup(Addr address) const { - return &m_map[makeLineAddress(address)].m_entry; + return &m_map[makeLineAddress(address, m_block_size_bits)].m_entry; } template inline AccessPermission PerfectCacheMemory::getPermission(Addr address) const { - return m_map[makeLineAddress(address)].m_permission; + return m_map[makeLineAddress(address, m_block_size_bits)].m_permission; } template @@ -180,7 +180,7 @@ PerfectCacheMemory::changePermission(Addr address, AccessPermission new_perm) { - Addr line_address = makeLineAddress(address); + Addr line_address = makeLineAddress(address, m_block_size_bits); PerfectCacheLineState& line_state = m_map[line_address]; line_state.m_permission = new_perm; } diff --git a/src/mem/ruby/structures/PersistentTable.cc b/src/mem/ruby/structures/PersistentTable.cc --- a/src/mem/ruby/structures/PersistentTable.cc +++ b/src/mem/ruby/structures/PersistentTable.cc @@ -45,7 +45,7 @@ MachineID locker, AccessType type) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); static const PersistentTableEntry dflt; pair r = @@ -71,7 +71,7 @@ PersistentTable::persistentRequestUnlock(Addr address, MachineID unlocker) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); assert(m_map.count(address)); PersistentTableEntry& entry = m_map[address]; @@ -96,8 +96,7 @@ PersistentTable::okToIssueStarving(Addr address, MachineID machId) const { - assert(address == makeLineAddress(address)); - + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::const_iterator i = m_map.find(address); if (i == m_map.end()) { // No entry present @@ -118,7 +117,7 @@ 
MachineID PersistentTable::findSmallest(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::const_iterator i = m_map.find(address); assert(i != m_map.end()); const PersistentTableEntry& entry = i->second; @@ -128,7 +127,7 @@ AccessType PersistentTable::typeOfSmallest(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::const_iterator i = m_map.find(address); assert(i != m_map.end()); const PersistentTableEntry& entry = i->second; @@ -143,7 +142,7 @@ void PersistentTable::markEntries(Addr address) { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::iterator i = m_map.find(address); if (i == m_map.end()) return; @@ -160,7 +159,7 @@ bool PersistentTable::isLocked(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); // If an entry is present, it must be locked return m_map.count(address) > 0; @@ -169,7 +168,7 @@ int PersistentTable::countStarvingForAddress(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::const_iterator i = m_map.find(address); if (i == m_map.end()) return 0; @@ -181,7 +180,7 @@ int PersistentTable::countReadStarvingForAddress(Addr address) const { - assert(address == makeLineAddress(address)); + assert(address == makeLineAddress(address, m_block_size_bits)); AddressMap::const_iterator i = m_map.find(address); if (i == m_map.end()) return 0; diff --git a/src/mem/ruby/structures/Prefetcher.cc b/src/mem/ruby/structures/Prefetcher.cc --- a/src/mem/ruby/structures/Prefetcher.cc +++ b/src/mem/ruby/structures/Prefetcher.cc @@ -138,7 +138,7 @@ Prefetcher::observeMiss(Addr address, const RubyRequestType& type) { 
DPRINTF(RubyPrefetcher, "Observed miss for %#x\n", address); - Addr line_addr = makeLineAddress(address); + Addr line_addr = makeLineAddress(address, m_block_size_bits); numMissObserved++; // check to see if we have already issued a prefetch for this block @@ -235,8 +235,8 @@ // extend this prefetching stream by 1 (or more) Addr page_addr = pageAddress(stream->m_address); - Addr line_addr = makeNextStrideAddress(stream->m_address, - stream->m_stride); + Addr line_addr = makeNextStrideAddress( + stream->m_address, stream->m_stride, m_block_size_bytes); // possibly stop prefetching at page boundaries if (page_addr != pageAddress(line_addr)) { @@ -291,7 +291,7 @@ // initialize the stream prefetcher PrefetchEntry *mystream = &(m_array[index]); - mystream->m_address = makeLineAddress(address); + mystream->m_address = makeLineAddress(address, m_block_size_bits); mystream->m_stride = stride; mystream->m_use_time = m_controller->curCycle(); mystream->m_is_valid = true; @@ -299,11 +299,12 @@ // create a number of initial prefetches for this stream Addr page_addr = pageAddress(mystream->m_address); - Addr line_addr = makeLineAddress(mystream->m_address); + Addr line_addr = makeLineAddress(mystream->m_address, m_block_size_bits); // insert a number of prefetches into the prefetch table for (int k = 0; k < m_num_startup_pfs; k++) { - line_addr = makeNextStrideAddress(line_addr, stride); + line_addr = makeNextStrideAddress( + line_addr, stride, m_block_size_bytes); // possibly stop prefetching at page boundaries if (page_addr != pageAddress(line_addr)) { numPagesCrossed++; @@ -333,7 +334,8 @@ if (m_array[i].m_is_valid) { for (int j = 0; j < m_num_startup_pfs; j++) { if (makeNextStrideAddress(m_array[i].m_address, - -(m_array[i].m_stride*j)) == address) { + -m_array[i].m_stride * j, m_block_size_bytes) == + address) { return &(m_array[i]); } } @@ -350,10 +352,12 @@ //reset the alloc flag alloc = false; - Addr line_addr = makeLineAddress(address); + Addr line_addr = 
makeLineAddress(address, m_block_size_bits); for (int i = 0; i < m_num_unit_filters; i++) { if (filter_table[i] == line_addr) { - filter_table[i] = makeNextStrideAddress(filter_table[i], stride); + filter_table[i] = makeNextStrideAddress(filter_table[i], + stride, + m_block_size_bytes); filter_hit[i]++; if (filter_hit[i] >= m_train_misses) { alloc = true; @@ -364,7 +368,8 @@ // enter this address in the table int local_index = index; - filter_table[local_index] = makeNextStrideAddress(line_addr, stride); + filter_table[local_index] = makeNextStrideAddress(line_addr, stride, + m_block_size_bytes); filter_hit[local_index] = 0; local_index = local_index + 1; if (local_index >= m_num_unit_filters) { @@ -384,7 +389,7 @@ /// look for non-unit strides based on a (user-defined) page size Addr page_addr = pageAddress(address); - Addr line_addr = makeLineAddress(address); + Addr line_addr = makeLineAddress(address, m_block_size_bits); for (uint32_t i = 0; i < m_num_nonunit_filters; i++) { if (pageAddress(m_nonunit_filter[i]) == page_addr) { diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm --- a/src/mem/protocol/MOESI_CMP_directory-dma.sm +++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm @@ -123,16 +123,15 @@ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") { if (dmaResponseQueue_in.isReady(clockEdge())) { peek( dmaResponseQueue_in, ResponseMsg) { + Addr lineAddr := makeLineAddress(in_msg.addr, block_size_bits); + if (in_msg.Type == CoherenceResponseType:DMA_ACK) { - trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + trigger(Event:DMA_Ack, lineAddr, TBEs[lineAddr]); } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE || in_msg.Type == CoherenceResponseType:DATA) { - trigger(Event:Data, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + trigger(Event:Data, lineAddr, TBEs[lineAddr]); } else if (in_msg.Type == 
CoherenceResponseType:ACK) { - trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + trigger(Event:Inv_Ack, lineAddr, TBEs[lineAddr]); } else { error("Invalid response type"); } diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm --- a/src/mem/protocol/MOESI_CMP_token-dir.sm +++ b/src/mem/protocol/MOESI_CMP_token-dir.sm @@ -738,8 +738,8 @@ peek(responseNetwork_in, ResponseMsg) { DataBlock DataBlk := tbe.DataBlk; tbe.DataBlk := in_msg.DataBlk; - tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress), - tbe.Len); + Addr offset := getOffset(tbe.PhysicalAddress, block_size_bits); + tbe.DataBlk.copyPartial(DataBlk, offset, tbe.Len); } } diff --git a/src/mem/protocol/RubySlicc_Util.sm b/src/mem/protocol/RubySlicc_Util.sm --- a/src/mem/protocol/RubySlicc_Util.sm +++ b/src/mem/protocol/RubySlicc_Util.sm @@ -41,12 +41,12 @@ void dirProfileCoherenceRequest(NodeID node, bool needCLB); int max_tokens(); Addr setOffset(Addr addr, int offset); -Addr makeLineAddress(Addr addr); -int getOffset(Addr addr); +Addr makeLineAddress(Addr addr, uint32_t block_size_bits); +Addr getOffset(Addr addr, uint32_t block_size_bits); int mod(int val, int mod); Addr bitSelect(Addr addr, int small, int big); Addr maskLowOrderBits(Addr addr, int number); -Addr makeNextStrideAddress(Addr addr, int stride); +Addr makeNextStrideAddress(Addr addr, int stride, int block_size_bytes); structure(BoolVec, external="yes") { } int countBoolVec(BoolVec bVec); diff --git a/src/mem/ruby/common/Address.hh b/src/mem/ruby/common/Address.hh --- a/src/mem/ruby/common/Address.hh +++ b/src/mem/ruby/common/Address.hh @@ -43,9 +43,9 @@ Addr maskLowOrderBits(Addr addr, unsigned int number); Addr maskHighOrderBits(Addr addr, unsigned int number); Addr shiftLowOrderBits(Addr addr, unsigned int number); -Addr getOffset(Addr addr); -Addr makeLineAddress(Addr addr); -Addr makeNextStrideAddress(Addr addr, int stride); -std::string 
printAddress(Addr addr);
+Addr getOffset(Addr addr, uint32_t block_size_bits);
+Addr makeLineAddress(Addr addr, uint32_t block_size_bits);
+Addr makeNextStrideAddress(Addr addr, int stride, uint32_t block_size_bytes);
+std::string printAddress(Addr addr, uint32_t block_size_bits);
 #endif // __MEM_RUBY_COMMON_ADDRESS_HH__
diff --git a/src/mem/ruby/common/Address.cc b/src/mem/ruby/common/Address.cc
--- a/src/mem/ruby/common/Address.cc
+++ b/src/mem/ruby/common/Address.cc
@@ -26,6 +26,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
+#include "base/intmath.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/system/RubySystem.hh"
@@ -103,27 +104,27 @@
 }
 Addr
-getOffset(Addr addr)
+getOffset(Addr addr, uint32_t block_size_bits)
 {
-    return bitSelect(addr, 0, RubySystem::getBlockSizeBits() - 1);
+    return bitSelect(addr, 0, block_size_bits - 1);
 }
 Addr
-makeLineAddress(Addr addr)
+makeLineAddress(Addr addr, uint32_t block_size_bits)
 {
-    return maskLowOrderBits(addr, RubySystem::getBlockSizeBits());
+    return maskLowOrderBits(addr, block_size_bits);
 }
 // returns the next stride address based on line address
 Addr
-makeNextStrideAddress(Addr addr, int stride)
+makeNextStrideAddress(Addr addr, int stride, uint32_t block_size_bytes)
 {
-    return maskLowOrderBits(addr, RubySystem::getBlockSizeBits())
-        + RubySystem::getBlockSizeBytes() * stride;
+    return maskLowOrderBits(addr, floorLog2(block_size_bytes))
+        + block_size_bytes * stride;
 }
 std::string
-printAddress(Addr addr)
+printAddress(Addr addr, uint32_t block_size_bits)
 {
     std::stringstream out;
     out << "[" << std::hex << "0x" << addr << "," << " line 0x"
diff --git a/src/mem/ruby/filters/H3BloomFilter.cc b/src/mem/ruby/filters/H3BloomFilter.cc
--- a/src/mem/ruby/filters/H3BloomFilter.cc
+++ b/src/mem/ruby/filters/H3BloomFilter.cc
@@ -496,7 +496,7 @@
 int
 H3BloomFilter::get_index(Addr addr, int i)
 {
-    uint64_t x = makeLineAddress(addr);
+    uint64_t x = makeLineAddress(addr, m_block_size_bits);
     // uint64_t y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
     int y = hash_H3(x,i);
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc +++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc @@ -161,7 +161,7 @@ // m_skip_bits is used to perform BitSelect after skipping some // bits. Used to simulate BitSel hashing on larger than cache-line // granularities - uint64_t x = (makeLineAddress(addr) >> m_skip_bits); + uint64_t x = (makeLineAddress(addr, m_block_size_bits) >> m_skip_bits); int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits); //36-bit addresses, 6-bit cache lines diff --git a/src/mem/ruby/network/MessageBuffer.cc b/src/mem/ruby/network/MessageBuffer.cc --- a/src/mem/ruby/network/MessageBuffer.cc +++ b/src/mem/ruby/network/MessageBuffer.cc @@ -317,7 +317,6 @@ { DPRINTF(RubyQueue, "Stalling due to %#x\n", addr); assert(isReady(current_time)); - assert(getOffset(addr) == 0); MsgPtr message = m_prio_heap.front(); dequeue(current_time); diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc --- a/src/mem/ruby/profiler/AddressProfiler.cc +++ b/src/mem/ruby/profiler/AddressProfiler.cc @@ -290,7 +290,8 @@ } // record data address trace info - data_addr = makeLineAddress(data_addr); + uint32_t block_size_bits = floorLog2(m_profiler->m_block_size_bytes); + data_addr = makeLineAddress(data_addr, block_size_bits); lookupTraceForAddress(data_addr, m_dataAccessTrace).
update(type, access_mode, id, sharing_miss); diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc --- a/src/mem/ruby/slicc_interface/AbstractController.cc +++ b/src/mem/ruby/slicc_interface/AbstractController.cc @@ -280,7 +280,8 @@ PacketPtr pkt = Packet::createWrite(req); uint8_t *newData = new uint8_t[size]; pkt->dataDynamic(newData); - memcpy(newData, block.getData(getOffset(addr), size), size); + memcpy(newData, block.getData(getOffset(addr, m_block_size_bits), size), + size); SenderState *s = new SenderState(id); pkt->pushSenderState(s); diff --git a/src/mem/ruby/slicc_interface/RubyRequest.hh b/src/mem/ruby/slicc_interface/RubyRequest.hh --- a/src/mem/ruby/slicc_interface/RubyRequest.hh +++ b/src/mem/ruby/slicc_interface/RubyRequest.hh @@ -81,7 +81,7 @@ m_scope(_scope), m_segment(_segment) { - m_LineAddress = makeLineAddress(m_PhysicalAddress); + m_LineAddress = makeLineAddress(m_PhysicalAddress, block_size_bits); } RubyRequest(Tick curTime, uint64_t _paddr, uint8_t* _data, int _len, @@ -107,7 +107,7 @@ m_scope(_scope), m_segment(_segment) { - m_LineAddress = makeLineAddress(m_PhysicalAddress); + m_LineAddress = makeLineAddress(m_PhysicalAddress, block_size_bits); } RubyRequest(Tick curTime, uint64_t _paddr, uint8_t* _data, int _len, @@ -135,7 +135,7 @@ m_scope(_scope), m_segment(_segment) { - m_LineAddress = makeLineAddress(m_PhysicalAddress); + m_LineAddress = makeLineAddress(m_PhysicalAddress, block_size_bits); } diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh --- a/src/cpu/testers/rubytest/RubyTester.hh +++ b/src/cpu/testers/rubytest/RubyTester.hh @@ -86,7 +86,8 @@ { Addr addr; DataBlock dataBlock; - SenderState(Addr _addr, uint32_t size) : addr(_addr), dataBlock() + SenderState(Addr _addr, uint32_t size, uint32_t block_size_bytes) + : addr(_addr), dataBlock(block_size_bytes) { } }; diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm 
b/src/mem/protocol/MESI_Two_Level-dir.sm --- a/src/mem/protocol/MESI_Two_Level-dir.sm +++ b/src/mem/protocol/MESI_Two_Level-dir.sm @@ -195,14 +195,14 @@ if (requestNetwork_in.isReady(clockEdge())) { peek(requestNetwork_in, RequestMsg) { assert(in_msg.Destination.isElement(machineID)); + Addr lineAddr := makeLineAddress(in_msg.addr, block_size_bits); + if (isGETRequest(in_msg.Type)) { trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]); } else if (in_msg.Type == CoherenceRequestType:DMA_READ) { - trigger(Event:DMA_READ, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + trigger(Event:DMA_READ, lineAddr, TBEs[lineAddr]); } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) { - trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + trigger(Event:DMA_WRITE, lineAddr, TBEs[lineAddr]); } else { DPRINTF(RubySlicc, "%s\n", in_msg); error("Invalid message"); diff --git a/src/mem/protocol/MESI_Two_Level-dma.sm b/src/mem/protocol/MESI_Two_Level-dma.sm --- a/src/mem/protocol/MESI_Two_Level-dma.sm +++ b/src/mem/protocol/MESI_Two_Level-dma.sm @@ -95,10 +95,11 @@ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") { if (dmaResponseQueue_in.isReady(clockEdge())) { peek( dmaResponseQueue_in, ResponseMsg) { + Addr lineAddr := makeLineAddress(in_msg.addr, block_size_bits); if (in_msg.Type == CoherenceResponseType:ACK) { - trigger(Event:Ack, makeLineAddress(in_msg.addr)); + trigger(Event:Ack, lineAddr); } else if (in_msg.Type == CoherenceResponseType:DATA) { - trigger(Event:Data, makeLineAddress(in_msg.addr)); + trigger(Event:Data, lineAddr); } else { error("Invalid response type"); } diff --git a/src/mem/protocol/MOESI_AMD_Base-RegionBuffer.sm b/src/mem/protocol/MOESI_AMD_Base-RegionBuffer.sm --- a/src/mem/protocol/MOESI_AMD_Base-RegionBuffer.sm +++ b/src/mem/protocol/MOESI_AMD_Base-RegionBuffer.sm @@ -221,7 +221,7 @@ Addr getNextBlock(Addr addr) { Addr a := addr; - return makeNextStrideAddress(a, 
1); + return makeNextStrideAddress(a, 1, blockBytes); } MachineID getPeer(MachineID mach, Addr address) { diff --git a/src/mem/protocol/MOESI_AMD_Base-RegionDir.sm b/src/mem/protocol/MOESI_AMD_Base-RegionDir.sm --- a/src/mem/protocol/MOESI_AMD_Base-RegionDir.sm +++ b/src/mem/protocol/MOESI_AMD_Base-RegionDir.sm @@ -232,7 +232,7 @@ Addr getNextBlock(Addr addr) { Addr a := addr; - makeNextStrideAddress(a, 1); + makeNextStrideAddress(a, 1, blockBytes); return a; } diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm --- a/src/mem/protocol/MOESI_CMP_directory-dir.sm +++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm @@ -276,11 +276,11 @@ } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) { trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]); } else if (in_msg.Type == CoherenceRequestType:DMA_READ) { - trigger(Event:DMA_READ, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + Addr lineAddr := makeLineAddress(in_msg.addr, block_size_bits); + trigger(Event:DMA_READ, lineAddr, TBEs[lineAddr]); } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) { - trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr), - TBEs[makeLineAddress(in_msg.addr)]); + Addr lineAddr := makeLineAddress(in_msg.addr, block_size_bits); + trigger(Event:DMA_WRITE, lineAddr, TBEs[lineAddr]); } else { error("Invalid message"); } @@ -488,8 +488,8 @@ desc="Queue off-chip writeback request") { peek(unblockNetwork_in, ResponseMsg) { DataBlock DataBlk := in_msg.DataBlk; - DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress), - tbe.Len); + Addr offset := getOffset(tbe.PhysicalAddress, block_size_bits); + DataBlk.copyPartial(tbe.DataBlk, offset, tbe.Len); queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency, DataBlk); } # Node ID 065361f6fd531c7bd2fefd8e3d895cfba47d7dcc # Parent dd80771f73b0825429217414ea8205aada9419d3 diff --git a/src/cpu/testers/rubytest/Check.cc 
b/src/cpu/testers/rubytest/Check.cc --- a/src/cpu/testers/rubytest/Check.cc +++ b/src/cpu/testers/rubytest/Check.cc @@ -117,7 +117,9 @@ // Push the DataBlock onto the sender state. The sequencer will // update the DataBlock on the return. - pkt->senderState = new SenderState(m_address, req->getSize()); + uint32_t block_size_bytes = 1 << m_block_size_bits; + pkt->senderState = new SenderState(m_address, req->getSize(), + block_size_bytes); if (port->sendTimingReq(pkt)) { DPRINTF(RubyTest, "successfully initiated prefetch.\n"); @@ -154,7 +156,9 @@ // Push the DataBlock onto the sender state. The sequencer will // update the DataBlock on the return. - pkt->senderState = new SenderState(m_address, req->getSize()); + uint32_t block_size_bytes = 1 << m_block_size_bits; + pkt->senderState = new SenderState(m_address, req->getSize(), + block_size_bytes); if (port->sendTimingReq(pkt)) { DPRINTF(RubyTest, "initiating Flush - successful\n"); @@ -199,7 +203,9 @@ // Push the DataBlock onto the sender state. The sequencer will // update the DataBlock on the return. - pkt->senderState = new SenderState(writeAddr, req->getSize()); + uint32_t block_size_bytes = 1 << m_block_size_bits; + pkt->senderState = new SenderState(writeAddr, req->getSize(), + block_size_bytes); if (port->sendTimingReq(pkt)) { DPRINTF(RubyTest, "initiating action - successful\n"); @@ -253,7 +259,9 @@ // Push the DataBlock onto the sender state. The sequencer will // update the DataBlock on the return. 
- pkt->senderState = new SenderState(m_address, req->getSize()); + uint32_t block_size_bytes = 1 << m_block_size_bits; + pkt->senderState = new SenderState(m_address, req->getSize(), + block_size_bytes); if (port->sendTimingReq(pkt)) { DPRINTF(RubyTest, "initiating check - successful\n"); @@ -283,7 +291,8 @@ // This isn't exactly right since we now have multi-byte checks // assert(getAddress() == address); - assert(makeLineAddress(m_address) == makeLineAddress(address)); + assert(makeLineAddress(m_address, m_block_size_bits) == + makeLineAddress(address, m_block_size_bits)); assert(data != NULL); DPRINTF(RubyTest, "RubyTester Callback\n"); @@ -334,7 +343,7 @@ } DPRINTF(RubyTest, "proc: %d, Address: 0x%x\n", proc, - makeLineAddress(m_address)); + makeLineAddress(m_address, m_block_size_bits)); DPRINTF(RubyTest, "Callback done\n"); debugPrint(); }