diff -r 94383c5124d2 src/mem/protocol/MESI_CMP_directory-L1cache.sm
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm	Tue Nov 20 22:05:10 2012 -0600
@@ -548,6 +548,7 @@
         out_msg.AckCount := 1;
       }
     }
+    L1DcacheMemory.clearLocked(address, address + out_msg.MessageSize - 1); // NOTE(review): MessageSize is a SLICC enum, not a byte length -- confirm this computes the intended range end
   }
 
   action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
diff -r 94383c5124d2 src/mem/protocol/RubySlicc_Types.sm
--- a/src/mem/protocol/RubySlicc_Types.sm	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/protocol/RubySlicc_Types.sm	Tue Nov 20 22:05:10 2012 -0600
@@ -154,6 +154,7 @@
   void setMRU(Address);
   void recordRequestType(CacheRequestType);
   bool checkResourceAvailable(CacheResourceType, Address);
+  void clearLocked(Address, Address);
 }
 
 structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
diff -r 94383c5124d2 src/mem/ruby/common/Address.hh
--- a/src/mem/ruby/common/Address.hh	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/common/Address.hh	Tue Nov 20 22:05:10 2012 -0600
@@ -101,6 +101,18 @@
 }
 
 inline bool
+operator<=(const Address &obj1, const Address &obj2)
+{
+    return obj1.getAddress() <= obj2.getAddress();
+}
+
+inline bool
+operator>=(const Address &obj1, const Address &obj2)
+{
+    return obj1.getAddress() >= obj2.getAddress();
+}
+
+inline bool
 operator<(const Address& obj1, const Address& obj2)
 {
     return obj1.getAddress() < obj2.getAddress();
diff -r 94383c5124d2 src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh	Tue Nov 20 22:05:10 2012 -0600
@@ -34,6 +34,7 @@
 #define __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__
 
 #include <iostream>
+#include <list>
 
 #include "mem/protocol/AccessPermission.hh"
 #include "mem/ruby/common/Address.hh"
@@ -41,6 +42,7 @@
 #include "mem/ruby/slicc_interface/AbstractEntry.hh"
 
 class DataBlock;
+class Lock;
 
 class AbstractCacheEntry : public AbstractEntry
 {
@@ -54,10 +56,39 @@
     Address m_Address; // Address of this block, required by CacheMemory
     Time m_LastRef; // Last time this block was referenced, required
                     // by CacheMemory
-    int m_locked; // Holds info whether the address is locked,
-                  // required for implementing LL/SC
+
+    std::list<Lock> m_locks; // Holds info on locked address ranges
+                             // required for implementing LL/SC
 };
+
+class Lock
+{
+  public:
+    Lock(Address low_addr, Address high_addr, int context_id)
+        : lowAddress(low_addr),
+          highAddress(high_addr),
+          contextId(context_id)
+    {
+    }
+
+    // True if [low_addr, high_addr] intersects this lock's range at all
+    bool overlapping(Address low_addr, Address high_addr)
+    {
+        return (low_addr <= highAddress) && (high_addr >= lowAddress);
+    }
+
+    // True if this lock's range fully covers [low_addr, high_addr] and
+    // was taken by the same hardware context
+    bool matches(Address low_addr, Address high_addr, int context_id)
+    {
+        return (contextId == context_id) &&
+            (lowAddress <= low_addr && highAddress >= high_addr);
+    }
+
+  private:
+    Address lowAddress;
+    Address highAddress;
+    int contextId;
+};
+
+
 inline std::ostream&
 operator<<(std::ostream& out, const AbstractCacheEntry& obj)
 {
diff -r 94383c5124d2 src/mem/ruby/slicc_interface/AbstractCacheEntry.cc
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.cc	Tue Nov 20 22:05:10 2012 -0600
@@ -32,7 +32,6 @@
 {
     m_Permission = AccessPermission_NotPresent;
     m_Address.setAddress(0);
-    m_locked = -1;
 }
 
 AbstractCacheEntry::~AbstractCacheEntry()
@@ -45,6 +44,6 @@
     AbstractEntry::changePermission(new_perm);
     if ((new_perm == AccessPermission_Invalid) ||
         (new_perm == AccessPermission_NotPresent)) {
-        m_locked = -1;
+        m_locks.clear();
     }
 }
diff -r 94383c5124d2 src/mem/ruby/system/CacheMemory.hh
--- a/src/mem/ruby/system/CacheMemory.hh	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/system/CacheMemory.hh	Tue Nov 20 22:05:10 2012 -0600
@@ -106,9 +106,11 @@
                       RubyAccessMode accessType, PrefetchBit pfBit);
 
-    void setLocked (const Address& addr, int context);
-    void clearLocked (const Address& addr);
-    bool isLocked (const Address& addr, int context);
+    void setLocked (const Address &addr_low, const Address &addr_high,
+                    int context);
+    void clearLocked (const Address &addr_low, const Address &addr_high);
+    bool isLocked (const Address &addr_low, const Address &addr_high,
+                   int context);
 
     // Print cache contents
     void print(std::ostream& out) const;
     void printData(std::ostream& out) const;
diff -r 94383c5124d2 src/mem/ruby/system/CacheMemory.cc
--- a/src/mem/ruby/system/CacheMemory.cc	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/system/CacheMemory.cc	Tue Nov 20 22:05:10 2012 -0600
@@ -253,7 +253,7 @@
             set[i]->m_Permission = AccessPermission_Invalid;
             DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                     address);
-            set[i]->m_locked = -1;
+            set[i]->m_locks.clear();
             m_tag_index[address] = i;
 
             m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
@@ -419,37 +419,59 @@
 }
 
 void
-CacheMemory::setLocked(const Address& address, int context)
+CacheMemory::setLocked(const Address &addr_low, const Address &addr_high,
+                       int context)
 {
-    DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
-    assert(address == line_address(address));
-    Index cacheSet = addressToCacheSet(address);
-    int loc = findTagInSet(cacheSet, address);
+    DPRINTF(RubyCache, "Setting Lock for addr: %x-%x to %d\n",
+            addr_low, addr_high, context);
+    Address line_addr = line_address(addr_low);
+    Index cacheSet = addressToCacheSet(line_addr);
+    int loc = findTagInSet(cacheSet, line_addr);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->m_locked = context;
+
+    // Add lock to the list of known locks
+    m_cache[cacheSet][loc]->m_locks.push_front(Lock(addr_low, addr_high,
+                                                    context));
 }
 
 void
-CacheMemory::clearLocked(const Address& address)
+CacheMemory::clearLocked(const Address &addr_low, const Address &addr_high)
 {
-    DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
-    assert(address == line_address(address));
-    Index cacheSet = addressToCacheSet(address);
-    int loc = findTagInSet(cacheSet, address);
+    DPRINTF(RubyCache, "Clear Lock for addr: %x-%x\n", addr_low, addr_high);
+    Address line_addr = line_address(addr_low);
+    Index cacheSet = addressToCacheSet(line_addr);
+    int loc = findTagInSet(cacheSet, line_addr);
     assert(loc != -1);
-    m_cache[cacheSet][loc]->m_locked = -1;
+
+    // Iterate through the known locks and clear any overlapping this region
+    std::list<Lock> &lock_list = m_cache[cacheSet][loc]->m_locks;
+    std::list<Lock>::iterator lock_itr = lock_list.begin();
+    while (lock_itr != lock_list.end()) {
+        if (lock_itr->overlapping(addr_low, addr_high)) {
+            // erase() returns the iterator following the removed element
+            lock_itr = lock_list.erase(lock_itr);
+        } else {
+            ++lock_itr;
+        }
+    }
 }
 
 bool
-CacheMemory::isLocked(const Address& address, int context)
+CacheMemory::isLocked(const Address &addr_low, const Address &addr_high,
+                      int context)
 {
-    assert(address == line_address(address));
-    Index cacheSet = addressToCacheSet(address);
-    int loc = findTagInSet(cacheSet, address);
+    Address line_addr = line_address(addr_low);
+    Index cacheSet = addressToCacheSet(line_addr);
+    int loc = findTagInSet(cacheSet, line_addr);
     assert(loc != -1);
-    DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
-            address, m_cache[cacheSet][loc]->m_locked, context);
-    return m_cache[cacheSet][loc]->m_locked == context;
+    // Note: format has exactly one specifier per argument (the old
+    // "cur %d" lost its argument when m_locked was removed)
+    DPRINTF(RubyCache, "Testing Lock for addr: %llx-%llx con %d\n",
+            addr_low, addr_high, context);
+
+    // Iterate through the lock list seeing if we get a match
+    std::list<Lock> &lock_list = m_cache[cacheSet][loc]->m_locks;
+    for (std::list<Lock>::iterator lock_itr = lock_list.begin();
+         lock_itr != lock_list.end(); ++lock_itr) {
+        if (lock_itr->matches(addr_low, addr_high, context)) {
+            return true;
+        }
+    }
+
+    return false;
 }
 
 void
diff -r 94383c5124d2 src/mem/ruby/system/Sequencer.cc
--- a/src/mem/ruby/system/Sequencer.cc	Mon Nov 19 11:21:09 2012 -0500
+++ b/src/mem/ruby/system/Sequencer.cc	Tue Nov 20 22:05:10 2012 -0600
@@ -320,8 +320,9 @@
     //  longer locked.
     //
     bool success = true;
+    Address address_high(address.getAddress() + request->pkt->getSize() - 1);
     if (request->m_type == RubyRequestType_Store_Conditional) {
-        if (!m_dataCache_ptr->isLocked(address, m_version)) {
+        if (!m_dataCache_ptr->isLocked(address, address_high, m_version)) {
             //
             // For failed SC requests, indicate the failure to the cpu by
             // setting the extra data to zero.
@@ -338,19 +339,19 @@
         //
         // Independent of success, all SC operations must clear the lock
         //
-        m_dataCache_ptr->clearLocked(address);
+        m_dataCache_ptr->clearLocked(address, address_high);
     } else if (request->m_type == RubyRequestType_Load_Linked) {
         //
         // Note: To fully follow Alpha LLSC semantics, should the LL clear any
         // previously locked cache lines?
         //
-        m_dataCache_ptr->setLocked(address, m_version);
+        m_dataCache_ptr->setLocked(address, address_high, m_version);
     } else if ((m_dataCache_ptr->isTagPresent(address)) &&
-               (m_dataCache_ptr->isLocked(address, m_version))) {
+               (m_dataCache_ptr->isLocked(address, address_high, m_version))) {
         //
         // Normal writes should clear the locked address
        //
-        m_dataCache_ptr->clearLocked(address);
+        m_dataCache_ptr->clearLocked(address, address_high);
     }
     return success;
 }