# Node ID 58e937f1077b9c6fdadf17d68cc104c5b073ef58 # Parent 0684c3a6cee6ba44f22280e3defd3aca35bd3c18 diff --git a/configs/example/ruby_random_test.py b/configs/example/ruby_random_test.py --- a/configs/example/ruby_random_test.py +++ b/configs/example/ruby_random_test.py @@ -125,10 +125,15 @@ # # Tie the ruby tester ports to the ruby cpu read and write ports # - if ruby_port.support_data_reqs: - tester.cpuDataPort = ruby_port.slave - if ruby_port.support_inst_reqs: - tester.cpuInstPort = ruby_port.slave + if ruby_port.support_data_reqs and ruby_port.support_inst_reqs: + tester.cpuInstDataPort = ruby_port.slave + elif ruby_port.support_data_reqs: + tester.cpuDataPort = ruby_port.slave + elif ruby_port.support_inst_reqs: + tester.cpuInstPort = ruby_port.slave + + # Do not automatically retry stalled Ruby requests + ruby_port.no_retry_on_stall = True # # Tell each sequencer this is the ruby tester so that it diff --git a/configs/ruby/MESI_Three_Level.py b/configs/ruby/MESI_Three_Level.py --- a/configs/ruby/MESI_Three_Level.py +++ b/configs/ruby/MESI_Three_Level.py @@ -100,15 +100,26 @@ l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False, start_index_bit = block_size_bits, replacement_policy="LRU") - l0_cntrl = L0Cache_Controller(version = i*num_cpus_per_cluster + j, - Icache = l0i_cache, Dcache = l0d_cache, - send_evictions = send_evicts(options), - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + if len(system.cpu) == 1: + l0_cntrl = L0Cache_Controller(version = i*num_cpus_per_cluster + j, + Icache = l0i_cache, Dcache = l0d_cache, + send_evictions = send_evicts(options), + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) - cpu_seq = RubySequencer(version = i, icache = l0i_cache, - clk_domain=system.cpu[i].clk_domain, - dcache = l0d_cache, ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, icache = l0i_cache, + clk_domain=system.cpu[0].clk_domain, + dcache = l0d_cache, ruby_system = ruby_system) + else: + 
l0_cntrl = L0Cache_Controller(version = i*num_cpus_per_cluster + j, + Icache = l0i_cache, Dcache = l0d_cache, + send_evictions = send_evicts(options), + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) + + cpu_seq = RubySequencer(version = i, icache = l0i_cache, + clk_domain=system.cpu[i].clk_domain, + dcache = l0d_cache, ruby_system = ruby_system) l0_cntrl.sequencer = cpu_seq diff --git a/configs/ruby/MESI_Two_Level.py b/configs/ruby/MESI_Two_Level.py --- a/configs/ruby/MESI_Two_Level.py +++ b/configs/ruby/MESI_Two_Level.py @@ -88,22 +88,40 @@ prefetcher = RubyPrefetcher.Prefetcher() - l1_cntrl = L1Cache_Controller(version = i, - L1Icache = l1i_cache, - L1Dcache = l1d_cache, - l2_select_num_bits = l2_bits, - send_evictions = send_evicts(options), - prefetcher = prefetcher, - ruby_system = ruby_system, - clk_domain=system.cpu[i].clk_domain, - transitions_per_cycle=options.ports, - enable_prefetch = False) + if len(system.cpu) == 1: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + send_evictions = send_evicts(options), + prefetcher = prefetcher, + ruby_system = ruby_system, + clk_domain=system.cpu[0].clk_domain, + transitions_per_cycle=options.ports, + enable_prefetch = False) - cpu_seq = RubySequencer(version = i, - icache = l1i_cache, - dcache = l1d_cache, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) + else: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + send_evictions = send_evicts(options), + prefetcher = prefetcher, + ruby_system = ruby_system, + clk_domain=system.cpu[i].clk_domain, + transitions_per_cycle=options.ports, + enable_prefetch = False) + + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache 
= l1d_cache, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) diff --git a/configs/ruby/MI_example.py b/configs/ruby/MI_example.py --- a/configs/ruby/MI_example.py +++ b/configs/ruby/MI_example.py @@ -78,18 +78,32 @@ # # Only one unified L1 cache exists. Can cache instructions and data. # - l1_cntrl = L1Cache_Controller(version = i, - cacheMemory = cache, - send_evictions = send_evicts(options), - transitions_per_cycle = options.ports, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + if len(system.cpu) == 1: + l1_cntrl = L1Cache_Controller(version = i, + cacheMemory = cache, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) - cpu_seq = RubySequencer(version = i, - icache = cache, - dcache = cache, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, + icache = cache, + dcache = cache, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) + else: + l1_cntrl = L1Cache_Controller(version = i, + cacheMemory = cache, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) + + cpu_seq = RubySequencer(version = i, + icache = cache, + dcache = cache, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) diff --git a/configs/ruby/MOESI_CMP_directory.py b/configs/ruby/MOESI_CMP_directory.py --- a/configs/ruby/MOESI_CMP_directory.py +++ b/configs/ruby/MOESI_CMP_directory.py @@ -86,20 +86,36 @@ start_index_bit = block_size_bits, is_icache = False) - l1_cntrl = L1Cache_Controller(version = i, - L1Icache = l1i_cache, - L1Dcache = l1d_cache, - l2_select_num_bits = l2_bits, - send_evictions = send_evicts(options), - 
transitions_per_cycle = options.ports, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + if len(system.cpu) == 1: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) - cpu_seq = RubySequencer(version = i, - icache = l1i_cache, - dcache = l1d_cache, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) + else: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) + + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) diff --git a/configs/ruby/MOESI_CMP_token.py b/configs/ruby/MOESI_CMP_token.py --- a/configs/ruby/MOESI_CMP_token.py +++ b/configs/ruby/MOESI_CMP_token.py @@ -97,29 +97,54 @@ assoc = options.l1d_assoc, start_index_bit = block_size_bits) - l1_cntrl = L1Cache_Controller(version = i, - L1Icache = l1i_cache, - L1Dcache = l1d_cache, - l2_select_num_bits = l2_bits, - N_tokens = n_tokens, - retry_threshold = \ - options.l1_retries, - fixed_timeout_latency = \ - options.timeout_latency, - dynamic_timeout_enabled = \ - not options.disable_dyn_timeouts, - no_mig_atomic = not \ - options.allow_atomic_migration, - send_evictions = send_evicts(options), - transitions_per_cycle = options.ports, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + if len(system.cpu) == 
1: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + N_tokens = n_tokens, + retry_threshold = \ + options.l1_retries, + fixed_timeout_latency = \ + options.timeout_latency, + dynamic_timeout_enabled = \ + not options.disable_dyn_timeouts, + no_mig_atomic = not \ + options.allow_atomic_migration, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) - cpu_seq = RubySequencer(version = i, - icache = l1i_cache, - dcache = l1d_cache, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) + else: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + l2_select_num_bits = l2_bits, + N_tokens = n_tokens, + retry_threshold = \ + options.l1_retries, + fixed_timeout_latency = \ + options.timeout_latency, + dynamic_timeout_enabled = \ + not options.disable_dyn_timeouts, + no_mig_atomic = not \ + options.allow_atomic_migration, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) + + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) l1_cntrl.sequencer = cpu_seq exec("ruby_system.l1_cntrl%d = l1_cntrl" % i) diff --git a/configs/ruby/MOESI_hammer.py b/configs/ruby/MOESI_hammer.py --- a/configs/ruby/MOESI_hammer.py +++ b/configs/ruby/MOESI_hammer.py @@ -97,22 +97,40 @@ assoc = options.l2_assoc, start_index_bit = block_size_bits) - l1_cntrl = L1Cache_Controller(version = i, - L1Icache = l1i_cache, - L1Dcache = l1d_cache, - L2cache = l2_cache, - no_mig_atomic = not \ - options.allow_atomic_migration, - send_evictions = 
send_evicts(options), - transitions_per_cycle = options.ports, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + if len(system.cpu) == 1: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + L2cache = l2_cache, + no_mig_atomic = not \ + options.allow_atomic_migration, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) - cpu_seq = RubySequencer(version = i, - icache = l1i_cache, - dcache = l1d_cache, - clk_domain=system.cpu[i].clk_domain, - ruby_system = ruby_system) + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[0].clk_domain, + ruby_system = ruby_system) + else: + l1_cntrl = L1Cache_Controller(version = i, + L1Icache = l1i_cache, + L1Dcache = l1d_cache, + L2cache = l2_cache, + no_mig_atomic = not \ + options.allow_atomic_migration, + send_evictions = send_evicts(options), + transitions_per_cycle = options.ports, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) + + cpu_seq = RubySequencer(version = i, + icache = l1i_cache, + dcache = l1d_cache, + clk_domain=system.cpu[i].clk_domain, + ruby_system = ruby_system) l1_cntrl.sequencer = cpu_seq if options.recycle_latency: diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc --- a/src/cpu/testers/rubytest/Check.cc +++ b/src/cpu/testers/rubytest/Check.cc @@ -96,7 +96,9 @@ cmd = MemCmd::ReadReq; // if necessary, make the request an instruction fetch - if (m_tester_ptr->isInstReadableCpuPort(index)) { + if (m_tester_ptr->isInstOnlyCpuPort(index) || + (m_tester_ptr->isInstDataCpuPort(index) && + (random_mt.random(0, 0x1)))) { flags.set(Request::INST_FETCH); } } else { @@ -178,7 +180,7 @@ // Stores are assumed to be 1 byte-sized Request *req = new Request(writeAddr.getAddress(), 1, flags, - m_tester_ptr->masterId(), curTick(), + m_tester_ptr->masterId(), 
curTick(), m_pc.getAddress()); req->setThreadContext(index, 0); @@ -208,6 +210,7 @@ DPRINTF(RubyTest, "status before action update: %s\n", (TesterStatus_to_string(m_status)).c_str()); m_status = TesterStatus_Action_Pending; + DPRINTF(RubyTest, "Check %s, State=Action_Pending\n", m_address); } else { // If the packet did not issue, must delete // Note: No need to delete the data, the packet destructor @@ -235,13 +238,16 @@ Request::Flags flags; // If necessary, make the request an instruction fetch - if (m_tester_ptr->isInstReadableCpuPort(index)) { + if (m_tester_ptr->isInstOnlyCpuPort(index) || + (m_tester_ptr->isInstDataCpuPort(index) && + (random_mt.random(0, 0x1)))) { flags.set(Request::INST_FETCH); } // Checks are sized depending on the number of bytes written Request *req = new Request(m_address.getAddress(), CHECK_SIZE, flags, - m_tester_ptr->masterId(), curTick(), m_pc.getAddress()); + m_tester_ptr->masterId(), curTick(), + m_pc.getAddress()); req->setThreadContext(index, 0); PacketPtr pkt = new Packet(req, MemCmd::ReadReq); @@ -257,6 +263,7 @@ DPRINTF(RubyTest, "status before check update: %s\n", TesterStatus_to_string(m_status).c_str()); m_status = TesterStatus_Check_Pending; + DPRINTF(RubyTest, "Check %s, State=Check_Pending\n", m_address); } else { // If the packet did not issue, must delete // Note: No need to delete the data, the packet destructor @@ -294,8 +301,11 @@ m_store_count++; if (m_store_count == CHECK_SIZE) { m_status = TesterStatus_Ready; + DPRINTF(RubyTest, "Check %s, State=Ready\n", m_address); } else { m_status = TesterStatus_Idle; + DPRINTF(RubyTest, "Check %s, State=Idle store_count: %d\n", + m_address, m_store_count); } DPRINTF(RubyTest, "Action callback return data now %d\n", data->getByte(0)); @@ -319,6 +329,7 @@ m_tester_ptr->incrementCheckCompletions(); m_status = TesterStatus_Idle; + DPRINTF(RubyTest, "Check %s, State=Idle\n", m_address); pickValue(); } else { @@ -338,6 +349,7 @@ assert(m_status == TesterStatus_Idle || m_status 
== TesterStatus_Ready); m_status = TesterStatus_Idle; m_address = address; + DPRINTF(RubyTest, "Check %s, State=Idle\n", m_address); m_store_count = 0; } @@ -345,7 +357,6 @@ Check::pickValue() { assert(m_status == TesterStatus_Idle); - m_status = TesterStatus_Idle; m_value = random_mt.random(0, 0xff); // One byte m_store_count = 0; } @@ -356,7 +367,8 @@ assert(m_status == TesterStatus_Idle || m_status == TesterStatus_Ready); m_status = TesterStatus_Idle; m_initiatingNode = (random_mt.random(0, m_num_writers - 1)); - DPRINTF(RubyTest, "picked initiating node %d\n", m_initiatingNode); + DPRINTF(RubyTest, "Check %s, State=Idle, picked initiating node %d\n", + m_address, m_initiatingNode); m_store_count = 0; } diff --git a/src/cpu/testers/rubytest/CheckTable.cc b/src/cpu/testers/rubytest/CheckTable.cc --- a/src/cpu/testers/rubytest/CheckTable.cc +++ b/src/cpu/testers/rubytest/CheckTable.cc @@ -43,6 +43,7 @@ const int size1 = 32; const int size2 = 100; + DPRINTF(RubyTest, "Adding false sharing checks\n"); // The first set is to get some false sharing physical = 1000; for (int i = 0; i < size1; i++) { @@ -52,6 +53,7 @@ physical += CHECK_SIZE; } + DPRINTF(RubyTest, "Adding cache conflict checks\n"); // The next two sets are to get some limited false sharing and // cache conflicts physical = 1000; @@ -62,6 +64,7 @@ physical += 256; } + DPRINTF(RubyTest, "Adding cache conflict checks2\n"); physical = 1000 + CHECK_SIZE; for (int i = 0; i < size2; i++) { // Setup linear addresses @@ -95,6 +98,8 @@ } } + DPRINTF(RubyTest, "Adding check for address: %s\n", address); + Check* check_ptr = new Check(address, Address(100 + m_check_vector.size()), m_num_writers, m_num_readers, m_tester_ptr); for (int i = 0; i < CHECK_SIZE; i++) { @@ -114,7 +119,7 @@ Check* CheckTable::getCheck(const Address& address) { - DPRINTF(RubyTest, "Looking for check by address: %s", address); + DPRINTF(RubyTest, "Looking for check by address: %s\n", address); m5::hash_map::iterator i = 
m_lookup_map.find(address); diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh --- a/src/cpu/testers/rubytest/RubyTester.hh +++ b/src/cpu/testers/rubytest/RubyTester.hh @@ -94,7 +94,8 @@ virtual BaseMasterPort &getMasterPort(const std::string &if_name, PortID idx = InvalidPortID); - bool isInstReadableCpuPort(int idx); + bool isInstOnlyCpuPort(int idx); + bool isInstDataCpuPort(int idx); MasterPort* getReadableCpuPort(int idx); MasterPort* getWritableCpuPort(int idx); @@ -153,7 +154,8 @@ int m_num_readers; int m_wakeup_frequency; bool m_check_flush; - int m_num_inst_ports; + int m_num_inst_only_ports; + int m_num_inst_data_ports; }; inline std::ostream& diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc --- a/src/cpu/testers/rubytest/RubyTester.cc +++ b/src/cpu/testers/rubytest/RubyTester.cc @@ -60,7 +60,8 @@ m_num_readers(0), m_wakeup_frequency(p->wakeup_frequency), m_check_flush(p->check_flush), - m_num_inst_ports(p->port_cpuInstPort_connection_count) + m_num_inst_only_ports(p->port_cpuInstPort_connection_count), + m_num_inst_data_ports(p->port_cpuInstDataPort_connection_count) { m_checks_completed = 0; @@ -75,15 +76,25 @@ // Note: the inst ports are the lowest elements of the readPort vector, // then the data ports are added to the readPort vector // + int idx = 0; for (int i = 0; i < p->port_cpuInstPort_connection_count; ++i) { readPorts.push_back(new CpuPort(csprintf("%s-instPort%d", name(), i), - this, i)); + this, idx)); + idx++; + } + for (int i = 0; i < p->port_cpuInstDataPort_connection_count; ++i) { + CpuPort *port = new CpuPort(csprintf("%s-instDataPort%d", name(), i), + this, idx); + readPorts.push_back(port); + writePorts.push_back(port); + idx++; } for (int i = 0; i < p->port_cpuDataPort_connection_count; ++i) { CpuPort *port = new CpuPort(csprintf("%s-dataPort%d", name(), i), - this, i); + this, idx); readPorts.push_back(port); writePorts.push_back(port); + idx++; } 
// add the check start event to the event queue @@ -117,32 +128,45 @@ BaseMasterPort & RubyTester::getMasterPort(const std::string &if_name, PortID idx) { - if (if_name != "cpuInstPort" && if_name != "cpuDataPort") { + if (if_name != "cpuInstPort" && if_name != "cpuInstDataPort" && + if_name != "cpuDataPort") { // pass it along to our super class return MemObject::getMasterPort(if_name, idx); } else { if (if_name == "cpuInstPort") { - if (idx > m_num_inst_ports) { - panic("RubyTester::getMasterPort: unknown inst port idx %d\n", + if (idx > m_num_inst_only_ports) { + panic("RubyTester::getMasterPort: unknown inst port %d\n", idx); } // - // inst ports directly map to the lowest readPort elements + // inst ports map to the lowest readPort elements // return *readPorts[idx]; + } else if (if_name == "cpuInstDataPort") { + if (idx > m_num_inst_data_ports) { + panic("RubyTester::getMasterPort: unknown inst+data port %d\n", + idx); + } + int read_idx = idx + m_num_inst_only_ports; + // + // inst+data ports map to the next readPort elements + // + return *readPorts[read_idx]; } else { assert(if_name == "cpuDataPort"); // - // add the inst port offset to translate to the correct read port - // index + // data only ports map to the final readPort elements // - int read_idx = idx + m_num_inst_ports; - if (read_idx >= static_cast(readPorts.size())) { - panic("RubyTester::getMasterPort: unknown data port idx %d\n", + if (idx > (static_cast(readPorts.size()) - + (m_num_inst_only_ports + m_num_inst_data_ports))) { + panic("RubyTester::getMasterPort: unknown data port %d\n", idx); } + int read_idx = idx + m_num_inst_only_ports + m_num_inst_data_ports; return *readPorts[read_idx]; } + // Note: currently the Ruby Tester does not support write only ports + // but that could easily be added here } } @@ -165,9 +189,16 @@ } bool -RubyTester::isInstReadableCpuPort(int idx) +RubyTester::isInstOnlyCpuPort(int idx) { - return idx < m_num_inst_ports; + return idx < m_num_inst_only_ports; +} 
+ +bool +RubyTester::isInstDataCpuPort(int idx) +{ + return ((idx >= m_num_inst_only_ports) && + (idx < (m_num_inst_only_ports + m_num_inst_data_ports))); } MasterPort* @@ -192,13 +223,13 @@ // Mark that we made progress m_last_progress_vector[proc] = curCycle(); - DPRINTF(RubyTest, "completed request for proc: %d\n", proc); - DPRINTF(RubyTest, "addr: 0x%x, size: %d, data: ", + DPRINTF(RubyTest, "completed request for proc: %d", proc); + DPRINTFR(RubyTest, " addr: 0x%x, size: %d, data: ", data->getAddress(), data->getSize()); for (int byte = 0; byte < data->getSize(); byte++) { - DPRINTF(RubyTest, "%d", data->getByte(byte)); + DPRINTFR(RubyTest, "%d ", data->getByte(byte)); } - DPRINTF(RubyTest, "\n"); + DPRINTFR(RubyTest, "\n"); // This tells us our store has 'completed' or for a load gives us // back the data to make the check diff --git a/src/cpu/testers/rubytest/RubyTester.py b/src/cpu/testers/rubytest/RubyTester.py --- a/src/cpu/testers/rubytest/RubyTester.py +++ b/src/cpu/testers/rubytest/RubyTester.py @@ -34,8 +34,9 @@ type = 'RubyTester' cxx_header = "cpu/testers/rubytest/RubyTester.hh" num_cpus = Param.Int("number of cpus / RubyPorts") - cpuDataPort = VectorMasterPort("the cpu data cache ports") - cpuInstPort = VectorMasterPort("the cpu inst cache ports") + cpuInstDataPort = VectorMasterPort("cpu combo ports to inst & data caches") + cpuInstPort = VectorMasterPort("cpu ports to only inst caches") + cpuDataPort = VectorMasterPort("cpu ports to only data caches") checks_to_complete = Param.Int(100, "checks to complete") deadlock_threshold = Param.Int(50000, "how often to check for deadlock") wakeup_frequency = Param.Int(10, "number of cycles between wakeups") diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh --- a/src/mem/ruby/system/RubyPort.hh +++ b/src/mem/ruby/system/RubyPort.hh @@ -11,7 +11,7 @@ * unmodified and in its entirety in all distributions of the software, * modified or unmodified, in source code or in binary form. 
* - * Copyright (c) 2009 Advanced Micro Devices, Inc. + * Copyright (c) 2009-2013 Advanced Micro Devices, Inc. * Copyright (c) 2011 Mark D. Hill and David A. Wood * All rights reserved. * @@ -77,10 +77,12 @@ RespPacketQueue queue; RubySystem* ruby_system; bool access_backing_store; + bool no_retry_on_stall; public: MemSlavePort(const std::string &_name, RubyPort *_port, - RubySystem*_system, bool _access_backing_store, PortID id); + RubySystem*_system, bool _access_backing_store, + PortID id, bool _no_retry_on_stall); void hitCallback(PacketPtr pkt); void evictionCallback(const Address& address); @@ -95,6 +97,8 @@ AddrRangeList getAddrRanges() const { AddrRangeList ranges; return ranges; } + void addToRetryList(); + private: bool isPhysMemAddress(Addr addr) const; }; @@ -165,6 +169,7 @@ unsigned int drain(DrainManager *dm); protected: + void trySendRetries(); void ruby_hit_callback(PacketPtr pkt); void testDrainComplete(); void ruby_eviction_callback(const Address& address); @@ -186,10 +191,14 @@ System* system; private: + bool onRetryList(MemSlavePort * port) + { + return (std::find(retryList.begin(), retryList.end(), port) != + retryList.end()); + } void addToRetryList(MemSlavePort * port) { - assert(std::find(retryList.begin(), retryList.end(), port) == - retryList.end()); + assert(!onRetryList(port)); retryList.push_back(port); } diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc --- a/src/mem/ruby/system/RubyPort.cc +++ b/src/mem/ruby/system/RubyPort.cc @@ -11,7 +11,7 @@ * unmodified and in its entirety in all distributions of the software, * modified or unmodified, in source code or in binary form. * - * Copyright (c) 2009 Advanced Micro Devices, Inc. + * Copyright (c) 2009-2013 Advanced Micro Devices, Inc. * Copyright (c) 2011 Mark D. Hill and David A. Wood * All rights reserved. 
* @@ -58,7 +58,8 @@ pioSlavePort(csprintf("%s.pio-slave-port", name()), this), memMasterPort(csprintf("%s.mem-master-port", name()), this), memSlavePort(csprintf("%s-mem-slave-port", name()), this, - p->ruby_system, p->ruby_system->getAccessBackingStore(), -1), + p->ruby_system, p->ruby_system->getAccessBackingStore(), -1, + p->no_retry_on_stall), gotAddrRanges(p->port_master_connection_count), drainManager(NULL) { assert(m_version != -1); @@ -67,7 +68,7 @@ for (size_t i = 0; i < p->port_slave_connection_count; ++i) { slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(), i), this, p->ruby_system, - p->ruby_system->getAccessBackingStore(), i)); + p->ruby_system->getAccessBackingStore(), i, p->no_retry_on_stall)); } // create the master ports based on the number of connected ports @@ -159,9 +160,11 @@ RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port, RubySystem *_system, - bool _access_backing_store, PortID id) + bool _access_backing_store, PortID id, + bool _no_retry_on_stall) : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this), - ruby_system(_system), access_backing_store(_access_backing_store) + ruby_system(_system), access_backing_store(_access_backing_store), + no_retry_on_stall(_no_retry_on_stall) { DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name); } @@ -268,21 +271,33 @@ return true; } - // - // Unless one is using the ruby tester, record the stalled M5 port for - // later retry when the sequencer becomes free. 
- // - if (!ruby_port->m_usingRubyTester) { - ruby_port->addToRetryList(this); - } DPRINTF(RubyPort, "Request for address %#x did not issued because %s\n", pkt->getAddr(), RequestStatus_to_string(requestStatus)); + addToRetryList(); + + ruby_port->testDrainComplete(); + return false; } void +RubyPort::MemSlavePort::addToRetryList() +{ + RubyPort *ruby_port = static_cast<RubyPort *>(&owner); + + // + // Unless the requestor does not want retries (e.g., the Ruby tester), + // record the stalled M5 port for later retry when the sequencer + // becomes free. + // + if (!no_retry_on_stall && !ruby_port->onRetryList(this)) { + ruby_port->addToRetryList(this); + } +} + +void RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt) { DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr()); @@ -356,31 +371,33 @@ port->hitCallback(pkt); + trySendRetries(); +} + +void +RubyPort::trySendRetries() +{ // // If we had to stall the MemSlavePorts, wake them up because the sequencer // likely has free resources now. // if (!retryList.empty()) { - // - // Record the current list of ports to retry on a temporary list before - // calling sendRetry on those ports. sendRetry will cause an - // immediate retry, which may result in the ports being put back on the - // list. Therefore we want to clear the retryList before calling - // sendRetry. - // + // Record the current list of ports to retry on a temporary list + // before calling sendRetryReq on those ports. sendRetryReq will cause + // an immediate retry, which may result in the ports being put back on + // the list. Therefore we want to clear the retryList before calling + // sendRetryReq. std::vector<MemSlavePort *> curRetryList(retryList); retryList.clear(); for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) { DPRINTF(RubyPort, - "Sequencer may now be free. 
SendRetry to port %s\n", (*i)->name()); (*i)->sendRetryReq(); } } - - testDrainComplete(); } void diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py --- a/src/mem/ruby/system/Sequencer.py +++ b/src/mem/ruby/system/Sequencer.py @@ -45,6 +45,7 @@ mem_slave_port = SlavePort("Ruby memory port") using_ruby_tester = Param.Bool(False, "") + no_retry_on_stall = Param.Bool(False, "") ruby_system = Param.RubySystem("") system = Param.System(Parent.any, "system object") support_data_reqs = Param.Bool(True, "data cache requests supported") @@ -53,7 +54,7 @@ class RubyPortProxy(RubyPort): type = 'RubyPortProxy' cxx_header = "mem/ruby/system/RubyPortProxy.hh" - + class RubySequencer(RubyPort): type = 'RubySequencer' cxx_class = 'Sequencer'