test_performance_vm_multi_cpus:
cfg:
- core_count : 3
+ core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 15.9
- max: 16.5
+ min: 15.7
+ max: 16.3
test_performance_vm_multi_cpus_cached:
cfg:
- core_count : 3
+ core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 29.6
- max: 30.5
+ min: 28.8
+ max: 29.5
test_performance_syn_attack_multi_cpus:
cfg:
- core_count : 3
+ core_count : 2
mult : "90%"
mpps_per_core_golden :
min: 13.0
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
- scenario_cfg['name'] = "VM - 64 bytes, two CPUs"
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPUs"
scenario_cfg['streams'] = self.build_perf_profile_vm(64)
scenario_cfg['core_count'] = setup_cfg['core_count']
- # two CPUs, VM, cached, 64 bytes
+ # multi CPUs, VM, cached, 64 bytes
def test_performance_vm_multi_cpus_cached (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
- scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024"
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPU, cache size 1024"
scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
self.execute_single_scenario(scenario_cfg)
- # two CPUs, syn attack, 64 bytes
+ # multi CPUs, syn attack, 64 bytes
def test_performance_syn_attack_multi_cpus (self):
setup_cfg = self.get_benchmark_param('cfg')
scenario_cfg = {}
- scenario_cfg['name'] = "syn attack - 64 bytes, two CPUs"
+ scenario_cfg['name'] = "syn attack - 64 bytes, multi CPUs"
scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
scenario_cfg['core_count'] = setup_cfg['core_count']
self.c.add_streams(ports = [0], streams = scenario_cfg['streams'])
- # use one core
+ # occupy the requested number of cores
+ cores_per_port = self.c.system_info.get('dp_core_count_per_port', 0)
+ if cores_per_port < scenario_cfg['core_count']:
+ assert 0, "test configuration requires {0} cores but only {1} per port are available".format(scenario_cfg['core_count'], cores_per_port)
+
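+ # core mask selects the low 'core_count' bits, e.g. core_count = 2 -> 0b11 (cores 0 and 1)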
core_mask = (2 ** scenario_cfg['core_count']) - 1
self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask])
}
break;
- /* a case called when a time strech happens */
- case scSTRECH:
- {
- dsec_t dt = cur_time - n_time;
- handle_time_strech(cur_time, dt, offset, thread);
-
- /* re-read the top of the queue - it might have changed with messaging */
- node = m_p_queue.top();
- n_time = node->m_time + offset;
-
- /* go back to INIT */
- state = scINIT;
-
- }
- break;
-
case scWORK:
{
int node_count = 0;
do_sleep(cur_time,thread,n_time); // estimate loop
state=scWORK;
break;
+
+
default:
- assert(0);
+ handle_slow_operations(state, node, cur_time, n_time, offset, thread);
+ break;
} /* switch */
+
}/* while*/
return (teardown(thread,always,old_offset,offset));
}
+
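+/**
+ * slow path of the scheduler - handles the rare states
+ * (currently only scSTRECH); FORCE_NO_INLINE keeps this
+ * cold code from being inlined into the hot scheduler loop
+ */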
+FORCE_NO_INLINE void CNodeGenerator::handle_slow_operations(sch_state_t &state,
+ CGenNode * &node,
+ dsec_t &cur_time,
+ dsec_t &n_time,
+ dsec_t &offset,
+ CFlowGenListPerThread *thread) {
+ switch (state) {
+ case scSTRECH:
+ {
+ handle_time_strech(node, cur_time, n_time, offset, thread);
+
+ /* go back to work */
+ state = scWORK;
+
+ }
+ break;
+
+ default:
+ assert(0);
+ }
+
+}
+
/**
 * when time is stretched - the flow_sync node
 * might be postponed too much
* @author imarom (7/31/2016)
*
*/
-FORCE_NO_INLINE void CNodeGenerator::handle_time_strech(dsec_t cur_time,
- dsec_t dt,
- dsec_t &offset,
- CFlowGenListPerThread *thread) {
+void CNodeGenerator::handle_time_strech(CGenNode * &node,
+ dsec_t &cur_time,
+ dsec_t &n_time,
+ dsec_t &offset,
+ CFlowGenListPerThread *thread) {
+
+
+ /* fix the time offset */
+ dsec_t dt = cur_time - n_time;
+ offset += dt;
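+ /* e.g. a node due at n_time = 10.0s observed at cur_time = 10.5s
+ gives dt = 0.5s, which shifts all pending node times forward */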
/* check if flow sync message was delayed too much */
if ( (cur_time - m_last_sync_time_sec) > SYNC_TIME_OUT ) {
handle_maintenance(thread);
+
+ /* re-read the top of the queue - it might have changed with messaging */
+ node = m_p_queue.top();
+ n_time = node->m_time + offset;
}
- /* fix the time offset */
- offset += dt;
}
int CNodeGenerator::flush_file_sim(dsec_t max_time,
CFlowGenListPerThread * thread,
double &old_offset);
- FORCE_NO_INLINE void handle_time_strech(dsec_t cur_time, dsec_t dt, dsec_t &offset, CFlowGenListPerThread * thread);
+ FORCE_NO_INLINE void handle_slow_operations(sch_state_t &state,
+ CGenNode * &node,
+ dsec_t &cur_time,
+ dsec_t &n_time,
+ dsec_t &offset,
+ CFlowGenListPerThread *thread);
+
+ void handle_time_strech(CGenNode * &node,
+ dsec_t &cur_time,
+ dsec_t &n_time,
+ dsec_t &offset,
+ CFlowGenListPerThread *thread);
+
private:
void handle_command(CGenNode *node, CFlowGenListPerThread *thread, bool &exit_scheduler);