Ports on which to execute the command
ipg_usec : float
- Inter-packet gap in microseconds
+ Inter-packet gap in microseconds.
+ Mutually exclusive with min_ipg_usec.
speedup : float
A factor to adjust IPG; effectively, IPG = IPG / speedup.
min_ipg_usec : float
Minimum inter-packet gap in microseconds, to guard against a too-small IPG.
+ Mutually exclusive with ipg_usec.
:raises:
+ :exc:`STLError`
Ports on which to execute the command
ipg_usec : float
- Inter-packet gap in microseconds
+ Inter-packet gap in microseconds.
+ Mutually exclusive with min_ipg_usec.
speedup : float
A factor to adjust IPG; effectively, IPG = IPG / speedup.
min_ipg_usec : float
Minimum inter-packet gap in microseconds, to guard against a too-small IPG.
+ Mutually exclusive with ipg_usec.
:raises:
+ :exc:`STLError`
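As a usage sketch (hypothetical client object `c` and capture file name; only arguments that appear in this diff are used), the two gap arguments are now mutually exclusive:

```python
# Sketch: 'c' is assumed to be a connected STL client, 'cap.pcap' a local capture file.
# Replay with the IPG taken from the pcap, clamped to at least 10 usec:
c.push_pcap('cap.pcap', ports = [0], min_ipg_usec = 10, speedup = 2.0, count = 1)

# Passing both gap arguments is rejected by the new validation below:
# c.push_pcap('cap.pcap', ports = [0], ipg_usec = 50, min_ipg_usec = 10)  # raises STLError
```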
validate_type('vm', vm, (list, type(None)))
validate_type('is_dual', is_dual, bool)
validate_type('min_ipg_usec', min_ipg_usec, (float, int, type(None)))
+ if all([ipg_usec, min_ipg_usec]):
+ raise STLError('Please specify either ipg_usec or min_ipg_usec, not both.')
# no support for > 1MB PCAP - use push remote
if opts.remote:
self.push_remote(opts.file[0],
- ports = opts.ports,
- ipg_usec = opts.ipg_usec,
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
min_ipg_usec = opts.min_ipg_usec,
- speedup = opts.speedup,
- count = opts.count,
- duration = opts.duration,
- is_dual = opts.dual)
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ is_dual = opts.dual)
else:
self.push_pcap(opts.file[0],
- ports = opts.ports,
- ipg_usec = opts.ipg_usec,
+ ports = opts.ports,
+ ipg_usec = opts.ipg_usec,
min_ipg_usec = opts.min_ipg_usec,
- speedup = opts.speedup,
- count = opts.count,
- duration = opts.duration,
- force = opts.force,
- is_dual = opts.dual)
+ speedup = opts.speedup,
+ count = opts.count,
+ duration = opts.duration,
+ force = opts.force,
+ is_dual = opts.dual)
# check filename
if not os.path.isfile(pcap_file):
raise STLError("file '{0}' does not exists".format(pcap_file))
+ if speedup <= 0:
+ raise STLError('Speedup must be positive.')
+ if min_ipg_usec and min_ipg_usec < 0:
+ raise STLError('min_ipg_usec should not be negative.')
+
# make sure IPG is not less than 0.001 usec
if (ipg_usec is not None and (ipg_usec < 0.001 * speedup) and
def __pkts_to_streams (pkts, ipg_usec, min_ipg_usec, speedup, loop_count, vm, packet_hook, start_delay_usec = 0):
streams = []
- if speedup == 0:
- raise STLError('Speedup should not be 0')
- if min_ipg_usec and min_ipg_usec < 0:
- raise STLError('min_ipg_usec should not be negative.')
-
if packet_hook:
pkts = [(packet_hook(cap), meta) for (cap, meta) in pkts]
- if ipg_usec == None:
- constant_diff = None
- else:
- constant_diff = ipg_usec / float(speedup)
- if min_ipg_usec is not None:
- constant_diff = max(constant_diff, min_ipg_usec)
-
for i, (cap, meta) in enumerate(pkts, start = 1):
# IPG - if not provided, take from cap
- if constant_diff is None:
+ if ipg_usec is None:
packet_time = meta[0] * 1e6 + meta[1]
if i == 1:
- isg = min_ipg_usec if min_ipg_usec else 0
- else:
- isg = (packet_time - prev_time) / float(speedup)
- if min_ipg_usec:
- isg = max(isg, min_ipg_usec)
+ prev_time = packet_time
+ isg = (packet_time - prev_time) / float(speedup)
+ if min_ipg_usec and isg < min_ipg_usec:
+ isg = min_ipg_usec
prev_time = packet_time
- else:
- isg = constant_diff
+ else: # user specified ipg
+ if min_ipg_usec:
+ isg = min_ipg_usec
+ else:
+ isg = ipg_usec / float(speedup)
# handle last packet
if i == len(pkts):
else:
next = i + 1
action_count = 0
- self_start = False if i != 1 else True
-
- # add stream with delay that will not be part of loop: "delayed_start" -> 1 -> 2 -> 3 -> ... -> 1 -> 2
- if start_delay_usec and i == 1:
- if loop_count == 1: # no loop actually
- isg = start_delay_usec
- else:
- streams.append(STLStream(name = 'delayed_start',
- packet = STLPktBuilder(pkt_buffer = cap, vm = vm),
- mode = STLTXSingleBurst(total_pkts = 1, percentage = 100),
- self_start = True,
- isg = start_delay_usec,
- action_count = action_count,
- next = next))
- action_count = max(0, action_count - 1)
- self_start = False
streams.append(STLStream(name = i,
packet = STLPktBuilder(pkt_buffer = cap, vm = vm),
mode = STLTXSingleBurst(total_pkts = 1, percentage = 100),
- self_start = self_start,
- isg = isg,
+ self_start = (i == 1),
+ isg = isg, # usec
action_count = action_count,
next = next))
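The new ISG arithmetic can be restated in a small standalone sketch (invented timestamps, not library code): when the IPG is taken from the capture, the gap to the previous packet is divided by `speedup` and then clamped from below by `min_ipg_usec`, with the first packet getting a zero gap before clamping.

```python
# Standalone sketch of the new ISG computation; timestamps are invented.
def isg_list(times_usec, speedup = 1.0, min_ipg_usec = None):
    isgs, prev = [], None
    for t in times_usec:
        prev = t if prev is None else prev           # first packet: zero gap
        isg = (t - prev) / float(speedup)
        if min_ipg_usec and isg < min_ipg_usec:
            isg = min_ipg_usec                       # clamp from below
        isgs.append(isg)
        prev = t
    return isgs

print(isg_list([0, 100, 130], speedup = 2.0, min_ipg_usec = 20.0))  # [20.0, 50.0, 20.0]
```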
show_count = 10
err_list = []
err_count = 0
- for x in self.rc_list:
- if x.data and not x.rc:
- err_count += 1
- if len(err_list) < show_count:
- err_list.append(format_text(x.data, 'bold'))
+ for x in filter(len, listify(self.err())):
+ err_count += 1
+ if len(err_list) < show_count:
+ err_list.append(format_text(x, 'bold'))
s = '\n' if len(err_list) > 1 else ''
if err_count > show_count:
s += format_text('%s errors occurred, showing first %s:\n' % (err_count, show_count), 'bold')
def underline(text):
return text_attribute(text, 'underline')
-
-start_end_newlines = re.compile('^(\n)*([^\n].*[^\n])?(\n)*$', re.DOTALL)
+# apply attribute on each non-empty line
def text_attribute(text, attribute):
- match = start_end_newlines.match(text)
- try:
- startpad, msg, endpad = match.groups('')
- except:
- startpad = endpad = ''
- msg = text
- return "{startpad}{startattr}{txt}{endattr}{endpad}".format(
- startpad = startpad,
- startattr = TEXT_CODES[attribute]['start'],
- txt = msg,
- endattr = TEXT_CODES[attribute]['end'],
- endpad = endpad)
+ return '\n'.join(['{start}{txt}{end}'.format(
+ start = TEXT_CODES[attribute]['start'],
+ txt = line,
+ end = TEXT_CODES[attribute]['end'])
+ if line else '' for line in ('%s' % text).split('\n')])
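A quick illustration of the per-line behavior (assuming `TEXT_CODES` maps 'bold' to ANSI start/end escapes, as used elsewhere in this file): each non-empty line is wrapped individually, so empty lines stay free of escape codes.

```python
# Sketch: each non-empty line gets its own start/end escape codes.
print(text_attribute('first\n\nsecond', 'bold'))
# -> '<bold-start>first<bold-end>' + '\n' + '' + '\n' + '<bold-start>second<bold-end>'
```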
FUNC_DICT = {'blue': blue,
uint8_t port_id = parse_port(params, result);
std::string pcap_filename = parse_string(params, "pcap_filename", result);
double ipg_usec = parse_double(params, "ipg_usec", result);
- double min_ipg_sec = 0;
- if (params.isMember("min_ipg_usec")) {
- min_ipg_sec = usec_to_sec(parse_double(params, "min_ipg_usec", result));
- }
- double speedup = parse_double(params, "speedup", result);
+ double min_ipg_sec = usec_to_sec(parse_udouble(params, "min_ipg_usec", result, 0));
+ double speedup = parse_udouble(params, "speedup", result);
uint32_t count = parse_uint32(params, "count", result);
double duration = parse_double(params, "duration", result);
bool is_dual = parse_bool(params, "is_dual", result, false);
uint8_t port_id = parse_port(params, result);
- uint32_t stream_id = parse_int(params, "stream_id", result);
+ uint32_t stream_id = parse_uint32(params, "stream_id", result);
const Json::Value &section = parse_object(params, "stream", result);
stream->m_random_seed = parse_uint32(section, "random_seed", result,0); /* default is zero */
/* inter stream gap */
- stream->m_isg_usec = parse_double(section, "isg", result);
+ stream->m_isg_usec = parse_udouble(section, "isg", result);
stream->m_next_stream_id = parse_int(section, "next_stream_id", result);
generate_parse_err(result, "RX stats is not supported on this interface");
}
- stream->m_rx_check.m_pg_id = parse_int(rx, "stream_id", result);
+ stream->m_rx_check.m_pg_id = parse_uint32(rx, "stream_id", result);
std::string type = parse_string(rx, "rule_type", result);
if (type == "latency") {
stream->m_rx_check.m_rule_type = TrexPlatformApi::IF_STAT_PAYLOAD;
} else if (type == "single_burst") {
- uint32_t total_pkts = parse_int(mode, "total_pkts", result);
+ uint32_t total_pkts = parse_uint32(mode, "total_pkts", result);
stream.reset(new TrexStream(TrexStream::stSINGLE_BURST, port_id, stream_id));
stream->set_single_burst(total_pkts);
} else if (type == "multi_burst") {
- double ibg_usec = parse_double(mode, "ibg", result);
- uint32_t num_bursts = parse_int(mode, "count", result);
- uint32_t pkts_per_burst = parse_int(mode, "pkts_per_burst", result);
+ double ibg_usec = parse_udouble(mode, "ibg", result);
+ uint32_t num_bursts = parse_uint32(mode, "count", result);
+ uint32_t pkts_per_burst = parse_uint32(mode, "pkts_per_burst", result);
stream.reset(new TrexStream(TrexStream::stMULTI_BURST,port_id, stream_id ));
stream->set_multi_burst(pkts_per_burst,num_bursts,ibg_usec);
void
TrexRpcCmdAddStream::parse_rate(const Json::Value &rate, std::unique_ptr<TrexStream> &stream, Json::Value &result) {
- double value = parse_double(rate, "value", result);
- if (value <= 0) {
- std::stringstream ss;
- ss << "rate value must be a positive number - got: '" << value << "'";
- generate_parse_err(result, ss.str());
- }
+ double value = parse_udouble(rate, "value", result);
auto rate_types = {"pps", "bps_L1", "bps_L2", "percentage"};
std::string rate_type = parse_choice(rate, "type", rate_types, result);
uint8_t port_id = parse_port(params, result);
TrexStatelessPort *port = get_stateless_obj()->get_port_by_id(port_id);
- uint32_t stream_id = parse_int(params, "stream_id", result);
+ uint32_t stream_id = parse_uint32(params, "stream_id", result);
TrexStream *stream = port->get_stream_by_id(stream_id);
if (!stream) {
TrexStatelessPort *port = get_stateless_obj()->get_port_by_id(port_id);
bool get_pkt = parse_bool(params, "get_pkt", result);
- uint32_t stream_id = parse_int(params, "stream_id", result);
+ uint32_t stream_id = parse_uint32(params, "stream_id", result);
TrexStream *stream = port->get_stream_by_id(stream_id);
std::string type = parse_choice(mul_obj, "type", TrexPortMultiplier::g_types, result);
std::string op = parse_string(mul_obj, "op", result);
- double value = parse_double(mul_obj, "value", result);
+ double value = parse_udouble(mul_obj, "value", result);
- if ( value <=0 ){
+ if ( value == 0 ){
generate_parse_err(result, "multiplier can't be zero");
}
return "int";
case FIELD_TYPE_DOUBLE:
return "double";
+ case FIELD_TYPE_UDOUBLE:
+ return "unsigned double";
case FIELD_TYPE_OBJ:
return "object";
case FIELD_TYPE_STR:
case Json::uintValue:
return "uint";
case Json::realValue:
- return "real";
+ return "double";
case Json::stringValue:
return "string";
case Json::booleanValue:
void
TrexRpcCommand::check_field_type_common(const Json::Value &field, const std::string &name, field_type_e type, Json::Value &result) {
- std::stringstream ss;
+ std::string specific_err;
/* first check if field exists */
if (field == Json::Value::null) {
- ss << "field '" << name << "' is missing";
- generate_parse_err(result, ss.str());
+ specific_err = "field '" + name + "' is missing";
+ generate_parse_err(result, specific_err);
}
bool rc = true;
+ specific_err = "is '" + std::string(json_type_to_name(field)) + "', expecting '" + std::string(type_to_str(type)) + "'";
switch (type) {
case FIELD_TYPE_BYTE:
- if ( (!field.isUInt()) || (field.asInt() > 0xFF)) {
+ if (!field.isUInt64()) {
+ rc = false;
+ } else if (field.asUInt64() > 0xFF) {
+ specific_err = "has size bigger than uint8.";
rc = false;
}
break;
case FIELD_TYPE_UINT16:
- if ( (!field.isUInt()) || (field.asInt() > 0xFFFF)) {
+ if (!field.isUInt64()) {
+ rc = false;
+ } else if (field.asUInt64() > 0xFFFF) {
+ specific_err = "has size bigger than uint16.";
rc = false;
}
break;
case FIELD_TYPE_UINT32:
- if ( (!field.isUInt()) || (field.asUInt() > 0xFFFFFFFF)) {
+ if (!field.isUInt64()) {
+ rc = false;
+ } else if (field.asUInt64() > 0xFFFFFFFF) {
+ specific_err = "has size bigger than uint32.";
rc = false;
}
break;
}
break;
+ case FIELD_TYPE_UDOUBLE:
+ if (!field.isDouble()) {
+ rc = false;
+ } else if (field.asDouble() < 0) {
+ specific_err = "has negative value.";
+ rc = false;
+ }
+ break;
+
case FIELD_TYPE_OBJ:
if (!field.isObject()) {
rc = false;
}
if (!rc) {
- ss << "error at offset: " << field.getOffsetStart() << " - '" << name << "' is '" << json_type_to_name(field) << "', expecting '" << type_to_str(type) << "'";
- generate_parse_err(result, ss.str());
+ generate_parse_err(result, "error at offset: " + std::to_string(field.getOffsetStart()) + " - '" + name + "' " + specific_err);
}
}
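For readers skimming the diff, the new range rules can be restated in a short Python sketch (an illustration only; the real logic is the C++ `check_field_type_common` above): integer fields must fit the type's maximum, and the new UDOUBLE type additionally rejects negative numbers.

```python
# Illustrative restatement of the new field checks; not the C++ implementation.
UINT_LIMITS = {'uint8': 0xFF, 'uint16': 0xFFFF, 'uint32': 0xFFFFFFFF}

def fits(value, kind):
    """Return True if `value` would pass the corresponding field-type check."""
    if kind in UINT_LIMITS:
        return isinstance(value, int) and 0 <= value <= UINT_LIMITS[kind]
    if kind == 'udouble':
        return isinstance(value, (int, float)) and value >= 0
    raise ValueError('unknown kind: %s' % kind)

assert fits(255, 'uint8') and not fits(256, 'uint8')
assert fits(2.5, 'udouble') and not fits(-1.0, 'udouble')
```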
FIELD_TYPE_UINT64,
FIELD_TYPE_INT,
FIELD_TYPE_DOUBLE,
+ FIELD_TYPE_UDOUBLE,
FIELD_TYPE_BOOL,
FIELD_TYPE_STR,
FIELD_TYPE_OBJ,
return parent[param].asDouble();
}
+ template<typename T> double parse_udouble(const Json::Value &parent, const T &param, Json::Value &result) {
+ check_field_type(parent, param, FIELD_TYPE_UDOUBLE, result);
+ return parent[param].asDouble();
+ }
+
template<typename T> bool parse_bool(const Json::Value &parent, const T &param, Json::Value &result) {
check_field_type(parent, param, FIELD_TYPE_BOOL, result);
return parent[param].asBool();
return parse_double(parent, param, result);
}
+ template<typename T> double parse_udouble(const Json::Value &parent, const T &param, Json::Value &result, double def) {
+ /* if not exists - default */
+ if (parent[param] == Json::Value::null) {
+ if (def < 0) {
+ std::stringstream ss;
+ ss << "default value of '" << param << "' is negative (please report)";
+ generate_parse_err(result, ss.str());
+ } else {
+ return def;
+ }
+ }
+ return parse_udouble(parent, param, result);
+ }
+
template<typename T> bool parse_bool(const Json::Value &parent, const T &param, Json::Value &result, bool def) {
/* if not exists - default */
if (parent[param] == Json::Value::null) {
verify_state(PORT_STATE_IDLE | PORT_STATE_STREAMS, "add_stream");
- if (m_stream_table.size() >= 20000) {
- throw TrexException("Reached limit of 20k streams at the port.");
+ if (m_stream_table.size() >= MAX_STREAMS) {
+ throw TrexException("Reached limit of " + std::to_string(MAX_STREAMS) + " streams per port.");
}
get_stateless_obj()->m_rx_flow_stat.add_stream(stream);
TrexPortOwner m_owner;
int m_pending_async_stop_event;
+ static const uint32_t MAX_STREAMS = 20000;
};