summary |
shortlog |
log |
commit | commitdiff |
review |
tree
raw |
patch |
inline | side by side (from parent 1:
a316744)
Add an ALWAYS_ASSERT (...) macro, to (a) shut up coverity, and (b)
check the indicated condition in production images.
As in:
p = hash_get(...);
ALWAYS_ASSERT(p) /* was ASSERT(p) */
elt = pool_elt_at_index(pool, p[0]);
This may not be the best way to handle a specific case, but failure to
check return values at all followed by e.g. a pointer dereference
isn't ok.
Type: fix
Ticket: VPP-1837
Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: Ia97c641cefcfb7ea7d77ea5a55ed4afea0345acb
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (gtpu_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (gtpu_main.vtep6, &ip->ip6);
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (gtpu_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (gtpu_main.vtep6, &ip->ip6);
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
{
p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
{
p = hash_get(lb_maps_by_path_index, lbmp->lbmp_index);
+ ALWAYS_ASSERT(NULL != p);
fib_node_list_remove(p[0], lbmp->lbmp_sibling);
}
fib_node_list_remove(p[0], lbmp->lbmp_sibling);
}
tmp_buckets[jj++] = bucket++;
}
}
tmp_buckets[jj++] = bucket++;
}
}
{
bucket += lbmp->lbmp_weight;
}
{
bucket += lbmp->lbmp_weight;
}
fed = fib_entry_delegate_find(export_entry,
FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
fed = fib_entry_delegate_find(export_entry,
FIB_ENTRY_DELEGATE_ATTACHED_EXPORT);
+ ALWAYS_ASSERT(NULL != fed);
export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
export = pool_elt_at_index(fib_ae_export_pool, fed->fd_index);
pool_put(fib_ae_import_pool, import);
fib_entry_delegate_remove(fib_entry,
FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
pool_put(fib_ae_import_pool, import);
fib_entry_delegate_remove(fib_entry,
FIB_ENTRY_DELEGATE_ATTACHED_IMPORT);
s = format(s, "export-sibling:%d ", import->faei_export_sibling);
s = format(s, "exporter:%d ", import->faei_exporter);
s = format(s, "export-fib:%d ", import->faei_export_fib);
s = format(s, "export-sibling:%d ", import->faei_export_sibling);
s = format(s, "exporter:%d ", import->faei_exporter);
s = format(s, "export-fib:%d ", import->faei_export_fib);
s = format(s, "import-entry:%d ", import->faei_import_entry);
s = format(s, "import-fib:%d ", import->faei_import_fib);
s = format(s, "import-entry:%d ", import->faei_import_entry);
s = format(s, "import-fib:%d ", import->faei_import_fib);
fib_ae_export_t *export;
export = pool_elt_at_index(fib_ae_export_pool, expi);
fib_ae_export_t *export;
export = pool_elt_at_index(fib_ae_export_pool, expi);
s = format(s, "\n Attached-Export:%d:[", (export - fib_ae_export_pool));
s = format(s, "export-entry:%d ", export->faee_ei);
s = format(s, "\n Attached-Export:%d:[", (export - fib_ae_export_pool));
s = format(s, "export-entry:%d ", export->faee_ei);
next0 = t0->next_dpo.dpoi_next_node;
}
next0 = t0->next_dpo.dpoi_next_node;
}
+ ALWAYS_ASSERT (t0 != NULL);
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
next1 = t1->next_dpo.dpoi_next_node;
}
next1 = t1->next_dpo.dpoi_next_node;
}
+ ALWAYS_ASSERT (t1 != NULL);
vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
/* Note: change to always set next0 if it may be set to drop */
next0 = t0->next_dpo.dpoi_next_node;
}
/* Note: change to always set next0 if it may be set to drop */
next0 = t0->next_dpo.dpoi_next_node;
}
+
+ ALWAYS_ASSERT (t0 != NULL);
+
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
/* Apply the rewrite string. $$$$ vnet_rewrite? */
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
/* Apply the rewrite string. $$$$ vnet_rewrite? */
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (geneve_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (geneve_main.vtep6, &ip->ip6);
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (geneve_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (geneve_main.vtep6, &ip->ip6);
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
{
uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
{
uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
spd_index0 = p[0];
spd0 = pool_elt_at_index (im->spds, spd_index0);
last_sw_if_index = sw_if_index0;
spd_index0 = p[0];
spd0 = pool_elt_at_index (im->spds, spd_index0);
last_sw_if_index = sw_if_index0;
{
uword *smp;
smp = hash_get (app->listeners_table, listen_session_get_handle (listener));
{
uword *smp;
smp = hash_get (app->listeners_table, listen_session_get_handle (listener));
+ ALWAYS_ASSERT (smp != 0);
return segment_manager_get (*smp);
}
return segment_manager_get (*smp);
}
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_main.vtep6, &ip->ip6);
uword *vtep = ip46_address_is_ip4 (ip) ?
hash_get (vxlan_main.vtep4, ip->ip4.as_u32) :
hash_get_mem (vxlan_main.vtep6, &ip->ip6);
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
if (--(*vtep) != 0)
return *vtep;
ip46_address_is_ip4 (ip) ?
{
ASSERT (ip46_address_is_multicast (ip));
uword *p = hash_get_mem (vxlan_main.mcast_shared, ip);
{
ASSERT (ip46_address_is_multicast (ip));
uword *p = hash_get_mem (vxlan_main.mcast_shared, ip);
mcast_shared_t ret = {.as_u64 = *p };
return ret;
}
mcast_shared_t ret = {.as_u64 = *p };
return ret;
}
+/*
+ * This version always generates code, and has a Coverity-specific
+ * version to stop Coverity complaining about
+ * ALWAYS_ASSERT(p != 0); p->member...
+ */
+
+#ifndef __COVERITY__
+#define ALWAYS_ASSERT(truth) \
+do { \
+ if (PREDICT_FALSE(!(truth))) \
+ { \
+ _clib_error (CLIB_ERROR_ABORT, 0, 0, \
+ "%s:%d (%s) assertion `%s' fails", \
+ __FILE__, \
+ (uword) __LINE__, \
+ clib_error_function, \
+ # truth); \
+ } \
+} while (0)
+#else /* __COVERITY__ */
+#define ALWAYS_ASSERT(truth) \
+do { \
+ if (PREDICT_FALSE(!(truth))) \
+ { \
+ abort(); \
+ } \
+} while (0)
+#endif /* __COVERITY__ */
+
#if defined(__clang__)
#define STATIC_ASSERT(truth,...)
#else
#if defined(__clang__)
#define STATIC_ASSERT(truth,...)
#else