Coding standards cleanup in vnet/vnet/devices, fixes VPP-248
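
The diff below is a mechanical reindent: function return types move onto their own line above the name, local pointer declarations become "type *name" (parameter lists keep the spaced asterisk), "! expr" becomes "!expr", and do/while bodies are re-braced in GNU style. In miniature, with hypothetical names not taken from the patch (the shape matches what GNU indent produces, which is presumably the tool used here):

    typedef struct { volatile unsigned status; } dev_regs_t;

    /* Before: */
    static void wait_ready_old (dev_regs_t * r)
    {
      dev_regs_t * regs = r;
      while (! (regs->status & (1 << 0)))
        ;
    }

    /* After (GNU style): */
    static void
    wait_ready (dev_regs_t * r)
    {
      dev_regs_t *regs = r;
      while (!(regs->status & (1 << 0)))
        ;
    }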
diff --git a/vnet/vnet/devices/nic/ixge.c b/vnet/vnet/devices/nic/ixge.c
index b98e7d9..849dd08 100644
--- a/vnet/vnet/devices/nic/ixge.c
+++ b/vnet/vnet/devices/nic/ixge.c
@@ -54,40 +54,45 @@ ixge_main_t ixge_main;
 static vlib_node_registration_t ixge_input_node;
 static vlib_node_registration_t ixge_process_node;
 
-static void ixge_semaphore_get (ixge_device_t * xd)
+static void
+ixge_semaphore_get (ixge_device_t * xd)
 {
-  ixge_main_t * xm = &ixge_main;
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_regs_t * r = xd->regs;
+  ixge_main_t *xm = &ixge_main;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_regs_t *r = xd->regs;
   u32 i;
 
   i = 0;
-  while (! (r->software_semaphore & (1 << 0)))
+  while (!(r->software_semaphore & (1 << 0)))
     {
       if (i > 0)
        vlib_process_suspend (vm, 100e-6);
       i++;
     }
-  do {
-    r->software_semaphore |= 1 << 1;
-  } while (! (r->software_semaphore & (1 << 1)));
+  do
+    {
+      r->software_semaphore |= 1 << 1;
+    }
+  while (!(r->software_semaphore & (1 << 1)));
 }
 
-static void ixge_semaphore_release (ixge_device_t * xd)
+static void
+ixge_semaphore_release (ixge_device_t * xd)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   r->software_semaphore &= ~3;
 }
 
-static void ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
+static void
+ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
 {
-  ixge_main_t * xm = &ixge_main;
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_regs_t * r = xd->regs;
+  ixge_main_t *xm = &ixge_main;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_regs_t *r = xd->regs;
   u32 fw_mask = sw_mask << 5;
   u32 m, done = 0;
 
-  while (! done)
+  while (!done)
     {
       ixge_semaphore_get (xd);
       m = r->software_firmware_sync;
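
For reference, the handshake being reformatted in this hunk is two-stage: poll until bit 0 of the software semaphore register reads as set, then assert bit 1 and re-read until it sticks; release clears both bits. A condensed sketch with stand-in types (the driver version suspends the vlib process between polls rather than spinning):

    typedef unsigned int u32;
    typedef struct { volatile u32 software_semaphore; } regs_t;

    static void
    semaphore_get (regs_t * r)
    {
      /* Stage 1: wait for bit 0 to read as set. */
      while (!(r->software_semaphore & (1 << 0)))
        ;
      /* Stage 2: assert bit 1 and confirm the write took effect. */
      do
        r->software_semaphore |= 1 << 1;
      while (!(r->software_semaphore & (1 << 1)));
    }

    static void
    semaphore_release (regs_t * r)
    {
      r->software_semaphore &= ~3;   /* clear bits 0 and 1 */
    }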
@@ -95,22 +100,25 @@ static void ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
       if (done)
        r->software_firmware_sync = m | sw_mask;
       ixge_semaphore_release (xd);
-      if (! done)
+      if (!done)
        vlib_process_suspend (vm, 10e-3);
     }
 }
 
-static void ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
+static void
+ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   ixge_semaphore_get (xd);
   r->software_firmware_sync &= ~sw_mask;
   ixge_semaphore_release (xd);
 }
 
-u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
+u32
+ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
+                        u32 v, u32 is_read)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   const u32 busy_bit = 1 << 30;
   u32 x;
 
@@ -119,11 +127,13 @@ u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u3
 
   ASSERT (reg_index < (1 << 16));
   ASSERT (dev_type < (1 << 5));
-  if (! is_read)
+  if (!is_read)
     r->xge_mac.phy_data = v;
 
   /* Address cycle. */
-  x = reg_index | (dev_type << 16) | (xd->phys[xd->phy_index].mdio_address << 21);
+  x =
+    reg_index | (dev_type << 16) | (xd->
+                                   phys[xd->phy_index].mdio_address << 21);
   r->xge_mac.phy_command = x | busy_bit;
   /* Busy wait timed to take 28e-6 secs.  No suspend. */
   while (r->xge_mac.phy_command & busy_bit)
@@ -141,16 +151,25 @@ u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u3
   return v;
 }
 
-static u32 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
-{ return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */ 1); }
+static u32
+ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
+{
+  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0,  /* is_read */
+                                 1);
+}
 
-static void ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
-{ (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */ 0); }
+static void
+ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
+{
+  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v,  /* is_read */
+                                 0);
+}
 
-static void ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
+static void
+ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
 {
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, b->private_data);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
   u32 v;
 
   v = 0;
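
The PHY access path reflowed above drives MDIO transactions in cycles: pack the register index, device type, and MDIO address into phy_command, set the busy bit, and busy-wait for the hardware to clear it (timed at roughly 28 microseconds, hence no process suspend). A stripped-down sketch of the address cycle, with a hypothetical register struct covering only the fields used:

    typedef unsigned int u32;
    typedef struct { volatile u32 phy_command, phy_data; } xge_mac_regs_t;

    static void
    phy_address_cycle (xge_mac_regs_t * m, u32 mdio_address, u32 dev_type,
                       u32 reg_index)
    {
      const u32 busy_bit = 1 << 30;
      u32 x = reg_index | (dev_type << 16) | (mdio_address << 21);

      m->phy_command = x | busy_bit;
      /* Busy wait; hardware clears the bit when the cycle completes. */
      while (m->phy_command & busy_bit)
        ;
    }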
@@ -159,10 +178,11 @@ static void ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
   xd->regs->i2c_control = v;
 }
 
-static void ixge_i2c_get_bits (i2c_bus_t * b, int * scl, int * sda)
+static void
+ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
 {
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, b->private_data);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
   u32 v;
 
   v = xd->regs->i2c_control;
@@ -170,13 +190,14 @@ static void ixge_i2c_get_bits (i2c_bus_t * b, int * scl, int * sda)
   *scl = (v & (1 << 0)) != 0;
 }
 
-static u16 ixge_read_eeprom (ixge_device_t * xd, u32 address)
+static u16
+ixge_read_eeprom (ixge_device_t * xd, u32 address)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   u32 v;
-  r->eeprom_read = ((/* start bit */ (1 << 0)) | (address << 2));
+  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
   /* Wait for done bit. */
-  while (! ((v = r->eeprom_read) & (1 << 1)))
+  while (!((v = r->eeprom_read) & (1 << 1)))
     ;
   return v >> 16;
 }
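
ixge_read_eeprom, just above, packs a start bit into bit 0 and the word address from bit 2 up, then polls the done bit (bit 1) and returns the data from the top half of the register. The same logic isolated, with stand-in types:

    typedef unsigned int u32;
    typedef unsigned short u16;
    typedef struct { volatile u32 eeprom_read; } eeprom_regs_t;

    static u16
    eeprom_read_word (eeprom_regs_t * r, u32 address)
    {
      u32 v;
      r->eeprom_read = (1 << 0) | (address << 2);   /* start bit + word address */
      while (!((v = r->eeprom_read) & (1 << 1)))    /* poll done bit */
        ;
      return v >> 16;                               /* 16 data bits in the high half */
    }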
@@ -258,7 +279,7 @@ ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
       v |= (1 << 12);
       xd->regs->xge_mac.auto_negotiation_control = v;
 
-      while (! (xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
+      while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
        ;
 
       v = xd->regs->xge_mac.auto_negotiation_control;
@@ -277,16 +298,16 @@ ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
   ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
 
   /* Give time for link partner to notice that we're up. */
-  if (is_up &&
-      vlib_in_process_context(vlib_get_main())) {
-    vlib_process_suspend (vlib_get_main(), 300e-3);
-  }
+  if (is_up && vlib_in_process_context (vlib_get_main ()))
+    {
+      vlib_process_suspend (vlib_get_main (), 300e-3);
+    }
 }
 
 always_inline ixge_dma_regs_t *
 get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   ASSERT (qi < 128);
   if (rt == VLIB_RX)
     return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
@@ -297,19 +318,19 @@ get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
 static clib_error_t *
 ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
 {
-  vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
   uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, hif->dev_instance);
-  ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, 0);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
+  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
 
   if (is_up)
     {
       xd->regs->rx_enable |= 1;
       xd->regs->tx_dma_control |= 1;
       dr->control |= 1 << 25;
-      while (! (dr->control & (1 << 25)))
-        ;
+      while (!(dr->control & (1 << 25)))
+       ;
     }
   else
     {
@@ -322,24 +343,26 @@ ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
   return /* no error */ 0;
 }
 
-static void ixge_sfp_phy_init (ixge_device_t * xd)
+static void
+ixge_sfp_phy_init (ixge_device_t * xd)
 {
-  ixge_phy_t * phy = xd->phys + xd->phy_index;
-  i2c_bus_t * ib = &xd->i2c_bus;
+  ixge_phy_t *phy = xd->phys + xd->phy_index;
+  i2c_bus_t *ib = &xd->i2c_bus;
 
   ib->private_data = xd->device_index;
   ib->put_bits = ixge_i2c_put_bits;
   ib->get_bits = ixge_i2c_get_bits;
   vlib_i2c_init (ib);
 
-  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) &xd->sfp_eeprom);
+  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
 
-  if ( vlib_i2c_bus_timed_out(ib)  || ! sfp_eeprom_is_valid (&xd->sfp_eeprom))
+  if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
     xd->sfp_eeprom.id = SFP_ID_unknown;
   else
     {
       /* FIXME 5 => SR/LR eeprom ID. */
-      clib_error_t * e = ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
+      clib_error_t *e =
+       ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
       if (e)
        clib_error_report (e);
     }
@@ -347,11 +370,12 @@ static void ixge_sfp_phy_init (ixge_device_t * xd)
   phy->mdio_address = ~0;
 }
 
-static void ixge_phy_init (ixge_device_t * xd)
+static void
+ixge_phy_init (ixge_device_t * xd)
 {
-  ixge_main_t * xm = &ixge_main;
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_phy_t * phy = xd->phys + xd->phy_index;
+  ixge_main_t *xm = &ixge_main;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_phy_t *phy = xd->phys + xd->phy_index;
 
   switch (xd->device_id)
     {
@@ -383,16 +407,19 @@ static void ixge_phy_init (ixge_device_t * xd)
       return;
   }
 
-  phy->id = ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16)
-            | ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
+  phy->id =
+    ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
+     ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
 
   {
-    ELOG_TYPE_DECLARE (e) = {
-      .function = (char *) __FUNCTION__,
-      .format = "ixge %d, phy id 0x%d mdio address %d",
-      .format_args = "i4i4i4",
-    };
-    struct { u32 instance, id, address; } * ed;
+    ELOG_TYPE_DECLARE (e) =
+    {
+    .function = (char *) __FUNCTION__,.format =
+       "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
+    struct
+    {
+      u32 instance, id, address;
+    } *ed;
     ed = ELOG_DATA (&vm->elog_main, e);
     ed->instance = xd->device_index;
     ed->id = phy->id;
@@ -400,26 +427,34 @@ static void ixge_phy_init (ixge_device_t * xd)
   }
 
   /* Reset phy. */
-  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL, XGE_PHY_CONTROL_RESET);
+  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
+                     XGE_PHY_CONTROL_RESET);
 
   /* Wait for self-clearing reset bit to clear. */
-  do {
-    vlib_process_suspend (vm, 1e-3);
-  } while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) & XGE_PHY_CONTROL_RESET);
+  do
+    {
+      vlib_process_suspend (vm, 1e-3);
+    }
+  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
+        XGE_PHY_CONTROL_RESET);
 }
 
-static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
+static u8 *
+format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
 {
-  ixge_rx_from_hw_descriptor_t * d = va_arg (*va, ixge_rx_from_hw_descriptor_t *);
+  ixge_rx_from_hw_descriptor_t *d =
+    va_arg (*va, ixge_rx_from_hw_descriptor_t *);
   u32 s0 = d->status[0], s2 = d->status[2];
   u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
   uword indent = format_get_indent (s);
 
   s = format (s, "%s-owned",
-             (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" : "hw");
-  s = format (s, ", length this descriptor %d, l3 offset %d",
-             d->n_packet_bytes_this_descriptor,
-             IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
+             (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
+             "hw");
+  s =
+    format (s, ", length this descriptor %d, l3 offset %d",
+           d->n_packet_bytes_this_descriptor,
+           IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
   if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
     s = format (s, ", end-of-packet");
 
@@ -441,14 +476,17 @@ static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
   if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
     {
       s = format (s, "ip4%s",
-                 (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" : "");
+                 (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
+                 "");
       if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
        s = format (s, " checksum %s",
-                   (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ? "bad" : "ok");
+                   (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
+                   "bad" : "ok");
     }
   if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
     s = format (s, "ip6%s",
-               (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" : "");
+               (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
+               "");
   is_tcp = is_udp = 0;
   if ((is_ip = (is_ip4 | is_ip6)))
     {
@@ -462,24 +500,26 @@ static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
 
   if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
     s = format (s, ", tcp checksum %s",
-               (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" : "ok");
+               (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
+               "ok");
   if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
     s = format (s, ", udp checksum %s",
-               (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" : "ok");
+               (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
+               "ok");
 
   return s;
 }
 
-static u8 * format_ixge_tx_descriptor (u8 * s, va_list * va)
+static u8 *
+format_ixge_tx_descriptor (u8 * s, va_list * va)
 {
-  ixge_tx_descriptor_t * d = va_arg (*va, ixge_tx_descriptor_t *);
+  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
   u32 s0 = d->status0, s1 = d->status1;
   uword indent = format_get_indent (s);
   u32 v;
 
   s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
-             d->buffer_address,
-             s1 >> 14, d->n_bytes_this_buffer);
+             d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
 
   s = format (s, "\n%U", format_white_space, indent);
 
@@ -514,7 +554,8 @@ static u8 * format_ixge_tx_descriptor (u8 * s, va_list * va)
   return s;
 }
 
-typedef struct {
+typedef struct
+{
   ixge_descriptor_t before, after;
 
   u32 buffer_index;
@@ -529,22 +570,24 @@ typedef struct {
   vlib_buffer_t buffer;
 } ixge_rx_dma_trace_t;
 
-static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
+static u8 *
+format_ixge_rx_dma_trace (u8 * s, va_list * va)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
-  vlib_node_t * node = va_arg (*va, vlib_node_t *);
-  vnet_main_t * vnm = vnet_get_main();
-  ixge_rx_dma_trace_t * t = va_arg (*va, ixge_rx_dma_trace_t *);
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, t->device_index);
-  format_function_t * f;
+  vlib_node_t *node = va_arg (*va, vlib_node_t *);
+  vnet_main_t *vnm = vnet_get_main ();
+  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+  format_function_t *f;
   uword indent = format_get_indent (s);
 
   {
-    vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
-    s = format (s, "%U rx queue %d",
-               format_vnet_sw_interface_name, vnm, sw,
-               t->queue_index);
+    vnet_sw_interface_t *sw =
+      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+    s =
+      format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
+             t->queue_index);
   }
 
   s = format (s, "\n%Ubefore: %U",
@@ -552,19 +595,16 @@ static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
              format_ixge_rx_from_hw_descriptor, &t->before);
   s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
              format_white_space, indent,
-             t->after.rx_to_hw.head_address,
-             t->after.rx_to_hw.tail_address);
+             t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
 
   s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
-             t->buffer_index,
-             format_vlib_buffer, &t->buffer);
+             t->buffer_index, format_vlib_buffer, &t->buffer);
 
-  s = format (s, "\n%U",
-             format_white_space, indent);
+  s = format (s, "\n%U", format_white_space, indent);
 
   f = node->format_buffer;
-  if (! f || ! t->is_start_of_packet)
+  if (!f || !t->is_start_of_packet)
     f = format_hex_bytes;
   s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
 
@@ -578,16 +618,17 @@ static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
   _ (rx_alloc_fail, "rx buf alloc from free list failed")      \
   _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
 
-typedef enum {
+typedef enum
+{
 #define _(f,s) IXGE_ERROR_##f,
   foreach_ixge_error
 #undef _
-  IXGE_N_ERROR,
+    IXGE_N_ERROR,
 } ixge_error_t;
 
 always_inline void
-ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
-                                       u32 s00, u32 s02,
+ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
+                                      u32 s00, u32 s02,
                                       u8 * next0, u8 * error0, u32 * flags0)
 {
   u8 is0_ip4, is0_ip6, n0, e0;
@@ -600,8 +641,7 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
   n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
 
   e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
-        ? IXGE_ERROR_ip4_checksum_error
-        : e0);
+       ? IXGE_ERROR_ip4_checksum_error : e0);
 
   is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
   n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
@@ -614,13 +654,11 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
 
   f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-       ? IP_BUFFER_L4_CHECKSUM_COMPUTED
-       : 0);
+       ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
 
   f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-        ? 0
-        : IP_BUFFER_L4_CHECKSUM_CORRECT);
+        ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
 
   *error0 = e0;
   *next0 = n0;
@@ -628,8 +666,8 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
 }
 
 always_inline void
-ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
-                                       u32 s00, u32 s02,
+ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
+                                      u32 s00, u32 s02,
                                       u32 s10, u32 s12,
                                       u8 * next0, u8 * error0, u32 * flags0,
                                       u8 * next1, u8 * error1, u32 * flags1)
@@ -648,11 +686,9 @@ ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
   n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
 
   e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
-        ? IXGE_ERROR_ip4_checksum_error
-        : e0);
+       ? IXGE_ERROR_ip4_checksum_error : e0);
   e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
-           ? IXGE_ERROR_ip4_checksum_error
-        : e1);
+       ? IXGE_ERROR_ip4_checksum_error : e1);
 
   is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
   is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
@@ -677,21 +713,17 @@ ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
 
   f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-       ? IP_BUFFER_L4_CHECKSUM_COMPUTED
-       : 0);
+       ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
   f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-       ? IP_BUFFER_L4_CHECKSUM_COMPUTED
-       : 0);
+       ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
 
   f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-        ? 0
-        : IP_BUFFER_L4_CHECKSUM_CORRECT);
+        ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
   f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                 | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-        ? 0
-        : IP_BUFFER_L4_CHECKSUM_CORRECT);
+        ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
 
   *flags0 = f0;
   *flags1 = f1;
@@ -703,14 +735,13 @@ ixge_rx_trace (ixge_main_t * xm,
               ixge_dma_queue_t * dq,
               ixge_descriptor_t * before_descriptors,
               u32 * before_buffers,
-              ixge_descriptor_t * after_descriptors,
-              uword n_descriptors)
+              ixge_descriptor_t * after_descriptors, uword n_descriptors)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  vlib_node_runtime_t * node = dq->rx.node;
-  ixge_rx_from_hw_descriptor_t * bd;
-  ixge_rx_to_hw_descriptor_t * ad;
-  u32 * b, n_left, is_sop, next_index_sop;
+  vlib_main_t *vm = xm->vlib_main;
+  vlib_node_runtime_t *node = dq->rx.node;
+  ixge_rx_from_hw_descriptor_t *bd;
+  ixge_rx_to_hw_descriptor_t *ad;
+  u32 *b, n_left, is_sop, next_index_sop;
 
   n_left = n_descriptors;
   b = before_buffers;
@@ -722,8 +753,8 @@ ixge_rx_trace (ixge_main_t * xm,
   while (n_left >= 2)
     {
       u32 bi0, bi1, flags0, flags1;
-      vlib_buffer_t * b0, * b1;
-      ixge_rx_dma_trace_t * t0, * t1;
+      vlib_buffer_t *b0, *b1;
+      ixge_rx_dma_trace_t *t0, *t1;
       u8 next0, error0, next1, error1;
 
       bi0 = b[0];
@@ -734,7 +765,7 @@ ixge_rx_trace (ixge_main_t * xm,
       b1 = vlib_get_buffer (vm, bi1);
 
       ixge_rx_next_and_error_from_status_x2 (xd,
-                                             bd[0].status[0], bd[0].status[2],
+                                            bd[0].status[0], bd[0].status[2],
                                             bd[1].status[0], bd[1].status[2],
                                             &next0, &error0, &flags0,
                                             &next1, &error1, &flags1);
@@ -776,8 +807,8 @@ ixge_rx_trace (ixge_main_t * xm,
   while (n_left >= 1)
     {
       u32 bi0, flags0;
-      vlib_buffer_t * b0;
-      ixge_rx_dma_trace_t * t0;
+      vlib_buffer_t *b0;
+      ixge_rx_dma_trace_t *t0;
       u8 next0, error0;
 
       bi0 = b[0];
@@ -786,7 +817,7 @@ ixge_rx_trace (ixge_main_t * xm,
       b0 = vlib_get_buffer (vm, bi0);
 
       ixge_rx_next_and_error_from_status_x1 (xd,
-                                             bd[0].status[0], bd[0].status[2],
+                                            bd[0].status[0], bd[0].status[2],
                                             &next0, &error0, &flags0);
 
       next_index_sop = is_sop ? next0 : next_index_sop;
@@ -810,7 +841,8 @@ ixge_rx_trace (ixge_main_t * xm,
     }
 }
 
-typedef struct {
+typedef struct
+{
   ixge_tx_descriptor_t descriptor;
 
   u32 buffer_index;
@@ -825,22 +857,24 @@ typedef struct {
   vlib_buffer_t buffer;
 } ixge_tx_dma_trace_t;
 
-static u8 * format_ixge_tx_dma_trace (u8 * s, va_list * va)
+static u8 *
+format_ixge_tx_dma_trace (u8 * s, va_list * va)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
-  ixge_tx_dma_trace_t * t = va_arg (*va, ixge_tx_dma_trace_t *);
-  vnet_main_t * vnm = vnet_get_main();
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, t->device_index);
-  format_function_t * f;
+  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
+  vnet_main_t *vnm = vnet_get_main ();
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+  format_function_t *f;
   uword indent = format_get_indent (s);
 
   {
-    vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
-    s = format (s, "%U tx queue %d",
-               format_vnet_sw_interface_name, vnm, sw,
-               t->queue_index);
+    vnet_sw_interface_t *sw =
+      vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+    s =
+      format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
+             t->queue_index);
   }
 
   s = format (s, "\n%Udescriptor: %U",
@@ -849,28 +883,27 @@ static u8 * format_ixge_tx_dma_trace (u8 * s, va_list * va)
 
   s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
-             t->buffer_index,
-             format_vlib_buffer, &t->buffer);
+             t->buffer_index, format_vlib_buffer, &t->buffer);
 
-  s = format (s, "\n%U",
-             format_white_space, indent);
+  s = format (s, "\n%U", format_white_space, indent);
 
   f = format_ethernet_header_with_length;
-  if (! f || ! t->is_start_of_packet)
+  if (!f || !t->is_start_of_packet)
     f = format_hex_bytes;
   s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
 
   return s;
 }
 
-typedef struct {
-  vlib_node_runtime_t * node;
+typedef struct
+{
+  vlib_node_runtime_t *node;
 
   u32 is_start_of_packet;
 
   u32 n_bytes_in_packet;
 
-  ixge_tx_descriptor_t * start_of_packet_descriptor;
+  ixge_tx_descriptor_t *start_of_packet_descriptor;
 } ixge_tx_state_t;
 
 static void
@@ -879,13 +912,12 @@ ixge_tx_trace (ixge_main_t * xm,
               ixge_dma_queue_t * dq,
               ixge_tx_state_t * tx_state,
               ixge_tx_descriptor_t * descriptors,
-              u32 * buffers,
-              uword n_descriptors)
+              u32 * buffers, uword n_descriptors)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  vlib_node_runtime_t * node = tx_state->node;
-  ixge_tx_descriptor_t * d;
-  u32 * b, n_left, is_sop;
+  vlib_main_t *vm = xm->vlib_main;
+  vlib_node_runtime_t *node = tx_state->node;
+  ixge_tx_descriptor_t *d;
+  u32 *b, n_left, is_sop;
 
   n_left = n_descriptors;
   b = buffers;
@@ -895,8 +927,8 @@ ixge_tx_trace (ixge_main_t * xm,
   while (n_left >= 2)
     {
       u32 bi0, bi1;
-      vlib_buffer_t * b0, * b1;
-      ixge_tx_dma_trace_t * t0, * t1;
+      vlib_buffer_t *b0, *b1;
+      ixge_tx_dma_trace_t *t0, *t1;
 
       bi0 = b[0];
       bi1 = b[1];
@@ -935,8 +967,8 @@ ixge_tx_trace (ixge_main_t * xm,
   while (n_left >= 1)
     {
       u32 bi0;
-      vlib_buffer_t * b0;
-      ixge_tx_dma_trace_t * t0;
+      vlib_buffer_t *b0;
+      ixge_tx_dma_trace_t *t0;
 
       bi0 = b[0];
       n_left -= 1;
@@ -980,7 +1012,8 @@ ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
 }
 
 always_inline uword
-ixge_tx_descriptor_matches_template (ixge_main_t * xm, ixge_tx_descriptor_t * d)
+ixge_tx_descriptor_matches_template (ixge_main_t * xm,
+                                    ixge_tx_descriptor_t * d)
 {
   u32 cmp;
 
@@ -1002,14 +1035,14 @@ ixge_tx_no_wrap (ixge_main_t * xm,
                 ixge_dma_queue_t * dq,
                 u32 * buffers,
                 u32 start_descriptor_index,
-                u32 n_descriptors,
-                ixge_tx_state_t * tx_state)
+                u32 n_descriptors, ixge_tx_state_t * tx_state)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_tx_descriptor_t * d, * d_sop;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_tx_descriptor_t *d, *d_sop;
   u32 n_left = n_descriptors;
-  u32 * to_free = vec_end (xm->tx_buffers_pending_free);
-  u32 * to_tx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
+  u32 *to_tx =
+    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
   u32 is_sop = tx_state->is_start_of_packet;
   u32 len_sop = tx_state->n_bytes_in_packet;
   u16 template_status = xm->tx_descriptor_template.status0;
@@ -1021,7 +1054,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
   while (n_left >= 4)
     {
-      vlib_buffer_t * b0, * b1;
+      vlib_buffer_t *b0, *b1;
       u32 bi0, fi0, len0;
       u32 bi1, fi1, len1;
       u8 is_eop0, is_eop1;
@@ -1031,7 +1064,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
       vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
 
       if ((descriptor_prefetch_rotor & 0x3) == 0)
-        CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
+       CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
 
       descriptor_prefetch_rotor += 2;
 
@@ -1062,24 +1095,32 @@ ixge_tx_no_wrap (ixge_main_t * xm,
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
 
-      d[0].buffer_address = vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
-      d[1].buffer_address = vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
+      d[0].buffer_address =
+       vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+      d[1].buffer_address =
+       vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
 
       d[0].n_bytes_this_buffer = len0;
       d[1].n_bytes_this_buffer = len1;
 
-      d[0].status0 = template_status | (is_eop0 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
-      d[1].status0 = template_status | (is_eop1 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+      d[0].status0 =
+       template_status | (is_eop0 <<
+                          IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+      d[1].status0 =
+       template_status | (is_eop1 <<
+                          IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
 
       len_sop = (is_sop ? 0 : len_sop) + len0;
-      d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+      d_sop[0].status1 =
+       IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
       d += 1;
       d_sop = is_eop0 ? d : d_sop;
 
       is_sop = is_eop0;
 
       len_sop = (is_sop ? 0 : len_sop) + len1;
-      d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+      d_sop[0].status1 =
+       IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
       d += 1;
       d_sop = is_eop1 ? d : d_sop;
 
@@ -1088,7 +1129,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
   while (n_left > 0)
     {
-      vlib_buffer_t * b0;
+      vlib_buffer_t *b0;
       u32 bi0, fi0, len0;
       u8 is_eop0;
 
@@ -1110,14 +1151,18 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
 
-      d[0].buffer_address = vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+      d[0].buffer_address =
+       vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
 
       d[0].n_bytes_this_buffer = len0;
 
-      d[0].status0 = template_status | (is_eop0 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+      d[0].status0 =
+       template_status | (is_eop0 <<
+                          IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
 
       len_sop = (is_sop ? 0 : len_sop) + len0;
-      d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+      d_sop[0].status1 =
+       IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
       d += 1;
       d_sop = is_eop0 ? d : d_sop;
 
@@ -1126,18 +1171,20 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
   if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
     {
-      to_tx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+      to_tx =
+       vec_elt_at_index (dq->descriptor_buffer_indices,
+                         start_descriptor_index);
       ixge_tx_trace (xm, xd, dq, tx_state,
-                    &dq->descriptors[start_descriptor_index].tx,
-                    to_tx,
+                    &dq->descriptors[start_descriptor_index].tx, to_tx,
                     n_descriptors);
     }
 
-  _vec_len (xm->tx_buffers_pending_free) = to_free - xm->tx_buffers_pending_free;
+  _vec_len (xm->tx_buffers_pending_free) =
+    to_free - xm->tx_buffers_pending_free;
 
   /* When we are done d_sop can point to end of ring.  Wrap it if so. */
   {
-    ixge_tx_descriptor_t * d_start = &dq->descriptors[0].tx;
+    ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
 
     ASSERT (d_sop - d_start <= dq->n_descriptors);
     d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
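
The wrap fix-up above exists because d_sop is advanced one past the last slot of a linear descriptor array; ixge_ring_add, whose body is not shown in this excerpt, presumably does the equivalent in index space. A plausible sketch of that arithmetic:

    typedef unsigned int u32;

    /* Advance index i by n around a ring of ring_size slots
       (assumes n <= ring_size, matching single-wrap use). */
    static u32
    ring_add (u32 i, u32 n, u32 ring_size)
    {
      u32 j = i + n;
      return j >= ring_size ? j - ring_size : j;
    }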
@@ -1152,14 +1199,13 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
 static uword
 ixge_interface_tx (vlib_main_t * vm,
-                  vlib_node_runtime_t * node,
-                  vlib_frame_t * f)
+                  vlib_node_runtime_t * node, vlib_frame_t * f)
 {
-  ixge_main_t * xm = &ixge_main;
-  vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, rd->dev_instance);
-  ixge_dma_queue_t * dq;
-  u32 * from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
+  ixge_main_t *xm = &ixge_main;
+  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
+  ixge_dma_queue_t *dq;
+  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
   u32 queue_index = 0;         /* fixme parameter */
   ixge_tx_state_t tx_state;
 
@@ -1189,8 +1235,8 @@ ixge_interface_tx (vlib_main_t * vm,
       i_sop = i_eop = ~0;
       for (i = n_left_tx - 1; i >= 0; i--)
        {
-         vlib_buffer_t * b = vlib_get_buffer (vm, from[i]);
-         if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+         vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
+         if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              if (i_sop != ~0 && i_eop != ~0)
                break;
@@ -1204,12 +1250,15 @@ ixge_interface_tx (vlib_main_t * vm,
        n_ok = i_eop + 1;
 
       {
-       ELOG_TYPE_DECLARE (e) = {
-         .function = (char *) __FUNCTION__,
-         .format = "ixge %d, ring full to tx %d head %d tail %d",
-         .format_args = "i2i2i2i2",
-       };
-       struct { u16 instance, to_tx, head, tail; } * ed;
+       ELOG_TYPE_DECLARE (e) =
+       {
+       .function = (char *) __FUNCTION__,.format =
+           "ixge %d, ring full to tx %d head %d tail %d",.format_args =
+           "i2i2i2i2",};
+       struct
+       {
+         u16 instance, to_tx, head, tail;
+       } *ed;
        ed = ELOG_DATA (&vm->elog_main, e);
        ed->instance = xd->device_index;
        ed->to_tx = n_descriptors_to_tx;
@@ -1221,7 +1270,8 @@ ixge_interface_tx (vlib_main_t * vm,
        {
          n_tail_drop = n_descriptors_to_tx - n_ok;
          vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
-         vlib_error_count (vm, ixge_input_node.index, IXGE_ERROR_tx_full_drops, n_tail_drop);
+         vlib_error_count (vm, ixge_input_node.index,
+                           IXGE_ERROR_tx_full_drops, n_tail_drop);
        }
 
       n_descriptors_to_tx = n_ok;
@@ -1232,7 +1282,8 @@ ixge_interface_tx (vlib_main_t * vm,
   /* Process from tail to end of descriptor ring. */
   if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
     {
-      u32 n = clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
+      u32 n =
+       clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
       n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
       from += n;
       n_descriptors_to_tx -= n;
@@ -1244,7 +1295,8 @@ ixge_interface_tx (vlib_main_t * vm,
 
   if (n_descriptors_to_tx > 0)
     {
-      u32 n = ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
+      u32 n =
+       ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
       from += n;
       ASSERT (n == n_descriptors_to_tx);
       dq->tail_index += n;
@@ -1259,13 +1311,13 @@ ixge_interface_tx (vlib_main_t * vm,
   /* Report status when last descriptor is done. */
   {
     u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
-    ixge_tx_descriptor_t * d = &dq->descriptors[i].tx;
+    ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
     d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
   }
 
   /* Give new descriptors to hardware. */
   {
-    ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_TX, queue_index);
+    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
 
     CLIB_MEMORY_BARRIER ();
 
@@ -1291,26 +1343,26 @@ static uword
 ixge_rx_queue_no_wrap (ixge_main_t * xm,
                       ixge_device_t * xd,
                       ixge_dma_queue_t * dq,
-                      u32 start_descriptor_index,
-                      u32 n_descriptors)
+                      u32 start_descriptor_index, u32 n_descriptors)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  vlib_node_runtime_t * node = dq->rx.node;
-  ixge_descriptor_t * d;
-  static ixge_descriptor_t * d_trace_save;
-  static u32 * d_trace_buffers;
+  vlib_main_t *vm = xm->vlib_main;
+  vlib_node_runtime_t *node = dq->rx.node;
+  ixge_descriptor_t *d;
+  static ixge_descriptor_t *d_trace_save;
+  static u32 *d_trace_buffers;
   u32 n_descriptors_left = n_descriptors;
-  u32 * to_rx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
-  u32 * to_add;
+  u32 *to_rx =
+    vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+  u32 *to_add;
   u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
   u32 bi_last = dq->rx.saved_last_buffer_index;
   u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
   u32 is_sop = dq->rx.is_start_of_packet;
-  u32 next_index, n_left_to_next, * to_next;
+  u32 next_index, n_left_to_next, *to_next;
   u32 n_packets = 0;
   u32 n_bytes = 0;
   u32 n_trace = vlib_get_trace_count (vm, node);
-  vlib_buffer_t * b_last, b_dummy;
+  vlib_buffer_t *b_last, b_dummy;
 
   ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
   d = &dq->descriptors[start_descriptor_index];
@@ -1346,8 +1398,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
           xm->vlib_buffer_free_list_index);
        _vec_len (xm->rx_buffers_to_add) += n_allocated;
 
-        /* Handle transient allocation failure */
-       if (PREDICT_FALSE(l + n_allocated <= n_descriptors_left))
+       /* Handle transient allocation failure */
+       if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
          {
            if (n_allocated == 0)
              vlib_error_count (vm, ixge_input_node.index,
@@ -1358,7 +1410,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
            n_descriptors_left = l + n_allocated;
          }
-        n_descriptors = n_descriptors_left;
+       n_descriptors = n_descriptors_left;
       }
 
     /* Add buffers from end of vector going backwards. */
@@ -1367,25 +1419,24 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
   while (n_descriptors_left > 0)
     {
-      vlib_get_next_frame (vm, node, next_index,
-                          to_next, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
       while (n_descriptors_left >= 4 && n_left_to_next >= 2)
        {
-         vlib_buffer_t * b0, * b1;
+         vlib_buffer_t *b0, *b1;
          u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
          u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
          u8 is_eop0, error0, next0;
          u8 is_eop1, error1, next1;
-          ixge_descriptor_t d0, d1;
+         ixge_descriptor_t d0, d1;
 
          vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
          vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
 
-          CLIB_PREFETCH (d + 2, 32, STORE);
+         CLIB_PREFETCH (d + 2, 32, STORE);
 
-          d0.as_u32x4 = d[0].as_u32x4;
-          d1.as_u32x4 = d[1].as_u32x4;
+         d0.as_u32x4 = d[0].as_u32x4;
+         d1.as_u32x4 = d[1].as_u32x4;
 
          s20 = d0.rx_from_hw.status[2];
          s21 = d1.rx_from_hw.status[2];
@@ -1393,7 +1444,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          s00 = d0.rx_from_hw.status[0];
          s01 = d1.rx_from_hw.status[0];
 
-         if (! ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+         if (!
+             ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
            goto found_hw_owned_descriptor_x2;
 
          bi0 = to_rx[0];
@@ -1408,21 +1460,25 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          to_rx += 2;
          to_add -= 2;
 
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi0));
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi1));
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi0));
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi1));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, bi0));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, bi1));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, fi0));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, fi1));
 
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
 
-          /*
-           * Turn this on if you run into
-           * "bad monkey" contexts, and you want to know exactly
-           * which nodes they've visited... See main.c...
-           */
-          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
-          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b1);
+         /*
+          * Turn this on if you run into
+          * "bad monkey" contexts, and you want to know exactly
+          * which nodes they've visited... See main.c...
+          */
+         VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+         VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
 
          CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
@@ -1443,8 +1499,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
-         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;
-         vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32)~0;
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+         vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 
          b0->error = node->errors[error0];
          b1->error = node->errors[error1];
@@ -1456,24 +1512,22 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
          /* Give new buffers to hardware. */
          d0.rx_to_hw.tail_address =
-              vlib_get_buffer_data_physical_address (vm, fi0);
+           vlib_get_buffer_data_physical_address (vm, fi0);
          d1.rx_to_hw.tail_address =
-              vlib_get_buffer_data_physical_address (vm, fi1);
+           vlib_get_buffer_data_physical_address (vm, fi1);
          d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
          d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
-          d[0].as_u32x4 = d0.as_u32x4;
-          d[1].as_u32x4 = d1.as_u32x4;
+         d[0].as_u32x4 = d0.as_u32x4;
+         d[1].as_u32x4 = d1.as_u32x4;
 
          d += 2;
          n_descriptors_left -= 2;
 
          /* Point to either l2 or l3 header depending on next. */
          l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
-              ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00)
-              : 0;
+           ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
          l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
-              ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01)
-              : 0;
+           ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
 
          b0->current_length = len0 - l3_offset0;
          b1->current_length = len1 - l3_offset1;
@@ -1492,122 +1546,126 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
              if (is_eop0)
                {
-                 u8 * msg = vlib_validate_buffer (vm, bi_sop0, /* follow_buffer_next */ 1);
-                 ASSERT (! msg);
+                 u8 *msg = vlib_validate_buffer (vm, bi_sop0,
+                                                 /* follow_buffer_next */ 1);
+                 ASSERT (!msg);
                }
              if (is_eop1)
                {
-                 u8 * msg = vlib_validate_buffer (vm, bi_sop1, /* follow_buffer_next */ 1);
-                 ASSERT (! msg);
+                 u8 *msg = vlib_validate_buffer (vm, bi_sop1,
+                                                 /* follow_buffer_next */ 1);
+                 ASSERT (!msg);
+               }
+           }
+         if (0)                /* "Dave" version */
+           {
+             u32 bi_sop0 = is_sop ? bi0 : bi_sop;
+             u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
+
+             if (is_eop0)
+               {
+                 to_next[0] = bi_sop0;
+                 to_next++;
+                 n_left_to_next--;
+
+                 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                                  to_next, n_left_to_next,
+                                                  bi_sop0, next0);
+               }
+             if (is_eop1)
+               {
+                 to_next[0] = bi_sop1;
+                 to_next++;
+                 n_left_to_next--;
+
+                 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                                  to_next, n_left_to_next,
+                                                  bi_sop1, next1);
+               }
+             is_sop = is_eop1;
+             bi_sop = bi_sop1;
+           }
+         if (1)                /* "Eliot" version */
+           {
+             /* Speculatively enqueue to cached next. */
+             u8 saved_is_sop = is_sop;
+             u32 bi_sop_save = bi_sop;
+
+             bi_sop = saved_is_sop ? bi0 : bi_sop;
+             to_next[0] = bi_sop;
+             to_next += is_eop0;
+             n_left_to_next -= is_eop0;
+
+             bi_sop = is_eop0 ? bi1 : bi_sop;
+             to_next[0] = bi_sop;
+             to_next += is_eop1;
+             n_left_to_next -= is_eop1;
+
+             is_sop = is_eop1;
+
+             if (PREDICT_FALSE
+                 (!(next0 == next_index && next1 == next_index)))
+               {
+                 /* Undo speculation. */
+                 to_next -= is_eop0 + is_eop1;
+                 n_left_to_next += is_eop0 + is_eop1;
+
+                 /* Re-do both descriptors being careful about where we enqueue. */
+                 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
+                 if (is_eop0)
+                   {
+                     if (next0 != next_index)
+                       vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+                     else
+                       {
+                         to_next[0] = bi_sop;
+                         to_next += 1;
+                         n_left_to_next -= 1;
+                       }
+                   }
+
+                 bi_sop = is_eop0 ? bi1 : bi_sop;
+                 if (is_eop1)
+                   {
+                     if (next1 != next_index)
+                       vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
+                     else
+                       {
+                         to_next[0] = bi_sop;
+                         to_next += 1;
+                         n_left_to_next -= 1;
+                       }
+                   }
+
+                 /* Switch cached next index when next for both packets is the same. */
+                 if (is_eop0 && is_eop1 && next0 == next1)
+                   {
+                     vlib_put_next_frame (vm, node, next_index,
+                                          n_left_to_next);
+                     next_index = next0;
+                     vlib_get_next_frame (vm, node, next_index,
+                                          to_next, n_left_to_next);
+                   }
                }
            }
-          if (0) /* "Dave" version */
-            {
-              u32 bi_sop0 = is_sop ? bi0 : bi_sop;
-              u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
-
-              if (is_eop0)
-                {
-                  to_next[0] = bi_sop0;
-                  to_next++;
-                  n_left_to_next--;
-
-                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                   to_next, n_left_to_next,
-                                                   bi_sop0, next0);
-                }
-              if (is_eop1)
-                {
-                  to_next[0] = bi_sop1;
-                  to_next++;
-                  n_left_to_next--;
-
-                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                   to_next, n_left_to_next,
-                                                   bi_sop1, next1);
-                }
-              is_sop = is_eop1;
-              bi_sop = bi_sop1;
-            }
-          if (1) /* "Eliot" version */
-            {
-              /* Speculatively enqueue to cached next. */
-              u8 saved_is_sop = is_sop;
-              u32 bi_sop_save = bi_sop;
-
-              bi_sop = saved_is_sop ? bi0 : bi_sop;
-              to_next[0] = bi_sop;
-              to_next += is_eop0;
-              n_left_to_next -= is_eop0;
-
-              bi_sop = is_eop0 ? bi1 : bi_sop;
-              to_next[0] = bi_sop;
-              to_next += is_eop1;
-              n_left_to_next -= is_eop1;
-
-              is_sop = is_eop1;
-
-              if (PREDICT_FALSE (! (next0 == next_index && next1 == next_index)))
-                {
-                  /* Undo speculation. */
-                  to_next -= is_eop0 + is_eop1;
-                  n_left_to_next += is_eop0 + is_eop1;
-
-                  /* Re-do both descriptors being careful about where we enqueue. */
-                  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
-                  if (is_eop0)
-                    {
-                      if (next0 != next_index)
-                        vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
-                      else
-                        {
-                          to_next[0] = bi_sop;
-                          to_next += 1;
-                          n_left_to_next -= 1;
-                        }
-                    }
-
-                  bi_sop = is_eop0 ? bi1 : bi_sop;
-                  if (is_eop1)
-                    {
-                      if (next1 != next_index)
-                        vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
-                      else
-                        {
-                          to_next[0] = bi_sop;
-                          to_next += 1;
-                          n_left_to_next -= 1;
-                        }
-                    }
-
-                  /* Switch cached next index when next for both packets is the same. */
-                  if (is_eop0 && is_eop1 && next0 == next1)
-                    {
-                      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-                      next_index = next0;
-                      vlib_get_next_frame (vm, node, next_index,
-                                           to_next, n_left_to_next);
-                    }
-                }
-            }
        }
 
-    /* Bail out of dual loop and proceed with single loop. */
+      /* Bail out of dual loop and proceed with single loop. */
     found_hw_owned_descriptor_x2:
 
       while (n_descriptors_left > 0 && n_left_to_next > 0)
        {
-         vlib_buffer_t * b0;
+         vlib_buffer_t *b0;
          u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
          u8 is_eop0, error0, next0;
-          ixge_descriptor_t d0;
+         ixge_descriptor_t d0;
 
-          d0.as_u32x4 = d[0].as_u32x4;
+         d0.as_u32x4 = d[0].as_u32x4;
 
          s20 = d0.rx_from_hw.status[2];
          s00 = d0.rx_from_hw.status[0];
 
-         if (! (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+         if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
            goto found_hw_owned_descriptor_x1;
 
          bi0 = to_rx[0];
@@ -1618,21 +1676,23 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          to_rx += 1;
          to_add -= 1;
 
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi0));
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi0));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, bi0));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+                 vlib_buffer_is_known (vm, fi0));
 
          b0 = vlib_get_buffer (vm, bi0);
 
-          /*
-           * Turn this on if you run into
-           * "bad monkey" contexts, and you want to know exactly
-           * which nodes they've visited...
-           */
-          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
+         /*
+          * Turn this on if you run into
+          * "bad monkey" contexts, and you want to know exactly
+          * which nodes they've visited...
+          */
+         VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
 
          is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
          ixge_rx_next_and_error_from_status_x1
-            (xd, s00, s20, &next0, &error0, &flags0);
+           (xd, s00, s20, &next0, &error0, &flags0);
 
          next0 = is_sop ? next0 : next_index_sop;
          next_index_sop = next0;
@@ -1640,7 +1700,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
-         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 
          b0->error = node->errors[error0];
 
@@ -1650,17 +1710,16 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
          /* Give new buffer to hardware. */
          d0.rx_to_hw.tail_address =
-              vlib_get_buffer_data_physical_address (vm, fi0);
+           vlib_get_buffer_data_physical_address (vm, fi0);
          d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
-          d[0].as_u32x4 = d0.as_u32x4;
+         d[0].as_u32x4 = d0.as_u32x4;
 
          d += 1;
          n_descriptors_left -= 1;
 
          /* Point to either l2 or l3 header depending on next. */
          l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
-              ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00)
-              : 0;
+           ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
          b0->current_length = len0 - l3_offset0;
          b0->current_data = l3_offset0;
 
@@ -1672,48 +1731,49 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 
          if (CLIB_DEBUG > 0 && is_eop0)
            {
-             u8 * msg = vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
-             ASSERT (! msg);
+             u8 *msg =
+               vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
+             ASSERT (!msg);
            }
 
-          if (0) /* "Dave" version */
-            {
-              if (is_eop0)
-                {
-                  to_next[0] = bi_sop;
-                  to_next++;
-                  n_left_to_next--;
-
-                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                   to_next, n_left_to_next,
-                                                   bi_sop, next0);
-                }
-            }
-          if (1) /* "Eliot" version */
-            {
-              if (PREDICT_TRUE (next0 == next_index))
-                {
-                  to_next[0] = bi_sop;
-                  to_next += is_eop0;
-                  n_left_to_next -= is_eop0;
-                }
-              else
-                {
-                  if (next0 != next_index && is_eop0)
-                    vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
-
-                  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-                  next_index = next0;
-                  vlib_get_next_frame (vm, node, next_index,
-                                       to_next, n_left_to_next);
-                }
-            }
-          is_sop = is_eop0;
+         if (0)                /* "Dave" version */
+           {
+             if (is_eop0)
+               {
+                 to_next[0] = bi_sop;
+                 to_next++;
+                 n_left_to_next--;
+
+                 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                                  to_next, n_left_to_next,
+                                                  bi_sop, next0);
+               }
+           }
+         if (1)                /* "Eliot" version */
+           {
+             if (PREDICT_TRUE (next0 == next_index))
+               {
+                 to_next[0] = bi_sop;
+                 to_next += is_eop0;
+                 n_left_to_next -= is_eop0;
+               }
+             else
+               {
+                 if (next0 != next_index && is_eop0)
+                   vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+
+                 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+                 next_index = next0;
+                 vlib_get_next_frame (vm, node, next_index,
+                                      to_next, n_left_to_next);
+               }
+           }
+         is_sop = is_eop0;
        }
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
- found_hw_owned_descriptor_x1:
+found_hw_owned_descriptor_x1:
   if (n_descriptors_left > 0)
     vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 
@@ -1728,8 +1788,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
        ixge_rx_trace (xm, xd, dq,
                       d_trace_save,
                       d_trace_buffers,
-                      &dq->descriptors[start_descriptor_index],
-                      n);
+                      &dq->descriptors[start_descriptor_index], n);
        vlib_set_trace_count (vm, node, n_trace - n);
       }
     if (d_trace_save)
@@ -1743,8 +1802,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
        enqueued a packet. */
     if (is_sop)
       {
-        b_last->next_buffer = ~0;
-        bi_last = ~0;
+       b_last->next_buffer = ~0;
+       bi_last = ~0;
       }
 
     dq->rx.n_descriptors_done_this_call = n_done;
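The "Eliot" version above is the usual vlib speculative-enqueue pattern: guess that the current packet goes to the same next node as the previous one, enqueue into the current frame on that guess, and pay for a frame switch only when the guess is wrong. A minimal standalone model of that control flow (plain C, one buffer per packet; flush_frame() is a hypothetical stand-in for vlib_put_next_frame/vlib_get_next_frame):

#include <stdio.h>

#define FRAME_SIZE 4

static unsigned frame[FRAME_SIZE];

static void
flush_frame (unsigned next_index, unsigned n_in_frame)
{
  if (n_in_frame)
    printf ("hand %u buffers to next node %u\n", n_in_frame, next_index);
}

int
main (void)
{
  /* Per-packet next-node indices; index 0 is the common, guessed case. */
  unsigned nexts[] = { 0, 0, 0, 2, 2, 0 };
  unsigned next_index = 0;      /* speculation: same as the last packet */
  unsigned n_left = FRAME_SIZE;
  unsigned *to_next = frame;
  unsigned i;

  for (i = 0; i < sizeof (nexts) / sizeof (nexts[0]); i++)
    {
      if (nexts[i] == next_index)
        {                       /* fast path: the guess was right */
          *to_next++ = i;
          if (--n_left == 0)
            {                   /* frame full: flush, start a new one */
              flush_frame (next_index, FRAME_SIZE);
              to_next = frame;
              n_left = FRAME_SIZE;
            }
        }
      else
        {                       /* slow path: flush, retarget the frame */
          flush_frame (next_index, FRAME_SIZE - n_left);
          next_index = nexts[i];
          to_next = frame;
          n_left = FRAME_SIZE;
          *to_next++ = i;
          n_left--;
        }
    }
  flush_frame (next_index, FRAME_SIZE - n_left);
  return 0;
}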
@@ -1763,16 +1822,16 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
 static uword
 ixge_rx_queue (ixge_main_t * xm,
               ixge_device_t * xd,
-              vlib_node_runtime_t * node,
-              u32 queue_index)
+              vlib_node_runtime_t * node, u32 queue_index)
 {
-  ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
-  ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
+  ixge_dma_queue_t *dq =
+    vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
+  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
   uword n_packets = 0;
   u32 hw_head_index, sw_head_index;
 
   /* One time initialization. */
-  if (! dq->rx.node)
+  if (!dq->rx.node)
     {
       dq->rx.node = node;
       dq->rx.is_start_of_packet = 1;
@@ -1797,7 +1856,9 @@ ixge_rx_queue (ixge_main_t * xm,
     {
       u32 n_tried = dq->n_descriptors - sw_head_index;
       n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
-      sw_head_index = ixge_ring_add (dq, sw_head_index, dq->rx.n_descriptors_done_this_call);
+      sw_head_index =
+       ixge_ring_add (dq, sw_head_index,
+                      dq->rx.n_descriptors_done_this_call);
 
       if (dq->rx.n_descriptors_done_this_call != n_tried)
        goto done;
@@ -1806,60 +1867,63 @@ ixge_rx_queue (ixge_main_t * xm,
     {
       u32 n_tried = hw_head_index - sw_head_index;
       n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
-      sw_head_index = ixge_ring_add (dq, sw_head_index, dq->rx.n_descriptors_done_this_call);
+      sw_head_index =
+       ixge_ring_add (dq, sw_head_index,
+                      dq->rx.n_descriptors_done_this_call);
     }
 
- done:
+done:
   dq->head_index = sw_head_index;
-  dq->tail_index = ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
+  dq->tail_index =
+    ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
 
   /* Give tail back to hardware. */
   CLIB_MEMORY_BARRIER ();
 
   dr->tail_index = dq->tail_index;
 
-  vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters
-                                  + VNET_INTERFACE_COUNTER_RX,
-                                   0 /* cpu_index */,
-                                  xd->vlib_sw_if_index,
-                                  n_packets,
+  vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters
+                                  + VNET_INTERFACE_COUNTER_RX,
+                                  0 /* cpu_index */ ,
+                                  xd->vlib_sw_if_index, n_packets,
                                   dq->rx.n_bytes);
 
   return n_packets;
 }
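ixge_ring_add / ixge_ring_sub (defined earlier in this file) are what keep the head and tail indices inside [0, n_descriptors) above. A hedged sketch of the contract the wrap handling relies on, with our own names, assuming both indices are already in range and a step never exceeds the ring size:

/* Index i advanced by n positions around a ring of n_descriptors slots. */
static inline unsigned
ring_add (unsigned n_descriptors, unsigned i, unsigned n)
{
  i += n;
  return i >= n_descriptors ? i - n_descriptors : i;
}

/* Number of slots from i0 forward to i1, wrapping at most once. */
static inline unsigned
ring_sub (unsigned n_descriptors, unsigned i0, unsigned i1)
{
  return i1 >= i0 ? i1 - i0 : i1 + n_descriptors - i0;
}

This is why the caller above first drains from sw_head_index to the end of the ring and then restarts from slot 0 when the hardware head is numerically behind the software head.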
 
-static void ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
+static void
+ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_regs_t * r = xd->regs;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_regs_t *r = xd->regs;
 
   if (i != 20)
     {
-      ELOG_TYPE_DECLARE (e) = {
-       .function = (char *) __FUNCTION__,
-       .format = "ixge %d, %s",
-       .format_args = "i1t1",
-       .n_enum_strings = 16,
-       .enum_strings = {
-         "flow director",
-         "rx miss",
-         "pci exception",
-         "mailbox",
-         "link status change",
-         "linksec key exchange",
-         "manageability event",
-         "reserved23",
-         "sdp0",
-         "sdp1",
-         "sdp2",
-         "sdp3",
-         "ecc",
-         "descriptor handler error",
-         "tcp timer",
-         "other",
-       },
-      };
-      struct { u8 instance; u8 index; } * ed;
+      ELOG_TYPE_DECLARE (e) = {
+       .function = (char *) __FUNCTION__,
+       .format = "ixge %d, %s",
+       .format_args = "i1t1",
+       .n_enum_strings = 16,
+       .enum_strings = {
+         "flow director",
+         "rx miss",
+         "pci exception",
+         "mailbox",
+         "link status change",
+         "linksec key exchange",
+         "manageability event",
+         "reserved23",
+         "sdp0",
+         "sdp1",
+         "sdp2",
+         "sdp3",
+         "ecc",
+         "descriptor handler error",
+         "tcp timer",
+         "other",
+       },
+      };
+      struct
+      {
+       u8 instance;
+       u8 index;
+      } *ed;
       ed = ELOG_DATA (&vm->elog_main, e);
       ed->instance = xd->device_index;
       ed->index = i - 16;
@@ -1869,27 +1933,29 @@ static void ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
       u32 v = r->xge_mac.link_status;
       uword is_up = (v & (1 << 30)) != 0;
 
-      ELOG_TYPE_DECLARE (e) = {
-       .function = (char *) __FUNCTION__,
-       .format = "ixge %d, link status change 0x%x",
-       .format_args = "i4i4",
-      };
-      struct { u32 instance, link_status; } * ed;
+      ELOG_TYPE_DECLARE (e) = {
+       .function = (char *) __FUNCTION__,
+       .format = "ixge %d, link status change 0x%x",
+       .format_args = "i4i4",
+      };
+      struct
+      {
+       u32 instance, link_status;
+      } *ed;
       ed = ELOG_DATA (&vm->elog_main, e);
       ed->instance = xd->device_index;
       ed->link_status = v;
       xd->link_status_at_last_link_change = v;
 
       vlib_process_signal_event (vm, ixge_process_node.index,
-                                 EVENT_SET_FLAGS,
-                                 ((is_up<<31) | xd->vlib_hw_if_index));
+                                EVENT_SET_FLAGS,
+                                ((is_up << 31) | xd->vlib_hw_if_index));
     }
 }
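Each ELOG_TYPE_DECLARE above registers a static event type whose .format_args string describes the binary record layout ("i4" a 4-byte integer, "t1" a 1-byte index into .enum_strings); ELOG_DATA then hands back a pointer to a fresh record of exactly that size. A stripped-down sketch of the same pattern as used in this file (compilable only inside VPP; it assumes a live vlib_main_t):

static void
example_elog (vlib_main_t * vm, u32 instance)
{
  ELOG_TYPE_DECLARE (e) = {
    .function = (char *) __FUNCTION__,
    .format = "example event on device %d",
    .format_args = "i4",
  };
  struct
  {
    u32 instance;              /* must match .format_args: one 4-byte int */
  } *ed;

  ed = ELOG_DATA (&vm->elog_main, e);
  ed->instance = instance;
}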
 
 always_inline u32
 clean_block (u32 * b, u32 * t, u32 n_left)
 {
-  u32 * t0 = t;
+  u32 *t0 = t;
 
   while (n_left >= 4)
     {
@@ -1932,9 +1998,10 @@ clean_block (u32 * b, u32 * t, u32 n_left)
 static void
 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 {
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
-  u32 n_clean, * b, * t, * t0;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_dma_queue_t *dq =
+    vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
+  u32 n_clean, *b, *t, *t0;
   i32 n_hw_owned_descriptors;
   i32 first_to_clean, last_to_clean;
   u64 hwbp_race = 0;
@@ -1948,12 +2015,15 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
       hwbp_race++;
       if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
        {
-         ELOG_TYPE_DECLARE (e) = {
-           .function = (char *) __FUNCTION__,
-           .format = "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
-           .format_args = "i4i4i4i4",
-         };
-         struct { u32 instance, head_index, tail_index, n_buffers_on_ring; } * ed;
+         ELOG_TYPE_DECLARE (e) = {
+           .function = (char *) __FUNCTION__,
+           .format = "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
+           .format_args = "i4i4i4i4",
+         };
+         struct
+         {
+           u32 instance, head_index, tail_index, n_buffers_on_ring;
+         } *ed;
          ed = ELOG_DATA (&vm->elog_main, e);
          ed->instance = xd->device_index;
          ed->head_index = dq->head_index;
@@ -1964,23 +2034,26 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 
   dq->head_index = dq->tx.head_index_write_back[0];
   n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
-  ASSERT(dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
+  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
   n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
 
   if (IXGE_HWBP_RACE_ELOG && hwbp_race)
     {
-         ELOG_TYPE_DECLARE (e) = {
-           .function = (char *) __FUNCTION__,
-           .format = "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
-           .format_args = "i4i4i4i4i4",
-         };
-         struct { u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries; } * ed;
-         ed = ELOG_DATA (&vm->elog_main, e);
-         ed->instance = xd->device_index;
-         ed->head_index = dq->head_index;
-         ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
-         ed->n_clean = n_clean;
-         ed->retries = hwbp_race;
+      ELOG_TYPE_DECLARE (e) = {
+       .function = (char *) __FUNCTION__,
+       .format = "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
+       .format_args = "i4i4i4i4i4",
+      };
+      struct
+      {
+       u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
+      } *ed;
+      ed = ELOG_DATA (&vm->elog_main, e);
+      ed->instance = xd->device_index;
+      ed->head_index = dq->head_index;
+      ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
+      ed->n_clean = n_clean;
+      ed->retries = hwbp_race;
     }
 
   /*
@@ -1996,11 +2069,11 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
   /* Clean the n_clean descriptors prior to the reported hardware head */
   last_to_clean = dq->head_index - 1;
   last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
-      last_to_clean;
+    last_to_clean;
 
   first_to_clean = (last_to_clean) - (n_clean - 1);
   first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
-      first_to_clean;
+    first_to_clean;
 
   vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
   t0 = t = xm->tx_buffers_pending_free;
@@ -2016,7 +2089,7 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 
   /* Typical case: clean from first to last */
   if (first_to_clean <= last_to_clean)
-      t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
+    t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
 
   if (t > t0)
     {
@@ -2029,25 +2102,39 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 }
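first_to_clean and last_to_clean above are inclusive ring positions, so the clean-up degenerates into at most two linear clean_block() calls: the unwrapped case shown in this hunk, and a wrapped case (end of the ring, then the start) handled in the elided lines. A standalone model of the index math, with clean_range() standing in for clean_block():

#include <stdio.h>

#define N_DESC 8

static void
clean_range (int first, int last)
{
  printf ("clean descriptors %d..%d\n", first, last);
}

int
main (void)
{
  int head_index = 2;          /* hardware has consumed up to here */
  int n_clean = 5;             /* descriptors to reclaim */

  /* Clean the n_clean descriptors just before the hardware head. */
  int last = head_index - 1;
  if (last < 0)
    last += N_DESC;
  int first = last - (n_clean - 1);
  if (first < 0)
    first += N_DESC;

  if (first <= last)
    clean_range (first, last); /* typical case: one linear block */
  else
    {
      clean_range (first, N_DESC - 1); /* wrapped: end of ring first */
      clean_range (0, last);           /* then from the start of the ring */
    }
  return 0;
}

With head_index 2 and n_clean 5 this cleans 5..7 and then 0..1, i.e. exactly the five descriptors preceding the head.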
 
 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
-always_inline uword ixge_interrupt_is_rx_queue (uword i)
-{ return i < 8; }
+always_inline uword
+ixge_interrupt_is_rx_queue (uword i)
+{
+  return i < 8;
+}
 
-always_inline uword ixge_interrupt_is_tx_queue (uword i)
-{ return i >= 8 && i < 16; }
+always_inline uword
+ixge_interrupt_is_tx_queue (uword i)
+{
+  return i >= 8 && i < 16;
+}
 
-always_inline uword ixge_tx_queue_to_interrupt (uword i)
-{ return 8 + i; }
+always_inline uword
+ixge_tx_queue_to_interrupt (uword i)
+{
+  return 8 + i;
+}
 
-always_inline uword ixge_rx_queue_to_interrupt (uword i)
-{ return 0 + i; }
+always_inline uword
+ixge_rx_queue_to_interrupt (uword i)
+{
+  return 0 + i;
+}
 
-always_inline uword ixge_interrupt_rx_queue (uword i)
+always_inline uword
+ixge_interrupt_rx_queue (uword i)
 {
   ASSERT (ixge_interrupt_is_rx_queue (i));
   return i - 0;
 }
 
-always_inline uword ixge_interrupt_tx_queue (uword i)
+always_inline uword
+ixge_interrupt_tx_queue (uword i)
 {
   ASSERT (ixge_interrupt_is_tx_queue (i));
   return i - 8;
@@ -2055,10 +2142,9 @@ always_inline uword ixge_interrupt_tx_queue (uword i)
 
 static uword
 ixge_device_input (ixge_main_t * xm,
-                  ixge_device_t * xd,
-                  vlib_node_runtime_t * node)
+                  ixge_device_t * xd, vlib_node_runtime_t * node)
 {
-  ixge_regs_t * r = xd->regs;
+  ixge_regs_t *r = xd->regs;
   u32 i, s;
   uword n_rx_packets = 0;
 
@@ -2066,6 +2152,7 @@ ixge_device_input (ixge_main_t * xm,
   if (s)
     r->interrupt.status_write_1_to_clear = s;
 
+  /* *INDENT-OFF* */
   foreach_set_bit (i, s, ({
     if (ixge_interrupt_is_rx_queue (i))
       n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
@@ -2076,17 +2163,16 @@ ixge_device_input (ixge_main_t * xm,
     else
       ixge_interrupt (xm, xd, i);
   }));
+  /* *INDENT-ON* */
 
   return n_rx_packets;
 }
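foreach_set_bit visits each set bit of the status word exactly once, lowest bit first, and the bit index encodes the source: RX queues at bits 0-7, TX queues at 8-15, everything else dispatched to ixge_interrupt(). A standalone model of that dispatch, using the GCC/clang __builtin_ctz intrinsic in place of the clib macro:

#include <stdio.h>

int
main (void)
{
  unsigned s = (1 << 0) | (1 << 8) | (1 << 20); /* rx q0, tx q0, "other" */

  while (s)
    {
      unsigned i = __builtin_ctz (s);  /* index of lowest set bit */
      s &= s - 1;                      /* clear it */

      if (i < 8)
        printf ("rx queue %u interrupt\n", i);
      else if (i < 16)
        printf ("tx queue %u interrupt\n", i - 8);
      else
        printf ("other interrupt %u\n", i);
    }
  return 0;
}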
 
 static uword
-ixge_input (vlib_main_t * vm,
-           vlib_node_runtime_t * node,
-           vlib_frame_t * f)
+ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
 {
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd;
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd;
   uword n_rx_packets = 0;
 
   if (node->state == VLIB_NODE_STATE_INTERRUPT)
@@ -2094,6 +2180,7 @@ ixge_input (vlib_main_t * vm,
       uword i;
 
       /* Loop over devices with interrupts. */
+      /* *INDENT-OFF* */
       foreach_set_bit (i, node->runtime_data[0], ({
        xd = vec_elt_at_index (xm->devices, i);
        n_rx_packets += ixge_device_input (xm, xd, node);
@@ -2102,6 +2189,7 @@ ixge_input (vlib_main_t * vm,
        if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
          xd->regs->interrupt.enable_write_1_to_set = ~0;
       }));
+      /* *INDENT-ON* */
 
       /* Clear mask of devices with pending interrupts. */
       node->runtime_data[0] = 0;
@@ -2110,25 +2198,26 @@ ixge_input (vlib_main_t * vm,
     {
       /* Poll all devices for input/interrupts. */
       vec_foreach (xd, xm->devices)
-       {
-         n_rx_packets += ixge_device_input (xm, xd, node);
+      {
+       n_rx_packets += ixge_device_input (xm, xd, node);
 
-         /* Re-enable interrupts when switching out of polling mode. */
-         if (node->flags &
-             VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
-           xd->regs->interrupt.enable_write_1_to_set = ~0;
-       }
+       /* Re-enable interrupts when switching out of polling mode. */
+       if (node->flags &
+           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
+         xd->regs->interrupt.enable_write_1_to_set = ~0;
+      }
     }
 
   return n_rx_packets;
 }
 
-static char * ixge_error_strings[] = {
+static char *ixge_error_strings[] = {
 #define _(n,s) s,
-    foreach_ixge_error
+  foreach_ixge_error
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ixge_input_node, static) = {
   .function = ixge_input,
   .type = VLIB_NODE_TYPE_INPUT,
@@ -2154,12 +2243,14 @@ VLIB_REGISTER_NODE (ixge_input_node, static) = {
 
 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
 CLIB_MULTIARCH_SELECT_FN (ixge_input)
+/* *INDENT-ON* */
 
-static u8 * format_ixge_device_name (u8 * s, va_list * args)
+static u8 *
+format_ixge_device_name (u8 * s, va_list * args)
 {
   u32 i = va_arg (*args, u32);
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, i);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
   return format (s, "TenGigabitEthernet%U",
                 format_vlib_pci_handle, &xd->pci_device.bus_address);
 }
@@ -2175,7 +2266,8 @@ static u8 ixge_counter_flags[] = {
 #undef _64
 };
 
-static void ixge_update_counters (ixge_device_t * xd)
+static void
+ixge_update_counters (ixge_device_t * xd)
 {
   /* Byte offset for counter registers. */
   static u32 reg_offsets[] = {
@@ -2185,7 +2277,7 @@ static void ixge_update_counters (ixge_device_t * xd)
 #undef _
 #undef _64
   };
-  volatile u32 * r = (volatile u32 *) xd->regs;
+  volatile u32 *r = (volatile u32 *) xd->regs;
   int i;
 
   for (i = 0; i < ARRAY_LEN (xd->counters); i++)
@@ -2195,14 +2287,15 @@ static void ixge_update_counters (ixge_device_t * xd)
       if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
        r[o] = 0;
       if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
-       xd->counters[i] += (u64) r[o+1] << (u64) 32;
+       xd->counters[i] += (u64) r[o + 1] << (u64) 32;
     }
 }
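For 64-bit counters the hardware exposes two consecutive 32-bit registers, the low word at r[o] and the high word at r[o + 1], and the loop above accumulates both into the software u64. A standalone check of the widening arithmetic:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t r[2] = { 0x89abcdefu, 0x01234567u }; /* lo word, hi word */
  uint64_t counter = 0;

  counter += r[0];                      /* low 32 bits */
  counter += (uint64_t) r[1] << 32;     /* high 32 bits */

  /* Prints 0x0123456789abcdef. */
  printf ("0x%016llx\n", (unsigned long long) counter);
  return 0;
}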
 
-static u8 * format_ixge_device_id (u8 * s, va_list * args)
+static u8 *
+format_ixge_device_id (u8 * s, va_list * args)
 {
   u32 device_id = va_arg (*args, u32);
-  char * t = 0;
+  char *t = 0;
   switch (device_id)
     {
 #define _(f,n) case n: t = #f; break;
@@ -2219,35 +2312,36 @@ static u8 * format_ixge_device_id (u8 * s, va_list * args)
   return s;
 }
 
-static u8 * format_ixge_link_status (u8 * s, va_list * args)
+static u8 *
+format_ixge_link_status (u8 * s, va_list * args)
 {
-  ixge_device_t * xd = va_arg (*args, ixge_device_t *);
+  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
   u32 v = xd->link_status_at_last_link_change;
 
   s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
 
   {
-    char * modes[] = {
+    char *modes[] = {
       "1g", "10g parallel", "10g serial", "autoneg",
     };
-    char * speeds[] = {
+    char *speeds[] = {
       "unknown", "100m", "1g", "10g",
     };
     s = format (s, ", mode %s, speed %s",
-               modes[(v >> 26) & 3],
-               speeds[(v >> 28) & 3]);
+               modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
   }
 
   return s;
 }
 
-static u8 * format_ixge_device (u8 * s, va_list * args)
+static u8 *
+format_ixge_device (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
   CLIB_UNUSED (int verbose) = va_arg (*args, int);
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, dev_instance);
-  ixge_phy_t * phy = xd->phys + xd->phy_index;
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
+  ixge_phy_t *phy = xd->phys + xd->phy_index;
   uword indent = format_get_indent (s);
 
   ixge_update_counters (xd);
@@ -2255,8 +2349,7 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
 
   s = format (s, "Intel 8259X: id %U\n%Ulink %U",
              format_ixge_device_id, xd->device_id,
-             format_white_space, indent + 2,
-             format_ixge_link_status, xd);
+             format_white_space, indent + 2, format_ixge_link_status, xd);
 
   {
 
@@ -2274,30 +2367,31 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
 
   /* FIXME */
   {
-    ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
-    ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, 0);
+    ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
+    ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
     u32 hw_head_index = dr->head_index;
     u32 sw_head_index = dq->head_index;
     u32 nitems;
 
     nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
     s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
-                format_white_space, indent + 2, nitems, dq->n_descriptors);
+               format_white_space, indent + 2, nitems, dq->n_descriptors);
 
     s = format (s, "\n%U%d buffers in driver rx cache",
-                format_white_space, indent + 2, vec_len(xm->rx_buffers_to_add));
+               format_white_space, indent + 2,
+               vec_len (xm->rx_buffers_to_add));
 
     s = format (s, "\n%U%d buffers on tx queue 0 ring",
-                format_white_space, indent + 2,
-                xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
+               format_white_space, indent + 2,
+               xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
   }
   {
     u32 i;
     u64 v;
-    static char * names[] = {
+    static char *names[] = {
 #define _(a,f) #f,
 #define _64(a,f) _(a,f)
-    foreach_ixge_counter
+      foreach_ixge_counter
 #undef _
 #undef _64
     };
@@ -2308,18 +2402,18 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
        if (v != 0)
          s = format (s, "\n%U%-40U%16Ld",
                      format_white_space, indent + 2,
-                     format_c_identifier, names[i],
-                     v);
+                     format_c_identifier, names[i], v);
       }
   }
 
   return s;
 }
 
-static void ixge_clear_hw_interface_counters (u32 instance)
+static void
+ixge_clear_hw_interface_counters (u32 instance)
 {
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, instance);
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
   ixge_update_counters (xd);
   memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
 }
@@ -2328,12 +2422,13 @@ static void ixge_clear_hw_interface_counters (u32 instance)
  * Dynamically redirect all pkts from a specific interface
  * to the specified node
  */
-static void ixge_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
-                                          u32 node_index)
+static void
+ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+                             u32 node_index)
 {
-  ixge_main_t * xm = &ixge_main;
+  ixge_main_t *xm = &ixge_main;
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
-  ixge_device_t * xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+  ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
 
   /* Shut off redirection */
   if (node_index == ~0)
@@ -2347,6 +2442,7 @@ static void ixge_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
 }
 
 
+/* *INDENT-OFF* */
 VNET_DEVICE_CLASS (ixge_device_class) = {
   .name = "ixge",
   .tx_function = ixge_interface_tx,
@@ -2357,46 +2453,53 @@ VNET_DEVICE_CLASS (ixge_device_class) = {
   .admin_up_down_function = ixge_interface_admin_up_down,
   .rx_redirect_to_node = ixge_set_interface_next_node,
 };
+/* *INDENT-ON* */
 
-#define IXGE_N_BYTES_IN_RX_BUFFER  (2048)  // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
+#define IXGE_N_BYTES_IN_RX_BUFFER  (2048)      // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
 
 static clib_error_t *
 ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
 {
-  ixge_main_t * xm = &ixge_main;
-  vlib_main_t * vm = xm->vlib_main;
-  ixge_dma_queue_t * dq;
-  clib_error_t * error = 0;
+  ixge_main_t *xm = &ixge_main;
+  vlib_main_t *vm = xm->vlib_main;
+  ixge_dma_queue_t *dq;
+  clib_error_t *error = 0;
 
   vec_validate (xd->dma_queues[rt], queue_index);
   dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
 
-  if (! xm->n_descriptors_per_cache_line)
-    xm->n_descriptors_per_cache_line = CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
+  if (!xm->n_descriptors_per_cache_line)
+    xm->n_descriptors_per_cache_line =
+      CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
 
-  if (! xm->n_bytes_in_rx_buffer)
+  if (!xm->n_bytes_in_rx_buffer)
     xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
   xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
-  if (! xm->vlib_buffer_free_list_index)
+  if (!xm->vlib_buffer_free_list_index)
     {
-      xm->vlib_buffer_free_list_index = vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer, "ixge rx");
+      xm->vlib_buffer_free_list_index =
+       vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
+                                            "ixge rx");
       ASSERT (xm->vlib_buffer_free_list_index != 0);
     }
 
-  if (! xm->n_descriptors[rt])
+  if (!xm->n_descriptors[rt])
     xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
 
   dq->queue_index = queue_index;
-  dq->n_descriptors = round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
+  dq->n_descriptors =
+    round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
   dq->head_index = dq->tail_index = 0;
 
   dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
-                                               dq->n_descriptors * sizeof (dq->descriptors[0]),
-                                               128 /* per chip spec */);
+                                               dq->n_descriptors *
+                                               sizeof (dq->descriptors[0]),
+                                               128 /* per chip spec */ );
   if (error)
     return error;
 
-  memset (dq->descriptors, 0, dq->n_descriptors * sizeof (dq->descriptors[0]));
+  memset (dq->descriptors, 0,
+         dq->n_descriptors * sizeof (dq->descriptors[0]));
   vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
 
   if (rt == VLIB_RX)
@@ -2404,20 +2507,24 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
       u32 n_alloc, i;
 
       n_alloc = vlib_buffer_alloc_from_free_list
-       (vm, dq->descriptor_buffer_indices, vec_len (dq->descriptor_buffer_indices),
+       (vm, dq->descriptor_buffer_indices,
+        vec_len (dq->descriptor_buffer_indices),
         xm->vlib_buffer_free_list_index);
       ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
       for (i = 0; i < n_alloc; i++)
        {
-         vlib_buffer_t * b = vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
-         dq->descriptors[i].rx_to_hw.tail_address = vlib_physmem_virtual_to_physical (vm, b->data);
+         vlib_buffer_t *b =
+           vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
+         dq->descriptors[i].rx_to_hw.tail_address =
+           vlib_physmem_virtual_to_physical (vm, b->data);
        }
     }
   else
     {
       u32 i;
 
-      dq->tx.head_index_write_back = vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
+      dq->tx.head_index_write_back =
+       vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
 
       for (i = 0; i < dq->n_descriptors; i++)
        dq->descriptors[i].tx = xm->tx_descriptor_template;
@@ -2426,7 +2533,7 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
     }
 
   {
-    ixge_dma_regs_t * dr = get_dma_regs (xd, rt, queue_index);
+    ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
     u64 a;
 
     a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
@@ -2439,24 +2546,23 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
       {
        ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
        dr->rx_split_control =
-         (/* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
-          | (/* lo free descriptor threshold (units of 64 descriptors) */
-             (1 << 22))
-          | (/* descriptor type: advanced one buffer */
-             (1 << 25))
-          | (/* drop if no descriptors available */
-             (1 << 28)));
+         (/* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
+          /* lo free descriptor threshold (units of 64 descriptors) */
+          | (1 << 22)
+          /* descriptor type: advanced one buffer */
+          | (1 << 25)
+          /* drop if no descriptors available */
+          | (1 << 28));
 
        /* Give hardware all but last 16 cache lines' worth of descriptors. */
        dq->tail_index = dq->n_descriptors -
-          16*xm->n_descriptors_per_cache_line;
+         16 * xm->n_descriptors_per_cache_line;
       }
     else
       {
       /* Make sure it's initialized before hardware can get to it. */
        dq->tx.head_index_write_back[0] = dq->head_index;
 
-       a = vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
+       a =
+         vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
        dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
        dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
       }
@@ -2470,19 +2576,19 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
 
     if (rt == VLIB_TX)
       {
-        xd->regs->tx_dma_control |= (1 << 0);
-        dr->control |= ((32 << 0) /* prefetch threshold */
-                        | (64 << 8) /* host threshold */
-                        | (0 << 16) /* writeback threshold*/);
+       xd->regs->tx_dma_control |= (1 << 0);
+       dr->control |= ((32 << 0)       /* prefetch threshold */
+                       | (64 << 8)     /* host threshold */
+                       | (0 << 16) /* writeback threshold */ );
       }
 
     /* Enable this queue and wait for hardware to initialize
        before adding to tail. */
     if (rt == VLIB_TX)
       {
-        dr->control |= 1 << 25;
-        while (! (dr->control & (1 << 25)))
-          ;
+       dr->control |= 1 << 25;
+       while (!(dr->control & (1 << 25)))
+         ;
       }
 
     /* Set head/tail indices and enable DMA. */
@@ -2493,14 +2599,13 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
   return error;
 }
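The rounding above keeps each ring a whole number of cache lines: with a 64-byte line and the 16-byte descriptors this driver uses (the as_u32x4 union), n_descriptors_per_cache_line comes out to 4. A minimal model of clib's round_pow2, assuming (as clib requires) that the alignment is a power of two:

#include <stdio.h>

/* Round x up to the next multiple of pow2 (pow2 must be a power of two). */
static unsigned
round_pow2 (unsigned x, unsigned pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

int
main (void)
{
  unsigned n_desc_per_line = 64 / 16;  /* 64B cache line / 16B descriptor */

  printf ("%u\n", round_pow2 (1000, n_desc_per_line)); /* 1000: already aligned */
  printf ("%u\n", round_pow2 (1001, n_desc_per_line)); /* 1004: rounded up */
  return 0;
}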
 
-static u32 ixge_flag_change (vnet_main_t * vnm,
-                             vnet_hw_interface_t * hw,
-                             u32 flags)
+static u32
+ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
 {
-  ixge_device_t * xd;
-  ixge_regs_t * r;
+  ixge_device_t *xd;
+  ixge_regs_t *r;
   u32 old;
-  ixge_main_t * xm = &ixge_main;
+  ixge_main_t *xm = &ixge_main;
 
   xd = vec_elt_at_index (xm->devices, hw->dev_instance);
   r = xd->regs;
@@ -2508,121 +2613,119 @@ static u32 ixge_flag_change (vnet_main_t * vnm,
   old = r->filter_control;
 
   if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
-    r->filter_control = old |(1 << 9) /* unicast promiscuous */;
+    r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
   else
     r->filter_control = old & ~(1 << 9);
 
   return old;
 }
 
-static void ixge_device_init (ixge_main_t * xm)
+static void
+ixge_device_init (ixge_main_t * xm)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  ixge_device_t * xd;
+  vnet_main_t *vnm = vnet_get_main ();
+  ixge_device_t *xd;
 
   /* Reset chip(s). */
   vec_foreach (xd, xm->devices)
-    {
-      ixge_regs_t * r = xd->regs;
-      const u32 reset_bit = (1 << 26) | (1 << 3);
+  {
+    ixge_regs_t *r = xd->regs;
+    const u32 reset_bit = (1 << 26) | (1 << 3);
 
-      r->control |= reset_bit;
+    r->control |= reset_bit;
 
-      /* No need to suspend.  Timed to take ~1e-6 secs */
-      while (r->control & reset_bit)
-       ;
+    /* No need to suspend.  Timed to take ~1e-6 secs */
+    while (r->control & reset_bit)
+      ;
 
-      /* Software loaded. */
-      r->extended_control |= (1 << 28);
+    /* Software loaded. */
+    r->extended_control |= (1 << 28);
 
-      ixge_phy_init (xd);
+    ixge_phy_init (xd);
 
-      /* Register ethernet interface. */
-      {
-       u8 addr8[6];
-       u32 i, addr32[2];
-       clib_error_t * error;
-
-       addr32[0] = r->rx_ethernet_address0[0][0];
-       addr32[1] = r->rx_ethernet_address0[0][1];
-       for (i = 0; i < 6; i++)
-         addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
-
-       error = ethernet_register_interface
-         (vnm,
-          ixge_device_class.index,
-          xd->device_index,
-          /* ethernet address */ addr8,
-          &xd->vlib_hw_if_index,
-           ixge_flag_change);
-       if (error)
-         clib_error_report (error);
-      }
+    /* Register ethernet interface. */
+    {
+      u8 addr8[6];
+      u32 i, addr32[2];
+      clib_error_t *error;
+
+      addr32[0] = r->rx_ethernet_address0[0][0];
+      addr32[1] = r->rx_ethernet_address0[0][1];
+      for (i = 0; i < 6; i++)
+       addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
+
+      error = ethernet_register_interface
+       (vnm, ixge_device_class.index, xd->device_index,
+        /* ethernet address */ addr8,
+        &xd->vlib_hw_if_index, ixge_flag_change);
+      if (error)
+       clib_error_report (error);
+    }
 
-      {
-       vnet_sw_interface_t * sw = vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
-       xd->vlib_sw_if_index = sw->sw_if_index;
-      }
+    {
+      vnet_sw_interface_t *sw =
+       vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
+      xd->vlib_sw_if_index = sw->sw_if_index;
+    }
 
-      ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
+    ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
 
-      xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
+    xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
 
-      ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
+    ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
 
-      /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
-      r->interrupt.queue_mapping[0] =
-       ((/* valid bit */ (1 << 7) |
-         ixge_rx_queue_to_interrupt (0)) << 0);
+    /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
+    r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
+                                     ixge_rx_queue_to_interrupt (0)) << 0);
 
-      r->interrupt.queue_mapping[0] |=
-       ((/* valid bit */ (1 << 7) |
-         ixge_tx_queue_to_interrupt (0)) << 8);
+    r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
+                                      ixge_tx_queue_to_interrupt (0)) << 8);
 
-      /* No use in getting too many interrupts.
-        Limit them to one every 3/4 ring size at line rate
-        min sized packets.
-        No need for this since kernel/vlib main loop provides adequate interrupt
-        limiting scheme. */
-      if (0)
-       {
-         f64 line_rate_max_pps = 10e9 / (8 * (64 + /* interframe padding */ 20));
-         ixge_throttle_queue_interrupt (r, 0, .75 * xm->n_descriptors[VLIB_RX] / line_rate_max_pps);
-       }
+    /* There is no point in taking too many interrupts: limit them to
+       one per 3/4 of the ring at line rate with minimum-sized packets.
+       Disabled, since the kernel/vlib main loop already provides an
+       adequate interrupt-limiting scheme. */
+    if (0)
+      {
+       f64 line_rate_max_pps =
+         10e9 / (8 * (64 + /* interframe padding */ 20));
+       ixge_throttle_queue_interrupt (r, 0,
+                                      .75 * xm->n_descriptors[VLIB_RX] /
+                                      line_rate_max_pps);
+      }
 
-      /* Accept all multicast and broadcast packets. Should really add them
-        to the dst_ethernet_address register array. */
-      r->filter_control |= (1 << 10) | (1 << 8);
+    /* Accept all multicast and broadcast packets. Should really add them
+       to the dst_ethernet_address register array. */
+    r->filter_control |= (1 << 10) | (1 << 8);
 
-      /* Enable frames up to size in mac frame size register. */
-      r->xge_mac.control |= 1 << 2;
-      r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
+    /* Enable frames up to size in mac frame size register. */
+    r->xge_mac.control |= 1 << 2;
+    r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
 
-      /* Enable all interrupts. */
-      if (! IXGE_ALWAYS_POLL)
-       r->interrupt.enable_write_1_to_set = ~0;
-    }
+    /* Enable all interrupts. */
+    if (!IXGE_ALWAYS_POLL)
+      r->interrupt.enable_write_1_to_set = ~0;
+  }
 }
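The interface registration above unpacks a 6-byte MAC address from two little-endian 32-bit registers, one byte per iteration. A standalone check of that extraction loop:

#include <stdio.h>

int
main (void)
{
  unsigned addr32[2] = { 0x33221100u, 0x00005544u };
  unsigned char addr8[6];
  int i;

  /* Byte i comes from word i/4, shifted down by (i%4)*8 bits. */
  for (i = 0; i < 6; i++)
    addr8[i] = addr32[i / 4] >> ((i % 4) * 8);

  /* Prints 00:11:22:33:44:55. */
  printf ("%02x:%02x:%02x:%02x:%02x:%02x\n",
          addr8[0], addr8[1], addr8[2], addr8[3], addr8[4], addr8[5]);
  return 0;
}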
 
 static uword
-ixge_process (vlib_main_t * vm,
-             vlib_node_runtime_t * rt,
-             vlib_frame_t * f)
+ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  ixge_main_t * xm = &ixge_main;
-  ixge_device_t * xd;
-  uword event_type, * event_data = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  ixge_main_t *xm = &ixge_main;
+  ixge_device_t *xd;
+  uword event_type, *event_data = 0;
   f64 timeout, link_debounce_deadline;
 
   ixge_device_init (xm);
 
   /* Clear all counters. */
   vec_foreach (xd, xm->devices)
-    {
-      ixge_update_counters (xd);
-      memset (xd->counters, 0, sizeof (xd->counters));
-    }
+  {
+    ixge_update_counters (xd);
+    memset (xd->counters, 0, sizeof (xd->counters));
+  }
 
   timeout = 30.0;
   link_debounce_deadline = 1e70;
@@ -2630,40 +2733,41 @@ ixge_process (vlib_main_t * vm,
   while (1)
     {
       /* 36 bit stat counters could overflow in ~50 secs.
-        We poll every 30 secs to be conservative. */
+         We poll every 30 secs to be conservative. */
       vlib_process_wait_for_event_or_clock (vm, timeout);
 
       event_type = vlib_process_get_events (vm, &event_data);
 
-      switch (event_type) {
-      case EVENT_SET_FLAGS:
-        /* 1 ms */
-        link_debounce_deadline = vlib_time_now(vm) + 1e-3;
-        timeout = 1e-3;
-        break;
-
-      case ~0:
-       /* No events found: timer expired. */
-        if (vlib_time_now(vm) > link_debounce_deadline)
-          {
-            vec_foreach (xd, xm->devices)
-              {
-                ixge_regs_t * r = xd->regs;
-                u32 v = r->xge_mac.link_status;
-                uword is_up = (v & (1 << 30)) != 0;
-
-                vnet_hw_interface_set_flags
-                  (vnm, xd->vlib_hw_if_index,
-                   is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
-              }
-            link_debounce_deadline = 1e70;
-            timeout = 30.0;
-          }
-       break;
+      switch (event_type)
+       {
+       case EVENT_SET_FLAGS:
+         /* 1 ms */
+         link_debounce_deadline = vlib_time_now (vm) + 1e-3;
+         timeout = 1e-3;
+         break;
 
-      default:
-       ASSERT (0);
-      }
+       case ~0:
+         /* No events found: timer expired. */
+         if (vlib_time_now (vm) > link_debounce_deadline)
+           {
+             vec_foreach (xd, xm->devices)
+             {
+               ixge_regs_t *r = xd->regs;
+               u32 v = r->xge_mac.link_status;
+               uword is_up = (v & (1 << 30)) != 0;
+
+               vnet_hw_interface_set_flags
+                 (vnm, xd->vlib_hw_if_index,
+                  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
+             }
+             link_debounce_deadline = 1e70;
+             timeout = 30.0;
+           }
+         break;
+
+       default:
+         ASSERT (0);
+       }
 
       if (event_data)
        _vec_len (event_data) = 0;
@@ -2674,8 +2778,7 @@ ixge_process (vlib_main_t * vm,
        if (now - xm->time_last_stats_update > 30)
          {
            xm->time_last_stats_update = now;
-           vec_foreach (xd, xm->devices)
-             ixge_update_counters (xd);
+           vec_foreach (xd, xm->devices) ixge_update_counters (xd);
          }
       }
     }
@@ -2684,23 +2787,26 @@ ixge_process (vlib_main_t * vm,
 }
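Sanity check on the 30-second poll mentioned above: a 36-bit byte counter wraps at 2^36 = 64 GiB, and a 10 Gb/s link moves about 1.25 GB/s, so the counter can wrap in roughly 68.7e9 / 1.25e9 ≈ 55 seconds; polling every 30 seconds keeps each per-interval delta well under one wrap.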
 
 static vlib_node_registration_t ixge_process_node = {
-    .function = ixge_process,
-    .type = VLIB_NODE_TYPE_PROCESS,
-    .name = "ixge-process",
+  .function = ixge_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "ixge-process",
 };
 
-clib_error_t * ixge_init (vlib_main_t * vm)
+clib_error_t *
+ixge_init (vlib_main_t * vm)
 {
-  ixge_main_t * xm = &ixge_main;
-  clib_error_t * error;
+  ixge_main_t *xm = &ixge_main;
+  clib_error_t *error;
 
   xm->vlib_main = vm;
-  memset (&xm->tx_descriptor_template, 0, sizeof (xm->tx_descriptor_template));
-  memset (&xm->tx_descriptor_template_mask, 0, sizeof (xm->tx_descriptor_template_mask));
+  memset (&xm->tx_descriptor_template, 0,
+         sizeof (xm->tx_descriptor_template));
+  memset (&xm->tx_descriptor_template_mask, 0,
+         sizeof (xm->tx_descriptor_template_mask));
   xm->tx_descriptor_template.status0 =
-    (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
-     | IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
-     IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
+    (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
+     IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
+     IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
   xm->tx_descriptor_template_mask.status0 = 0xffff;
   xm->tx_descriptor_template_mask.status1 = 0x00003fff;
 
@@ -2719,16 +2825,17 @@ VLIB_INIT_FUNCTION (ixge_init);
 
 
 static void
-ixge_pci_intr_handler(vlib_pci_device_t * dev)
+ixge_pci_intr_handler (vlib_pci_device_t * dev)
 {
-  ixge_main_t * xm = &ixge_main;
-  vlib_main_t * vm = xm->vlib_main;
+  ixge_main_t *xm = &ixge_main;
+  vlib_main_t *vm = xm->vlib_main;
 
   vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
 
   /* Let node know which device is interrupting. */
   {
-    vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, ixge_input_node.index);
+    vlib_node_runtime_t *rt =
+      vlib_node_get_runtime (vm, ixge_input_node.index);
     rt->runtime_data[0] |= 1 << dev->private_data;
   }
 }
@@ -2736,10 +2843,10 @@ ixge_pci_intr_handler(vlib_pci_device_t * dev)
 static clib_error_t *
 ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
 {
-  ixge_main_t * xm = &ixge_main;
-  clib_error_t * error;
-  void * r;
-  ixge_device_t * xd;
+  ixge_main_t *xm = &ixge_main;
+  clib_error_t *error;
+  void *r;
+  ixge_device_t *xd;
 
   /* Device found: make sure we have dma memory. */
   if (unix_physmem_is_fake (vm))
@@ -2753,7 +2860,7 @@ ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
 
   if (vec_len (xm->devices) == 1)
     {
-      ixge_input_node.function = ixge_input_multiarch_select();
+      ixge_input_node.function = ixge_input_multiarch_select ();
     }
 
   xd->pci_device = dev[0];
@@ -2780,14 +2887,15 @@ ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
       xm->process_node_index = ixge_process_node.index;
     }
 
-  error = vlib_pci_bus_master_enable(dev);
+  error = vlib_pci_bus_master_enable (dev);
 
   if (error)
     return error;
 
-  return vlib_pci_intr_enable(dev);
+  return vlib_pci_intr_enable (dev);
 }
 
+/* *INDENT-OFF* */
 PCI_REGISTER_DEVICE (ixge_pci_device_registration, static) = {
   .init_function = ixge_pci_init,
   .interrupt_handler = ixge_pci_intr_handler,
@@ -2798,8 +2906,10 @@ PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
     { 0 },
   },
 };
+/* *INDENT-ON* */
 
-void ixge_set_next_node (ixge_rx_next_t next, char *name)
+void
+ixge_set_next_node (ixge_rx_next_t next, char *name)
 {
   vlib_node_registration_t *r = &ixge_input_node;
 
@@ -2817,3 +2927,11 @@ void ixge_set_next_node (ixge_rx_next_t next, char *name)
     }
 }
 #endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */