Commit e971a119 authored by Aaro Koskinen, committed by Greg Kroah-Hartman

staging: octeon: support enabling multiple rx groups

Support enabling multiple RX groups.
Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 942bab48
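
The patch turns the single oct_rx_group into a 16-entry array and adds a pow_receive_groups bitmask; every init, poll and shutdown path then walks all 16 slots and skips groups whose bit is clear. A minimal standalone sketch of that mask walk follows (illustration only, not driver code: the struct, the IRQ base and the printout are hypothetical stand-ins; only the bit test mirrors the patch):

#include <stdio.h>

#define NUM_RX_GROUPS 16   /* matches oct_rx_group[16] in the patch */
#define FAKE_IRQ_BASE 100  /* hypothetical stand-in for OCTEON_IRQ_WORKQ0 */

/* Simplified stand-in for struct oct_rx_group. */
struct rx_group {
        int irq;
        int group;
};

static struct rx_group rx_groups[NUM_RX_GROUPS];

int main(void)
{
        /* One default group, like pow_receive_groups = BIT(pow_receive_group). */
        unsigned int receive_group_mask = 1u << 15;
        int i;

        /* Walk every possible group; skip the ones whose bit is clear. */
        for (i = 0; i < NUM_RX_GROUPS; i++) {
                if (!(receive_group_mask & (1u << i)))
                        continue;

                /* In the driver this is where netif_napi_add()/request_irq()
                 * run for the group; here we only record the mapping.
                 */
                rx_groups[i].irq = FAKE_IRQ_BASE + i;
                rx_groups[i].group = i;
                printf("group %d enabled, irq %d\n", i, rx_groups[i].irq);
        }

        return 0;
}

With the default pow_receive_group=15 only bit 15 is set, so behaviour matches the old single-group code; a wider mask would enable additional groups, each with its own NAPI context and IRQ.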
drivers/staging/octeon/ethernet-rx.c
@@ -47,7 +47,7 @@ static struct oct_rx_group {
         int irq;
         int group;
         struct napi_struct napi;
-} oct_rx_group;
+} oct_rx_group[16];
 
 /**
  * cvm_oct_do_interrupt - interrupt handler.
@@ -442,7 +442,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
  */
 void cvm_oct_poll_controller(struct net_device *dev)
 {
-        cvm_oct_poll(&oct_rx_group, 16);
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+
+                if (!(pow_receive_groups & BIT(i)))
+                        continue;
+
+                cvm_oct_poll(&oct_rx_group[i], 16);
+
+        }
 }
 #endif
@@ -461,65 +470,80 @@ void cvm_oct_rx_initialize(void)
         if (!dev_for_napi)
                 panic("No net_devices were allocated.");
 
-        netif_napi_add(dev_for_napi, &oct_rx_group.napi, cvm_oct_napi_poll,
-                       rx_napi_weight);
-        napi_enable(&oct_rx_group.napi);
+        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+                int ret;
 
-        oct_rx_group.irq = OCTEON_IRQ_WORKQ0 + pow_receive_group;
-        oct_rx_group.group = pow_receive_group;
+                if (!(pow_receive_groups & BIT(i)))
+                        continue;
 
-        /* Register an IRQ handler to receive POW interrupts */
-        i = request_irq(oct_rx_group.irq, cvm_oct_do_interrupt, 0, "Ethernet",
-                        &oct_rx_group.napi);
+                netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
+                               cvm_oct_napi_poll, rx_napi_weight);
+                napi_enable(&oct_rx_group[i].napi);
 
-        if (i)
-                panic("Could not acquire Ethernet IRQ %d\n", oct_rx_group.irq);
+                oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+                oct_rx_group[i].group = i;
 
-        disable_irq_nosync(oct_rx_group.irq);
+                /* Register an IRQ handler to receive POW interrupts */
+                ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+                                  "Ethernet", &oct_rx_group[i].napi);
 
-        /* Enable POW interrupt when our port has at least one packet */
-        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-                union cvmx_sso_wq_int_thrx int_thr;
-                union cvmx_pow_wq_int_pc int_pc;
+                if (ret)
+                        panic("Could not acquire Ethernet IRQ %d\n",
+                              oct_rx_group[i].irq);
 
-                int_thr.u64 = 0;
-                int_thr.s.tc_en = 1;
-                int_thr.s.tc_thr = 1;
-                cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
-                               int_thr.u64);
+                disable_irq_nosync(oct_rx_group[i].irq);
 
-                int_pc.u64 = 0;
-                int_pc.s.pc_thr = 5;
-                cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
-        } else {
-                union cvmx_pow_wq_int_thrx int_thr;
-                union cvmx_pow_wq_int_pc int_pc;
+                /* Enable POW interrupt when our port has at least one packet */
+                if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+                        union cvmx_sso_wq_int_thrx int_thr;
+                        union cvmx_pow_wq_int_pc int_pc;
 
-                int_thr.u64 = 0;
-                int_thr.s.tc_en = 1;
-                int_thr.s.tc_thr = 1;
-                cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
-                               int_thr.u64);
+                        int_thr.u64 = 0;
+                        int_thr.s.tc_en = 1;
+                        int_thr.s.tc_thr = 1;
+                        cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
 
-                int_pc.u64 = 0;
-                int_pc.s.pc_thr = 5;
-                cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-        }
+                        int_pc.u64 = 0;
+                        int_pc.s.pc_thr = 5;
+                        cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+                } else {
+                        union cvmx_pow_wq_int_thrx int_thr;
+                        union cvmx_pow_wq_int_pc int_pc;
 
-        /* Schedule NAPI now. This will indirectly enable the interrupt. */
-        napi_schedule(&oct_rx_group.napi);
+                        int_thr.u64 = 0;
+                        int_thr.s.tc_en = 1;
+                        int_thr.s.tc_thr = 1;
+                        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
+
+                        int_pc.u64 = 0;
+                        int_pc.s.pc_thr = 5;
+                        cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+                }
+
+                /* Schedule NAPI now. This will indirectly enable the
+                 * interrupt.
+                 */
+                napi_schedule(&oct_rx_group[i].napi);
+        }
 }
 
 void cvm_oct_rx_shutdown(void)
 {
-        /* Disable POW interrupt */
-        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-                cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
-        else
-                cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+        int i;
 
-        /* Free the interrupt handler */
-        free_irq(oct_rx_group.irq, cvm_oct_device);
+        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+                if (!(pow_receive_groups & BIT(i)))
+                        continue;
 
-        netif_napi_del(&oct_rx_group.napi);
+                /* Disable POW interrupt */
+                if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+                        cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+                else
+                        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+                /* Free the interrupt handler */
+                free_irq(oct_rx_group[i].irq, cvm_oct_device);
+
+                netif_napi_del(&oct_rx_group[i].napi);
+        }
 }
drivers/staging/octeon/ethernet.c
@@ -45,7 +45,7 @@ MODULE_PARM_DESC(num_packet_buffers, "\n"
         "\tNumber of packet buffers to allocate and store in the\n"
         "\tFPA. By default, 1024 packet buffers are used.\n");
 
-int pow_receive_group = 15;
+static int pow_receive_group = 15;
 module_param(pow_receive_group, int, 0444);
 MODULE_PARM_DESC(pow_receive_group, "\n"
         "\tPOW group to receive packets from. All ethernet hardware\n"
@@ -86,6 +86,8 @@ int rx_napi_weight = 32;
 module_param(rx_napi_weight, int, 0444);
 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
 
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
+
 /*
  * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -678,6 +680,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
 
         cvmx_helper_initialize_packet_io_global();
 
+        pow_receive_groups = BIT(pow_receive_group);
+
         /* Change the input group for all ports before input is enabled */
         num_interfaces = cvmx_helper_get_number_of_interfaces();
         for (interface = 0; interface < num_interfaces; interface++) {
drivers/staging/octeon/octeon-ethernet.h
@@ -72,7 +72,7 @@ void cvm_oct_link_poll(struct net_device *dev);
 extern int always_use_pow;
 extern int pow_send_group;
-extern int pow_receive_group;
+extern int pow_receive_groups;
 extern char pow_send_list[];
 extern struct net_device *cvm_oct_device[];
 extern atomic_t cvm_oct_poll_queue_stopping;