Commit 1455ea1d authored by David S. Miller

Merge branch 's390-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2020-03-25

please apply the following patch series for qeth to netdev's net-next
tree.
Same series as yesterday, with one minor update to patch 1 as per
your review.

This adds
1) NAPI poll support for the async-Completion Queue (with one qdio layer
   patch acked by Heiko),
2) ethtool support for per-queue TX IRQ coalescing,
3) various cleanups.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 29f3490b bb59c8a8
@@ -338,7 +338,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
- * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @irq_poll: Data IRQ polling handler (NULL when not supported)
  * @scan_threshold: # of in-use buffers that triggers scan on output queue
  * @int_parm: interruption parameter
  * @input_sbal_addr_array: address of no_input_qs * 128 pointers
@@ -359,8 +359,7 @@ struct qdio_initialize {
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
-	void (**queue_start_poll_array) (struct ccw_device *, int,
-					 unsigned long);
+	void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
 	unsigned int scan_threshold;
 	unsigned long int_parm;
 	struct qdio_buffer **input_sbal_addr_array;
@@ -415,8 +414,8 @@ extern int qdio_activate(struct ccw_device *);
 extern void qdio_release_aob(struct qaob *);
 extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 		   unsigned int);
-extern int qdio_start_irq(struct ccw_device *, int);
-extern int qdio_stop_irq(struct ccw_device *, int);
+extern int qdio_start_irq(struct ccw_device *cdev);
+extern int qdio_stop_irq(struct ccw_device *cdev);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
 			      bool is_input, unsigned int *bufnr,
......
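For reference, a minimal driver-side sketch of the reworked registration (hypothetical names my_card, my_irq_poll and my_fill_qdio_init; not part of this series): instead of one queue_start_poll handler per input queue, a single device-wide irq_poll callback is registered, and the driver's int_parm comes back as the data argument.

#include <linux/netdevice.h>
#include <asm/qdio.h>

struct my_card {			/* illustrative driver state */
	struct napi_struct napi;
	struct ccw_device *cdev;
};

/* Device-wide data-IRQ callback; runs instead of per-queue tasklets. */
static void my_irq_poll(struct ccw_device *cdev, unsigned long data)
{
	struct my_card *card = (struct my_card *)data;	/* data == int_parm */

	napi_schedule(&card->napi);	/* defer the real work to NAPI */
}

static void my_fill_qdio_init(struct qdio_initialize *init, struct my_card *card)
{
	init->irq_poll = my_irq_poll;		/* was: queue_start_poll_array */
	init->int_parm = (unsigned long)card;
}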
@@ -177,8 +177,8 @@ struct qdio_queue_perf_stat {
 	unsigned int nr_sbal_total;
 };
 
-enum qdio_queue_irq_states {
-	QDIO_QUEUE_IRQS_DISABLED,
+enum qdio_irq_poll_states {
+	QDIO_IRQ_DISABLED,
 };
 
 struct qdio_input_q {
@@ -188,10 +188,6 @@ struct qdio_input_q {
 	int ack_count;
 	/* last time of noticing incoming data */
 	u64 timestamp;
-	/* upper-layer polling flag */
-	unsigned long queue_irq_state;
-	/* callback to start upper-layer polling */
-	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
 };
 
 struct qdio_output_q {
@@ -299,6 +295,9 @@ struct qdio_irq {
 	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
 	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
 
+	void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+	unsigned long poll_state;
+
 	debug_info_t *debug_area;
 	struct mutex setup_mutex;
 	struct qdio_dev_perf_stat perf_stat;
......
@@ -128,8 +128,8 @@ static int qstat_show(struct seq_file *m, void *v)
 			   q->u.in.ack_start, q->u.in.ack_count);
 		seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
 			   *(u8 *)q->irq_ptr->dsci,
-			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
-				    &q->u.in.queue_irq_state));
+			   test_bit(QDIO_IRQ_DISABLED,
+				    &q->irq_ptr->poll_state));
 	}
 	seq_printf(m, "SBAL states:\n");
 	seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
......
@@ -950,19 +950,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
-				continue;
-			}
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
+	if (irq_ptr->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+	} else {
+		for_each_input_queue(irq_ptr, q, i)
 			tasklet_schedule(&q->tasklet);
-		}
 	}
 
 	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
@@ -1610,24 +1605,26 @@ EXPORT_SYMBOL_GPL(do_QDIO);
 /**
  * qdio_start_irq - process input buffers
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
  *
  * Return codes
  *   0 - success
  *   1 - irqs not started since new data is available
  */
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	unsigned int i;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
 	clear_nonshared_ind(irq_ptr);
-	qdio_stop_polling(q);
-	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+	for_each_input_queue(irq_ptr, q, i)
+		qdio_stop_polling(q);
+
+	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
 	/*
 	 * We need to check again to not lose initiative after
@@ -1635,13 +1632,16 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 	 */
 	if (test_nonshared_ind(irq_ptr))
 		goto rescan;
-	if (!qdio_inbound_q_done(q, q->first_to_check))
-		goto rescan;
+
+	for_each_input_queue(irq_ptr, q, i) {
+		if (!qdio_inbound_q_done(q, q->first_to_check))
+			goto rescan;
+	}
+
 	return 0;
 
 rescan:
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
@@ -1729,23 +1729,19 @@ EXPORT_SYMBOL(qdio_get_next_buffers);
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
 {
-	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
 	if (!irq_ptr)
 		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
 
-	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-			     &q->u.in.queue_irq_state))
+	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
 		return 0;
 	else
 		return 1;
......
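Continuing the hypothetical my_card sketch from above, a NAPI poll routine would pair with the new, queue-less qdio_start_irq() roughly like this; my_process_inbound() is a made-up stand-in for the driver's completion handling, not a real function in this series.

static int my_process_inbound(struct my_card *card, int budget);	/* illustrative */

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_card *card = container_of(napi, struct my_card, napi);
	int work_done;

	/* Drain completed buffers, bounded by the NAPI budget. */
	work_done = my_process_inbound(card, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Re-enable the data IRQ for the whole device, not per queue. */
		if (qdio_start_irq(card->cdev))
			/* New data raced in before the IRQ was re-armed. */
			napi_schedule(napi);
	}
	return work_done;
}

The matching qdio_stop_irq(card->cdev) likewise drops the queue number and simply marks interrupt processing as disabled for the whole device via the new poll_state bit.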
@@ -224,15 +224,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
-		if (qdio_init->queue_start_poll_array &&
-		    qdio_init->queue_start_poll_array[i]) {
-			q->u.in.queue_start_poll =
-				qdio_init->queue_start_poll_array[i];
-			set_bit(QDIO_QUEUE_IRQS_DISABLED,
-				&q->u.in.queue_irq_state);
-		} else {
-			q->u.in.queue_start_poll = NULL;
-		}
 
 		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
 		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -483,6 +474,13 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
 
+	if (init_data->irq_poll) {
+		irq_ptr->irq_poll = init_data->irq_poll;
+		set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+	} else {
+		irq_ptr->irq_poll = NULL;
+	}
+
 	setup_qib(irq_ptr, init_data);
 	qdio_setup_thinint(irq_ptr);
 	set_impl_params(irq_ptr, init_data->qib_param_field_format,
......
@@ -135,28 +135,24 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
 	    has_multiple_inq_on_dsci(irq))
 		xchg(irq->dsci, 0);
 
+	if (irq->irq_poll) {
+		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+			irq->irq_poll(irq->cdev, irq->int_parm);
+		else
+			QDIO_PERF_STAT_INC(irq, int_discarded);
+
+		return;
+	}
+
 	for_each_input_queue(irq, q, i) {
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				QDIO_PERF_STAT_INC(irq, int_discarded);
-				continue;
-			}
-
-			/* avoid dsci clear here, done after processing */
-			q->u.in.queue_start_poll(irq->cdev, q->nr,
-						 irq->int_parm);
-		} else {
-			if (!shared_ind(irq))
-				xchg(irq->dsci, 0);
-
-			/*
-			 * Call inbound processing but not directly
-			 * since that could starve other thinint queues.
-			 */
-			tasklet_schedule(&q->tasklet);
-		}
+		if (!shared_ind(irq))
+			xchg(irq->dsci, 0);
+
+		/*
+		 * Call inbound processing but not directly
+		 * since that could starve other thinint queues.
+		 */
+		tasklet_schedule(&q->tasklet);
 	}
 }
......
@@ -178,10 +178,6 @@ struct qeth_vnicc_info {
 #define QETH_RECLAIM_WORK_TIME HZ
 #define QETH_MAX_PORTNO 15
 
-/*IPv6 address autoconfiguration stuff*/
-#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
-#define UNIQUE_ID_NOT_BY_CARD 0x10000
-
 /*****************************************************************************/
 /* QDIO queue and buffer handling */
 /*****************************************************************************/
@@ -215,6 +211,7 @@ struct qeth_vnicc_info {
 #define QETH_PRIO_Q_ING_TOS 2
 #define QETH_PRIO_Q_ING_SKB 3
 #define QETH_PRIO_Q_ING_VLAN 4
+#define QETH_PRIO_Q_ING_FIXED 5
 
 /* Packing */
 #define QETH_LOW_WATERMARK_PACK 2
@@ -406,6 +403,7 @@ struct qeth_qdio_out_buffer {
 	struct qdio_buffer *buffer;
 	atomic_t state;
 	int next_element_to_fill;
+	unsigned int frames;
 	unsigned int bytes;
 	struct sk_buff_head skb_list;
 	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -461,6 +459,8 @@ struct qeth_out_q_stats {
 	u64 tso_bytes;
 	u64 packing_mode_switch;
 	u64 stopped;
+	u64 doorbell;
+	u64 coal_frames;
 	u64 completion_yield;
 	u64 completion_timer;
@@ -471,6 +471,8 @@ struct qeth_out_q_stats {
 	u64 tx_dropped;
 };
 
+#define QETH_TX_MAX_COALESCED_FRAMES 1
+#define QETH_TX_COALESCE_USECS 25
 #define QETH_TX_TIMER_USECS 500
 
 struct qeth_qdio_out_q {
@@ -494,9 +496,13 @@ struct qeth_qdio_out_q {
 	struct napi_struct napi;
 	struct timer_list timer;
 	struct qeth_hdr *prev_hdr;
+	unsigned int coalesced_frames;
 	u8 bulk_start;
 	u8 bulk_count;
 	u8 bulk_max;
+
+	unsigned int coalesce_usecs;
+	unsigned int max_coalesced_frames;
 };
 
 #define qeth_for_each_output_queue(card, q, i) \
@@ -505,12 +511,10 @@ struct qeth_qdio_out_q {
 
 #define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
 
-static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
+				     unsigned long usecs)
 {
-	if (timer_pending(&queue->timer))
-		return;
-	mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
-		  jiffies);
+	timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
 }
 
 static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
@@ -672,22 +676,20 @@ struct qeth_card_blkt {
 #define QETH_BROADCAST_WITH_ECHO 0x01
 #define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_REGISTERED 0x02
 
 struct qeth_card_info {
 	unsigned short unit_addr2;
 	unsigned short cula;
 	u8 chpid;
 	__u16 func_level;
 	char mcl_level[QETH_MCL_LENGTH + 1];
+	u8 dev_addr_is_registered:1;
 	u8 open_when_online:1;
 	u8 promisc_mode:1;
 	u8 use_v1_blkt:1;
 	u8 is_vm_nic:1;
-	int mac_bits;
 	enum qeth_card_types type;
 	enum qeth_link_types link_type;
 	int broadcast_capable;
-	int unique_id;
 	bool layer_enforced;
 	struct qeth_card_blkt blkt;
 	__u32 diagass_support;
@@ -752,7 +754,7 @@ enum qeth_addr_disposition {
 struct qeth_rx {
 	int b_count;
 	int b_index;
-	struct qdio_buffer_element *b_element;
+	u8 buf_element;
 	int e_offset;
 	int qdio_err;
 };
......
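The qeth_core_main.c hunks that consume these new fields are collapsed below, so here is only a rough, hedged sketch of the intended TX-path behaviour (my_tx_account_frames() is a made-up helper, not the driver's actual function): frames are counted per doorbell, a completion IRQ is requested once max_coalesced_frames is reached, and otherwise the coalescing timer is (re)armed. Note that timer_reduce() in the reworked qeth_tx_arm_timer() only moves a pending timer closer, so repeated arming cannot push the deadline out.

/* Illustrative only; the limits are updated concurrently via WRITE_ONCE()
 * in qeth_set_coalesce(), hence the READ_ONCE() pairing here.
 */
static bool my_tx_account_frames(struct qeth_qdio_out_q *queue,
				 unsigned int frames)
{
	queue->coalesced_frames += frames;

	if (queue->coalesced_frames >= READ_ONCE(queue->max_coalesced_frames)) {
		queue->coalesced_frames = 0;
		return true;	/* flag this buffer to raise a TX completion IRQ */
	}

	/* No IRQ requested yet: make sure a timer will fire eventually. */
	qeth_tx_arm_timer(queue, READ_ONCE(queue->coalesce_usecs));
	return false;
}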
This diff is collapsed.
@@ -211,16 +211,16 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
 		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
 		card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
 	} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 0;
 	} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 1;
 	} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 2;
 	} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
-		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+		card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
 		card->qdio.default_out_queue = 3;
 	} else if (sysfs_streq(buf, "no_prio_queueing")) {
 		card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
......
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
 	QETH_TXQ_STAT("TSO bytes", tso_bytes),
 	QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
 	QETH_TXQ_STAT("Queue stopped", stopped),
+	QETH_TXQ_STAT("Doorbell", doorbell),
+	QETH_TXQ_STAT("IRQ for frames", coal_frames),
 	QETH_TXQ_STAT("Completion yield", completion_yield),
 	QETH_TXQ_STAT("Completion timer", completion_timer),
 };
@@ -108,6 +110,38 @@ static void qeth_get_ethtool_stats(struct net_device *dev,
 			     txq_stats, TXQ_STATS_LEN);
 }
 
+static void __qeth_set_coalesce(struct net_device *dev,
+				struct qeth_qdio_out_q *queue,
+				struct ethtool_coalesce *coal)
+{
+	WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
+	WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
+
+	if (coal->tx_coalesce_usecs &&
+	    netif_running(dev) &&
+	    !qeth_out_queue_is_empty(queue))
+		qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
+}
+
+static int qeth_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+	unsigned int i;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+		return -EINVAL;
+
+	qeth_for_each_output_queue(card, queue, i)
+		__qeth_set_coalesce(dev, queue, coal);
+
+	return 0;
+}
+
 static void qeth_get_ringparam(struct net_device *dev,
 			       struct ethtool_ringparam *param)
 {
@@ -243,6 +277,43 @@ static int qeth_set_tunable(struct net_device *dev,
 	}
 }
 
+static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
+				       struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+	struct qeth_qdio_out_q *queue;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (__queue >= card->qdio.no_out_queues)
+		return -EINVAL;
+
+	queue = card->qdio.out_qs[__queue];
+
+	coal->tx_coalesce_usecs = queue->coalesce_usecs;
+	coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
+	return 0;
+}
+
+static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+				       struct ethtool_coalesce *coal)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	if (!IS_IQD(card))
+		return -EOPNOTSUPP;
+
+	if (queue >= card->qdio.no_out_queues)
+		return -EINVAL;
+
+	if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+		return -EINVAL;
+
+	__qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
+	return 0;
+}
+
 /* Helper function to fill 'advertising' and 'supported' which are the same. */
 /* Autoneg and full-duplex are supported and advertised unconditionally. */
 /* Always advertise and support all speeds up to specified, and only one */
@@ -442,7 +513,10 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
 }
 
 const struct ethtool_ops qeth_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
 	.get_link = ethtool_op_get_link,
+	.set_coalesce = qeth_set_coalesce,
 	.get_ringparam = qeth_get_ringparam,
 	.get_strings = qeth_get_strings,
 	.get_ethtool_stats = qeth_get_ethtool_stats,
@@ -453,6 +527,8 @@ const struct ethtool_ops qeth_ethtool_ops = {
 	.get_ts_info = qeth_get_ts_info,
 	.get_tunable = qeth_get_tunable,
 	.set_tunable = qeth_set_tunable,
+	.get_per_queue_coalesce = qeth_get_per_queue_coalesce,
+	.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
 	.get_link_ksettings = qeth_get_link_ksettings,
 };
......
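With .set_coalesce, .get_per_queue_coalesce and .set_per_queue_coalesce wired up, the settings should be reachable through the standard ethtool interface; an illustrative invocation (assuming a reasonably recent ethtool binary) would be "ethtool --per-queue <dev> queue_mask 0x1 --coalesce tx-usecs 25 tx-frames 1". Only tx-usecs and tx-frames are accepted, as declared via supported_coalesce_params, and a plain "ethtool -C <dev> ..." applies the same values to every TX queue of an IQD device.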
@@ -52,11 +52,11 @@ static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
 		break;
 	case IPA_RC_L2_DUP_MAC:
 	case IPA_RC_L2_DUP_LAYER3_MAC:
-		rc = -EEXIST;
+		rc = -EADDRINUSE;
 		break;
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
-		rc = -EPERM;
+		rc = -EADDRNOTAVAIL;
 		break;
 	case IPA_RC_L2_MAC_NOT_FOUND:
 		rc = -ENOENT;
@@ -105,11 +105,11 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 			 "MAC address %pM successfully registered\n", mac);
 	} else {
 		switch (rc) {
-		case -EEXIST:
+		case -EADDRINUSE:
 			dev_warn(&card->gdev->dev,
 				 "MAC address %pM already exists\n", mac);
 			break;
-		case -EPERM:
+		case -EADDRNOTAVAIL:
 			dev_warn(&card->gdev->dev,
 				 "MAC address %pM is not authorized\n", mac);
 			break;
@@ -126,7 +126,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 	QETH_CARD_TEXT(card, 2, "L2Wmac");
 	rc = qeth_l2_send_setdelmac(card, mac, cmd);
-	if (rc == -EEXIST)
+	if (rc == -EADDRINUSE)
 		QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
 				 CARD_DEVID(card));
 	else if (rc)
@@ -291,7 +291,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
 	qeth_qdio_clear_card(card, 0);
 	qeth_clear_working_pool_list(card);
 	flush_workqueue(card->event_wq);
-	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
 	card->info.promisc_mode = 0;
 }
@@ -337,14 +336,16 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
 		qeth_l2_request_initial_mac(card);
 
 	if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
-		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+		card->info.dev_addr_is_registered = 1;
+	else
+		card->info.dev_addr_is_registered = 0;
 }
 
 static int qeth_l2_validate_addr(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 
-	if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+	if (card->info.dev_addr_is_registered)
 		return eth_validate_addr(dev);
 
 	QETH_CARD_TEXT(card, 4, "nomacadr");
@@ -370,7 +371,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 	/* don't register the same address twice */
 	if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
-	    (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+	    card->info.dev_addr_is_registered)
 		return 0;
 
 	/* add the new address, switch over, drop the old */
@@ -380,9 +381,9 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 	ether_addr_copy(old_addr, dev->dev_addr);
 	ether_addr_copy(dev->dev_addr, addr->sa_data);
 
-	if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+	if (card->info.dev_addr_is_registered)
 		qeth_l2_remove_mac(card, old_addr);
-	card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+	card->info.dev_addr_is_registered = 1;
 	return 0;
 }
......
@@ -949,39 +949,36 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
 				    struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+	u16 *uid = reply->param;
 
 	if (cmd->hdr.return_code == 0) {
-		card->info.unique_id = cmd->data.create_destroy_addr.uid;
+		*uid = cmd->data.create_destroy_addr.uid;
 		return 0;
 	}
 
-	card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-			       UNIQUE_ID_NOT_BY_CARD;
 	dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
 	return -EIO;
 }
 
-static int qeth_l3_get_unique_id(struct qeth_card *card)
+static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
 {
-	int rc = 0;
 	struct qeth_cmd_buffer *iob;
 
 	QETH_CARD_TEXT(card, 2, "guniqeid");
 
-	if (!qeth_is_supported(card, IPA_IPV6)) {
-		card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
-					UNIQUE_ID_NOT_BY_CARD;
-		return 0;
-	}
+	if (!qeth_is_supported(card, IPA_IPV6))
+		goto out;
 
 	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
 				 IPA_DATA_SIZEOF(create_destroy_addr));
 	if (!iob)
-		return -ENOMEM;
+		goto out;
 
-	__ipa_cmd(iob)->data.create_destroy_addr.uid = card->info.unique_id;
-	rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
-	return rc;
+	__ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
+	qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
+
+out:
+	return uid;
 }
 
 static int
@@ -1920,6 +1917,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+	struct net_device *dev = card->dev;
 	unsigned int headroom;
 	int rc;
@@ -1937,9 +1935,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
 
 		/*IPv6 address autoconfiguration stuff*/
-		qeth_l3_get_unique_id(card);
-		if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
-			card->dev->dev_id = card->info.unique_id & 0xffff;
+		dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);
 
 		if (!IS_VM_NIC(card)) {
 			card->dev->features |= NETIF_F_SG;
......