Commit 779e6e1c authored by Jan Glauber, committed by Heiko Carstens

[S390] qdio: new qdio driver.

List of major changes:
- split qdio driver into several files
- separation of thin interrupt code
- improved handling for multiple thin interrupt devices
- inbound and outbound processing now always runs in tasklet context
- significantly fewer tasklet schedules needed per interrupt
- merged qebsm with non-qebsm handling
- cleaned up the qdio interface and added kerneldoc (see the interface sketch below)
- coding style cleanup
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Utz Bacher <utz.bacher@de.ibm.com>
Reviewed-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
parent dae39843
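The qeth and zfcp hunks below show what the interface cleanup means for callers: the separate status and siga_error words are folded into a single qdio_error argument, do_QDIO() and the handlers take one parameter fewer, QDIO_FLAG_UNDER_INTERRUPT is gone, qdio_activate() loses its flags argument, and the min/max threshold fields disappear from struct qdio_initialize. A minimal sketch of a consumer against the new interface; the my_* names are illustrative and the allocate/establish steps are omitted, so this is an assumption-laden outline rather than code from the commit:

/* input handler: the former status/siga_error words arrive as one qdio_err */
static void my_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
			     unsigned int queue, int first_element, int count,
			     unsigned long int_parm)
{
	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		/* device needs recovery, as the qeth/zfcp handlers below do */
		return;
	}
	/* process buffers first_element .. first_element + count - 1 */
}

static int my_start_queues(struct ccw_device *cdev)
{
	int rc;

	/* qdio allocation/establishment omitted from this sketch */
	rc = qdio_activate(cdev);	/* no flags argument any more */
	if (rc)
		return rc;
	/* prime the input queue; the trailing parameter of do_QDIO() is gone */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
}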
......@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
[diff omitted: too large to display]
[collapsed diff omitted]
/*
* drivers/s390/cio/qdio_debug.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_trace;
static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
char dbf_text[20];
sprintf(dbf_text, "qfmt:%x", init_data->q_format);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
QDIO_DBF_TEXT0(0, setup, dbf_text);
sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
}
static void qdio_unregister_dbf_views(void)
{
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
}
static int qdio_register_dbf_views(void)
{
qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
QDIO_DBF_SETUP_NR_AREAS,
QDIO_DBF_SETUP_LEN);
if (!qdio_dbf_setup)
goto oom;
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
QDIO_DBF_TRACE_NR_AREAS,
QDIO_DBF_TRACE_LEN);
if (!qdio_dbf_trace)
goto oom;
debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
return 0;
oom:
qdio_unregister_dbf_views();
return -ENOMEM;
}
static int qstat_show(struct seq_file *m, void *v)
{
unsigned char state;
struct qdio_q *q = m->private;
int i;
if (!q)
return 0;
seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
seq_printf(m, "ftc: %d\n", q->first_to_check);
seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
seq_printf(m, "polling: %d\n", q->u.in.polling);
seq_printf(m, "slsb buffer states:\n");
qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
break;
case SLSB_P_INPUT_ACK:
seq_printf(m, "A");
break;
case SLSB_P_INPUT_ERROR:
case SLSB_P_OUTPUT_ERROR:
seq_printf(m, "x");
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_OUTPUT_EMPTY:
seq_printf(m, "-");
break;
case SLSB_P_INPUT_HALTED:
case SLSB_P_OUTPUT_HALTED:
seq_printf(m, ".");
break;
default:
seq_printf(m, "?");
}
if (i == 63)
seq_printf(m, "\n");
}
seq_printf(m, "\n");
return 0;
}
static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct qdio_q *q = seq->private;
if (!q)
return 0;
if (q->is_input_q)
xchg(q->irq_ptr->dsci, 1);
local_bh_disable();
tasklet_schedule(&q->tasklet);
local_bh_enable();
return count;
}
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
filp->f_path.dentry->d_inode->i_private);
}
static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
{
/* name points into the caller's 40-byte buffer; sizeof(name) would only be the pointer size */
memset(name, 0, 40);
sprintf(name, "%s", cdev->dev.bus_id);
if (q->is_input_q)
sprintf(name + strlen(name), "_input");
else
sprintf(name + strlen(name), "_output");
sprintf(name + strlen(name), "_%d", q->nr);
}
static void remove_debugfs_entry(struct qdio_q *q)
{
int i;
for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
if (!debugfs_queues[i])
continue;
if (debugfs_queues[i]->d_inode->i_private == q) {
debugfs_remove(debugfs_queues[i]);
debugfs_queues[i] = NULL;
}
}
}
static struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
.read = seq_read,
.write = qstat_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
int i = 0;
char name[40];
while (debugfs_queues[i] != NULL) {
i++;
if (i >= MAX_DEBUGFS_QUEUES)
return;
}
get_queue_name(q, cdev, name);
debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
debugfs_root, q, &debugfs_fops);
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
mutex_unlock(&debugfs_mutex);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
for_each_output_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
mutex_unlock(&debugfs_mutex);
}
int __init qdio_debug_init(void)
{
debugfs_root = debugfs_create_dir("qdio_queues", NULL);
return qdio_register_dbf_views();
}
void qdio_debug_exit(void)
{
debugfs_remove(debugfs_root);
qdio_unregister_dbf_views();
}
/*
* drivers/s390/cio/qdio_debug.h
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#ifndef QDIO_DEBUG_H
#define QDIO_DEBUG_H
#include <asm/debug.h>
#include <asm/qdio.h>
#include "qdio.h"
#define QDIO_DBF_HEX(ex, name, level, addr, len) \
do { \
if (ex) \
debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
else \
debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
} while (0)
#define QDIO_DBF_TEXT(ex, name, level, text) \
do { \
if (ex) \
debug_text_exception(qdio_dbf_##name, level, text); \
else \
debug_text_event(qdio_dbf_##name, level, text); \
} while (0)
#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
#else
#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
#else
#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
/* s390dbf views */
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_PAGES 4
#define QDIO_DBF_SETUP_NR_AREAS 1
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_PAGES 16
#define QDIO_DBF_SETUP_LEVEL 6
#define QDIO_DBF_TRACE_LEVEL 4
#else /* !CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_PAGES 4
#define QDIO_DBF_SETUP_LEVEL 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_trace;
void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
void debug_print_bstat(struct qdio_q *q);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
int qdio_debug_init(void);
void qdio_debug_exit(void);
#endif
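Driver-internal callers use these level-suffixed wrappers together with a small stack buffer, as qdio_debug.c above and qdio_thinint.c below do. A short illustrative sketch (the function name and event text are made up for the example; only entries whose level does not exceed the level set via debug_set_level() in qdio_register_dbf_views() are recorded):

/* sketch: record an 8-character formatted event in the "trace" area at level 1;
 * a first argument of 1 selects debug_text_exception() instead of debug_text_event() */
static void example_trace_response(int code)
{
	char dbf_text[15];

	sprintf(dbf_text, "resp%4x", code);	/* 8 chars, matching QDIO_DBF_TRACE_LEN */
	QDIO_DBF_TEXT1(1, trace, dbf_text);
}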
[collapsed diff omitted]
/*
* drivers/s390/cio/qdio_perf.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/ccwdev.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
int qdio_performance_stats;
struct qdio_perf_stats perf_stats;
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *qdio_perf_pde;
#endif
inline void qdio_perf_stat_inc(atomic_long_t *count)
{
if (qdio_performance_stats)
atomic_long_inc(count);
}
inline void qdio_perf_stat_dec(atomic_long_t *count)
{
if (qdio_performance_stats)
atomic_long_dec(count);
}
/*
* procfs functions
*/
static int qdio_perf_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.qdio_int));
seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.pci_int));
seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.thin_int));
seq_printf(m, "\n");
seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.tasklet_inbound));
seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.tasklet_outbound));
seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
(long)atomic_long_read(&perf_stats.tasklet_thinint),
(long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
(long)atomic_long_read(&perf_stats.thinint_inbound),
(long)atomic_long_read(&perf_stats.thinint_inbound_loop));
seq_printf(m, "\n");
seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_in));
seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_out));
seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_sync));
seq_printf(m, "\n");
seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.inbound_handler));
seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.outbound_handler));
seq_printf(m, "\n");
seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
(long)atomic_long_read(&perf_stats.fast_requeue));
seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_tl_out_timer));
seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_stop_polling));
seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
(long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
seq_printf(m, "\n");
return 0;
}
static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qdio_perf_proc_show, NULL);
}
static struct file_operations qdio_perf_proc_fops = {
.owner = THIS_MODULE,
.open = qdio_perf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* sysfs functions
*/
static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
{
return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
}
static ssize_t qdio_perf_stats_store(struct bus_type *bus,
const char *buf, size_t count)
{
unsigned long i;
if (strict_strtoul(buf, 16, &i) != 0)
return -EINVAL;
if ((i != 0) && (i != 1))
return -EINVAL;
if (i == qdio_performance_stats)
return count;
qdio_performance_stats = i;
/* reset performance statistics */
if (i == 0)
memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
return count;
}
static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
qdio_perf_stats_store);
int __init qdio_setup_perf_stats(void)
{
int rc;
rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
if (rc)
return rc;
#ifdef CONFIG_PROC_FS
memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
NULL, &qdio_perf_proc_fops);
#endif
return 0;
}
void __exit qdio_remove_perf_stats(void)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("qdio_perf", NULL);
#endif
bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
}
/*
* drivers/s390/cio/qdio_perf.h
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#ifndef QDIO_PERF_H
#define QDIO_PERF_H
#include <linux/types.h>
#include <linux/device.h>
#include <asm/atomic.h>
struct qdio_perf_stats {
/* interrupt handler calls */
atomic_long_t qdio_int;
atomic_long_t pci_int;
atomic_long_t thin_int;
/* tasklet runs */
atomic_long_t tasklet_inbound;
atomic_long_t tasklet_outbound;
atomic_long_t tasklet_thinint;
atomic_long_t tasklet_thinint_loop;
atomic_long_t thinint_inbound;
atomic_long_t thinint_inbound_loop;
atomic_long_t thinint_inbound_loop2;
/* signal adapter calls */
atomic_long_t siga_out;
atomic_long_t siga_in;
atomic_long_t siga_sync;
/* misc */
atomic_long_t inbound_handler;
atomic_long_t outbound_handler;
atomic_long_t fast_requeue;
/* for debugging */
atomic_long_t debug_tl_out_timer;
atomic_long_t debug_stop_polling;
};
extern struct qdio_perf_stats perf_stats;
extern int qdio_performance_stats;
int qdio_setup_perf_stats(void);
void qdio_remove_perf_stats(void);
extern void qdio_perf_stat_inc(atomic_long_t *count);
extern void qdio_perf_stat_dec(atomic_long_t *count);
#endif
[collapsed diff omitted]
/*
* drivers/s390/cio/qdio_thinint.c
*
* thin interrupt support for qdio
*
* Copyright 2000-2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels can have their own indicator; all
* further subchannels share one indicator
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND 63
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;
/* device state change indicators */
struct indicator_t {
u32 ind; /* u32 because of compare-and-swap performance */
atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;
static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
{
register unsigned long __fn asm("1") = 3;
register unsigned long __tmp asm("2");
register unsigned long __time asm("3");
asm volatile(
" .insn rre,0xb2650000,2,0"
: "+d" (__fn), "=d" (__tmp), "=d" (__time));
return __time;
}
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
if (!atomic_read(&q_indicators[i].count)) {
atomic_set(&q_indicators[i].count, 1);
return &q_indicators[i].ind;
}
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
return &q_indicators[TIQDIO_SHARED_IND].ind;
}
static void put_indicator(u32 *addr)
{
int i;
if (!addr)
return;
i = ((unsigned long)addr - (unsigned long)q_indicators) /
sizeof(struct indicator_t);
atomic_dec(&q_indicators[i].count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
css_qdio_omit_svs = 1;
for_each_input_queue(irq_ptr, q, i) {
list_add_rcu(&q->entry, &tiq_list);
synchronize_rcu();
}
xchg(irq_ptr->dsci, 1);
tasklet_schedule(&tiqdio_tasklet);
}
/*
* we cannot stop the tiqdio tasklet here since it is for all
* thinint qdio devices and it must run as long as there is a
* thinint device left
*/
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i) {
list_del_rcu(&q->entry);
synchronize_rcu();
}
}
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state;
if (!atomic_read(&q->nr_buf_used))
return 1;
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state);
if (state == SLSB_P_INPUT_PRIMED)
/* more work coming */
return 0;
return 1;
}
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
qdio_perf_stat_inc(&perf_stats.thinint_inbound);
qdio_sync_after_thinint(q);
/*
* Maybe we have work on our outbound queues... at least
* we have to check the PCI capable queues.
*/
qdio_check_outbound_after_thinint(q);
again:
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_inbound_handler(q);
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
goto again;
}
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
goto again;
}
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__tiqdio_inbound_processing(q);
}
/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
struct qdio_q *q;
qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
/*
* don't call inbound processing directly since
* that could starve other thinint queues
*/
tasklet_schedule(&q->tasklet);
}
rcu_read_unlock();
/*
* if we used the shared indicator clear it now after all queues
* were processed
*/
if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
/* prevent racing */
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
/* check for more work */
if (*tiqdio_alsi) {
xchg(tiqdio_alsi, 0);
qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
goto again;
}
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @ind: pointer to adapter local summary indicator
* @drv_data: NULL
*/
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
qdio_perf_stat_inc(&perf_stats.thin_int);
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
* avoidance (SVS clears adapter interrupt suppression overwrite)
*/
if (!css_qdio_omit_svs)
do_clear_global_summary();
/*
* reset local summary indicator (tiqdio_alsi) to stop adapter
* interrupts for now, the tasklet will clean all dsci's
*/
xchg((u8 *)ind, 0);
tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct scssc_area *scssc_area;
char dbf_text[15];
void *ptr;
int rc;
scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
memset(scssc_area, 0, PAGE_SIZE);
if (reset) {
scssc_area->summary_indicator_addr = 0;
scssc_area->subchannel_indicator_addr = 0;
} else {
scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
scssc_area->subchannel_indicator_addr =
virt_to_phys(irq_ptr->dsci);
}
scssc_area->request = (struct chsc_header) {
.length = 0x0fe0,
.code = 0x0021,
};
scssc_area->operation_code = 0;
scssc_area->ks = PAGE_DEFAULT_KEY;
scssc_area->kc = PAGE_DEFAULT_KEY;
scssc_area->isc = QDIO_AIRQ_ISC;
scssc_area->schid = irq_ptr->schid;
/* enable the time delay disablement facility */
if (css_general_characteristics.aif_tdd)
scssc_area->word_with_d_bit = 0x10000000;
rc = chsc(scssc_area);
if (rc)
return -EIO;
rc = chsc_error_from_response(scssc_area->response.code);
if (rc) {
sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
QDIO_DBF_TEXT1(0, trace, dbf_text);
QDIO_DBF_TEXT1(0, setup, dbf_text);
ptr = &scssc_area->response;
QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
return rc;
}
QDIO_DBF_TEXT2(0, setup, "setscind");
QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
sizeof(unsigned long));
QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
sizeof(unsigned long));
return 0;
}
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
GFP_KERNEL);
if (!q_indicators)
return -ENOMEM;
return 0;
}
void tiqdio_free_memory(void)
{
kfree(q_indicators);
}
int __init tiqdio_register_thinints(void)
{
char dbf_text[20];
isc_register(QDIO_AIRQ_ISC);
tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
NULL, QDIO_AIRQ_ISC);
if (IS_ERR(tiqdio_alsi)) {
sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
QDIO_DBF_TEXT0(0, setup, dbf_text);
tiqdio_alsi = NULL;
isc_unregister(QDIO_AIRQ_ISC);
return -ENOMEM;
}
return 0;
}
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
/* Check for aif time delay disablement. If installed,
* omit SVS even under LPAR
*/
if (css_general_characteristics.aif_tdd)
css_qdio_omit_svs = 1;
return set_subchannel_ind(irq_ptr, 0);
}
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
irq_ptr->dsci = get_indicator();
QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
/* reset adapter interrupt indicators */
put_indicator(irq_ptr->dsci);
set_subchannel_ind(irq_ptr, 1);
}
void __exit tiqdio_unregister_thinints(void)
{
tasklet_disable(&tiqdio_tasklet);
if (tiqdio_alsi) {
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
}
......@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3
#define QETH_MIN_INPUT_THRESHOLD 1
#define QETH_MAX_INPUT_THRESHOLD 500
#define QETH_MIN_OUTPUT_THRESHOLD 1
#define QETH_MAX_OUTPUT_THRESHOLD 300
/* priority queing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2
......@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
unsigned int, const char *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
unsigned int, unsigned int,
unsigned int, int, int,
unsigned long);
int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
......
......@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 3, "qdioact");
return qdio_activate(CARD_DDEV(card), 0);
return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
......@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
card->qdio.in_q->next_buf_to_init =
card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
card->qdio.in_buf_pool.buf_count - 1, NULL);
card->qdio.in_buf_pool.buf_count - 1);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
return rc;
}
rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0,
......@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
unsigned int siga_error, const char *dbftext)
const char *dbftext)
{
if (qdio_error || siga_error) {
if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
QETH_DBF_TEXT(QERR, 2, dbftext);
QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
......@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
return 1;
}
return 0;
......@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
card->perf_stats.inbound_do_qdio_start_time =
qeth_get_micros();
}
rc = do_QDIO(CARD_DDEV(card),
QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, queue->next_buf_to_init, count, NULL);
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
queue->next_buf_to_init, count);
if (card->options.performance_stats)
card->perf_stats.inbound_do_qdio_time +=
qeth_get_micros() -
......@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
static int qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
unsigned int siga_err)
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
int cc = siga_err & 3;
int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
switch (cc) {
case 0:
if (qdio_err) {
......@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
}
return QETH_SEND_ERROR_NONE;
case 2:
if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
return QETH_SEND_ERROR_KICK_IT;
......@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
return 0;
}
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
int index, int count)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
struct qeth_qdio_out_buffer *buf;
int rc;
......@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
qeth_get_micros();
}
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (under_int)
qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count, NULL);
queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
......@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
if (flush_cnt)
qeth_flush_buffers(queue, 1, index, flush_cnt);
qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
}
}
void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
unsigned int qdio_error, unsigned int siga_error,
unsigned int __queue, int first_element, int count,
unsigned long card_ptr)
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned int qdio_error, int __queue, int first_element,
int count, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
......@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
int i;
QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 2, "achkcond");
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 2, "%08x", status);
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
}
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 2, "achkcond");
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
}
if (card->options.performance_stats) {
card->perf_stats.outbound_handler_cnt++;
......@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */
if (qeth_handle_send_error(card, buffer,
qdio_error, siga_error)
if (qeth_handle_send_error(card, buffer, qdio_error)
== QETH_SEND_ERROR_KICK_IT){
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
......@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (ctx == NULL) {
qeth_fill_buffer(queue, buffer, skb);
qeth_flush_buffers(queue, 0, index, 1);
qeth_flush_buffers(queue, index, 1);
} else {
flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
WARN_ON(buffers_needed != flush_cnt);
qeth_flush_buffers(queue, 0, index, flush_cnt);
qeth_flush_buffers(queue, index, flush_cnt);
}
return 0;
out:
......@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* again */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){
qeth_flush_buffers(queue, 0,
start_index, flush_count);
qeth_flush_buffers(queue, start_index,
flush_count);
atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
return -EBUSY;
......@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
flush_count += tmp;
out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/*
......@@ -3274,7 +3259,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
flush_count += qeth_flush_buffers_on_no_pci(queue);
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
qeth_flush_buffers(queue, start_index, flush_count);
}
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
......@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = qeth_get_qdio_q_format(card);
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
init_data.no_input_qs = 1;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
......@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
struct qdio_ssqd_desc *qdio_ssqd;
int retries = 3;
int mpno;
int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
......@@ -3784,7 +3766,10 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
if (qdio_ssqd)
mpno = qdio_ssqd->pcnt;
if (mpno)
mpno = min(mpno - 1, QETH_MAX_PORTNO);
if (card->info.portno > mpno) {
......
......@@ -726,8 +726,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err,
unsigned int siga_err, unsigned int queue,
unsigned int qdio_err, unsigned int queue,
int first_element, int count, unsigned long card_ptr)
{
struct net_device *net_dev;
......@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
count);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
qeth_schedule_recovery(card);
return;
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
count);
QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err, "qinerr")))
if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
......
......@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
}
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err,
unsigned int siga_err, unsigned int queue, int first_element,
unsigned int qdio_err, unsigned int queue, int first_element,
int count, unsigned long card_ptr)
{
struct net_device *net_dev;
......@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
first_element, count);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
qeth_schedule_recovery(card);
return;
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
first_element, count);
QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err, "qinerr")))
qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
......
......@@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
/**
* zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
* @adapter: adapter affected by this QDIO related event
* @status: as passed by qdio module
* @qdio_error: as passed by qdio module
* @siga_error: as passed by qdio module
* @sbal_index: first buffer with error condition, as passed by qdio module
* @sbal_count: number of buffers affected, as passed by qdio module
*/
void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
unsigned int qdio_error, unsigned int siga_error,
int sbal_index, int sbal_count)
void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
unsigned int qdio_error, int sbal_index,
int sbal_count)
{
struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
unsigned long flags;
......@@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
r->u.qdio.status = status;
r->u.qdio.qdio_error = qdio_error;
r->u.qdio.siga_error = siga_error;
r->u.qdio.sbal_index = sbal_index;
r->u.qdio.sbal_count = sbal_count;
debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
......@@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
{
zfcp_dbf_out(p, "status", "0x%08x", r->status);
zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
}
......
......@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
} __attribute__ ((packed));
struct zfcp_hba_dbf_record_qdio {
u32 status;
u32 qdio_error;
u32 siga_error;
u8 sbal_index;
u8 sbal_count;
} __attribute__ ((packed));
......
......@@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
unsigned int, unsigned int, unsigned int,
int, int);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
int);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
......
......@@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
unsigned int qdio_err, unsigned int siga_err,
unsigned int queue_no, int first, int count,
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->req_q;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
first, count);
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 140);
return;
}
......@@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
count = atomic_read(&queue->count) + processed;
retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, start, count, NULL);
retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
if (unlikely(retval)) {
atomic_set(&queue->count, count);
......@@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
}
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
unsigned int qdio_err, unsigned int siga_err,
unsigned int queue_no, int first, int count,
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
......@@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
volatile struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
first, count);
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 147);
return;
}
......@@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
}
retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
count, NULL);
count);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval;
......@@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
init_data->qib_param_field = NULL;
init_data->input_slib_elements = NULL;
init_data->output_slib_elements = NULL;
init_data->min_input_threshold = 1;
init_data->max_input_threshold = 5000;
init_data->min_output_threshold = 1;
init_data->max_output_threshold = 1000;
init_data->no_input_qs = 1;
init_data->no_output_qs = 1;
init_data->input_handler = zfcp_qdio_int_resp;
......@@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock(&req_q->lock);
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
== -EINPROGRESS)
ssleep(1);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&req_q->count);
......@@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return -EIO;
}
if (qdio_activate(adapter->ccw_device, 0)) {
if (qdio_activate(adapter->ccw_device)) {
dev_err(&adapter->ccw_device->dev,
"Activate of QDIO queues failed.\n");
goto failed_qdio;
......@@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q, NULL)) {
QDIO_MAX_BUFFERS_PER_Q)) {
dev_err(&adapter->ccw_device->dev,
"Init of QDIO response queue failed.\n");
goto failed_qdio;
......@@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return 0;
failed_qdio:
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
== -EINPROGRESS)
ssleep(1);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
return -EIO;
}
[collapsed diff omitted]
......@@ -65,6 +65,7 @@ extern unsigned long machine_flags;
#define MACHINE_FLAG_VM (1UL << 0)
#define MACHINE_FLAG_IEEE (1UL << 1)
#define MACHINE_FLAG_P390 (1UL << 2)
#define MACHINE_FLAG_CSP (1UL << 3)
#define MACHINE_FLAG_MVPG (1UL << 4)
#define MACHINE_FLAG_DIAG44 (1UL << 5)
......@@ -77,7 +78,6 @@ extern unsigned long machine_flags;
#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_P390 (machine_flags & MACHINE_FLAG_P390)
#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__
......