Commit 779e6e1c authored by Jan Glauber's avatar Jan Glauber Committed by Heiko Carstens

[S390] qdio: new qdio driver.

List of major changes:
- split qdio driver into several files
- separation of thin interrupt code
- improved handling for multiple thin interrupt devices
- inbound and outbound processing now always runs in tasklet context
- significantly fewer tasklet schedules needed per interrupt
- merged qebsm with non-qebsm handling
- cleanup qdio interface and added kerneldoc
- coding style
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Utz Bacher <utz.bacher@de.ibm.com>
Reviewed-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
parent dae39843
...@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o ...@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o obj-$(CONFIG_QDIO) += qdio.o
This source diff could not be displayed because it is too large. You can view the blob instead.
/*
* linux/drivers/s390/cio/qdio.h
*
* Copyright 2000,2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#ifndef _CIO_QDIO_H #ifndef _CIO_QDIO_H
#define _CIO_QDIO_H #define _CIO_QDIO_H
#include <asm/page.h> #include <asm/page.h>
#include <asm/isc.h>
#include <asm/schid.h> #include <asm/schid.h>
#include "chsc.h"
#ifdef CONFIG_QDIO_DEBUG #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
#define QDIO_VERBOSE_LEVEL 9 #define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
#else /* CONFIG_QDIO_DEBUG */ #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
#define QDIO_VERBOSE_LEVEL 5
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
/*
* unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
* we never know, whether we'll get initiative again, e.g. to give the
* transmit skb's back to the stack, however the stack may be waiting for
* them... therefore we define 4 as threshold to start polling (which
* will stop as soon as the asynchronous queue catches up)
* btw, this only applies to the asynchronous HiperSockets queue
*/
#define IQDIO_FILL_LEVEL_TO_POLL 4
#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
#define TIQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
#define IQDIO_LOCAL_LAPS 4
#define IQDIO_LOCAL_LAPS_INT 1
#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
/*#define IQDIO_IQDC_INT_PARM 0x1234*/
#define QDIO_Q_LAPS 5
#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
#define L2_CACHELINE_SIZE 256
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
#define QDIO_PERF "qdio_perf"
/* must be a power of 2 */
/*#define QDIO_STATS_NUMBER 4
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
enum qdio_irq_states { enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE, QDIO_IRQ_STATE_INACTIVE,
...@@ -72,565 +26,352 @@ enum qdio_irq_states { ...@@ -72,565 +26,352 @@ enum qdio_irq_states {
NR_QDIO_IRQ_STATES, NR_QDIO_IRQ_STATES,
}; };
/* used as intparm in do_IO: */ /* used as intparm in do_IO */
#define QDIO_DOING_SENSEID 0 #define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ESTABLISH 1 #define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_ACTIVATE 2 #define QDIO_DOING_CLEANUP 3
#define QDIO_DOING_CLEANUP 3
#define SLSB_STATE_NOT_INIT 0x0
/************************* DEBUG FACILITY STUFF *********************/ #define SLSB_STATE_EMPTY 0x1
#define SLSB_STATE_PRIMED 0x2
#define QDIO_DBF_HEX(ex,name,level,addr,len) \ #define SLSB_STATE_HALTED 0xe
do { \ #define SLSB_STATE_ERROR 0xf
if (ex) \ #define SLSB_TYPE_INPUT 0x0
debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ #define SLSB_TYPE_OUTPUT 0x20
else \ #define SLSB_OWNER_PROG 0x80
debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ #define SLSB_OWNER_CU 0x40
} while (0)
#define QDIO_DBF_TEXT(ex,name,level,text) \ #define SLSB_P_INPUT_NOT_INIT \
do { \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
if (ex) \ #define SLSB_P_INPUT_ACK \
debug_text_exception(qdio_dbf_##name,level,text); \ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
else \ #define SLSB_CU_INPUT_EMPTY \
debug_text_event(qdio_dbf_##name,level,text); \ (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
} while (0) #define SLSB_P_INPUT_PRIMED \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
#define SLSB_P_INPUT_HALTED \
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) #define SLSB_P_INPUT_ERROR \
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
#ifdef CONFIG_QDIO_DEBUG #define SLSB_P_OUTPUT_NOT_INIT \
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) #define SLSB_P_OUTPUT_EMPTY \
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) #define SLSB_CU_OUTPUT_PRIMED \
#else /* CONFIG_QDIO_DEBUG */ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) #define SLSB_P_OUTPUT_HALTED \
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) #define SLSB_P_OUTPUT_ERROR \
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
#endif /* CONFIG_QDIO_DEBUG */
#define SLSB_ERROR_DURING_LOOKUP 0xff
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) /* additional CIWs returned by extended Sense-ID */
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) #define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#ifdef CONFIG_QDIO_DEBUG #define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_PAGES 4
#define QDIO_DBF_SETUP_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SETUP_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_PAGES 4
#define QDIO_DBF_SBAL_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SBAL_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_PAGES 16
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_PAGES 4
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_PAGES 2
#define QDIO_DBF_SENSE_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SENSE_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_OUT_PAGES 256
#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
#define QDIO_DBF_SLSB_OUT_LEVEL 6
#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_IN_PAGES 256
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_STUPID(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>7 /* flags for st qdio sch data */
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) #define CHSC_FLAG_QDIO_CAPABILITY 0x80
#else #define CHSC_FLAG_VALIDITY 0x40
#define QDIO_PRINT_ALL(x...) do { } while (0)
#endif /* qdio adapter-characteristics-1 flag */
#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
#if QDIO_VERBOSE_LEVEL>6 #define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) #define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
#else #define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
#define QDIO_PRINT_INFO(x...) do { } while (0) #define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
#endif #define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_WARN(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ERR(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_CRIT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALERT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>1 #ifdef CONFIG_64BIT
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) static inline int do_sqbs(u64 token, unsigned char state, int queue,
#else int *start, int *count)
#define QDIO_PRINT_EMERG(x...) do { } while (0) {
#endif register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
#define QDIO_HEXDUMP16(importance,header,ptr) \ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
*(((char*)ptr)+1),*(((char*)ptr)+2), \
*(((char*)ptr)+3),*(((char*)ptr)+4), \
*(((char*)ptr)+5),*(((char*)ptr)+6), \
*(((char*)ptr)+7),*(((char*)ptr)+8), \
*(((char*)ptr)+9),*(((char*)ptr)+10), \
*(((char*)ptr)+11),*(((char*)ptr)+12), \
*(((char*)ptr)+13),*(((char*)ptr)+14), \
*(((char*)ptr)+15)); \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)+16),*(((char*)ptr)+17), \
*(((char*)ptr)+18),*(((char*)ptr)+19), \
*(((char*)ptr)+20),*(((char*)ptr)+21), \
*(((char*)ptr)+22),*(((char*)ptr)+23), \
*(((char*)ptr)+24),*(((char*)ptr)+25), \
*(((char*)ptr)+26),*(((char*)ptr)+27), \
*(((char*)ptr)+28),*(((char*)ptr)+29), \
*(((char*)ptr)+30),*(((char*)ptr)+31));
/****************** END OF DEBUG FACILITY STUFF *********************/
/* asm volatile(
* Some instructions as assembly " .insn rsy,0xeb000000008A,%1,0,0(%2)"
*/ : "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_token)
: "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
static inline int return (_ccq >> 32) & 0xff;
do_sqbs(unsigned long sch, unsigned char state, int queue,
unsigned int *start, unsigned int *count)
{
#ifdef CONFIG_64BIT
register unsigned long _ccq asm ("0") = *count;
register unsigned long _sch asm ("1") = sch;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_sch)
: "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
return (_ccq >> 32) & 0xff;
#else
return 0;
#endif
} }
static inline int static inline int do_eqbs(u64 token, unsigned char *state, int queue,
do_eqbs(unsigned long sch, unsigned char *state, int queue, int *start, int *count)
unsigned int *start, unsigned int *count)
{ {
#ifdef CONFIG_64BIT
register unsigned long _ccq asm ("0") = *count; register unsigned long _ccq asm ("0") = *count;
register unsigned long _sch asm ("1") = sch; register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = 0; unsigned long _state = 0;
asm volatile( asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0" " .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state) : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
: "d" (_sch) : "d" (_token)
: "memory", "cc" ); : "memory", "cc");
*count = _ccq & 0xff; *count = _ccq & 0xff;
*start = _queuestart & 0xff; *start = _queuestart & 0xff;
*state = _state & 0xff; *state = _state & 0xff;
return (_ccq >> 32) & 0xff; return (_ccq >> 32) & 0xff;
#else
return 0;
#endif
}
static inline int
do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{
register unsigned long reg0 asm ("0") = 2;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask1;
register unsigned long reg3 asm ("3") = mask2;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
return cc;
}
static inline int
do_siga_input(struct subchannel_id schid, unsigned int mask)
{
register unsigned long reg0 asm ("0") = 1;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
return cc;
}
static inline int
do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
unsigned int fc)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
int cc;
asm volatile(
" siga 0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
: "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "memory");
(*bb) = ((unsigned int) __fc) >> 31;
return cc;
}
static inline unsigned long
do_clear_global_summary(void)
{
register unsigned long __fn asm("1") = 3;
register unsigned long __tmp asm("2");
register unsigned long __time asm("3");
asm volatile(
" .insn rre,0xb2650000,2,0"
: "+d" (__fn), "=d" (__tmp), "=d" (__time));
return __time;
} }
#else
/* static inline int do_sqbs(u64 token, unsigned char state, int queue,
* QDIO device commands returned by extended Sense-ID int *start, int *count) { return 0; }
*/ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
#define DEFAULT_ESTABLISH_QS_CMD 0x1b int *start, int *count) { return 0; }
#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 #endif /* CONFIG_64BIT */
#define DEFAULT_ACTIVATE_QS_CMD 0x1f
#define DEFAULT_ACTIVATE_QS_COUNT 0
/*
* additional CIWs returned by extended Sense-ID
*/
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
#define QDIO_CHSC_RESPONSE_CODE_OK 1 struct qdio_irq;
/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 struct siga_flag {
#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 u8 input:1;
#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 u8 output:1;
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 u8 sync:1;
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 u8 no_sync_ti:1;
u8 no_sync_out_ti:1;
u8 no_sync_out_pci:1;
u8:2;
} __attribute__ ((packed));
struct qdio_chsc_ssqd { struct chsc_ssqd_area {
struct chsc_header request; struct chsc_header request;
u16 reserved1:10; u16:10;
u16 ssid:2; u8 ssid:2;
u16 fmt:4; u8 fmt:4;
u16 first_sch; u16 first_sch;
u16 reserved2; u16:16;
u16 last_sch; u16 last_sch;
u32 reserved3; u32:32;
struct chsc_header response; struct chsc_header response;
u32 reserved4; u32:32;
u8 flags; struct qdio_ssqd_desc qdio_ssqd;
u8 reserved5; } __attribute__ ((packed));
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pct;
u8 icnt;
u8 reserved7;
u8 ocnt;
u8 reserved8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
};
struct qdio_perf_stats { struct scssc_area {
#ifdef CONFIG_64BIT struct chsc_header request;
atomic64_t tl_runs; u16 operation_code;
atomic64_t outbound_tl_runs; u16:16;
atomic64_t outbound_tl_runs_resched; u32:32;
atomic64_t inbound_tl_runs; u32:32;
atomic64_t inbound_tl_runs_resched; u64 summary_indicator_addr;
atomic64_t inbound_thin_tl_runs; u64 subchannel_indicator_addr;
atomic64_t inbound_thin_tl_runs_resched; u32 ks:4;
u32 kc:4;
atomic64_t siga_outs; u32:21;
atomic64_t siga_ins; u32 isc:3;
atomic64_t siga_syncs; u32 word_with_d_bit;
atomic64_t pcis; u32:32;
atomic64_t thinints; struct subchannel_id schid;
atomic64_t fast_reqs; u32 reserved[1004];
struct chsc_header response;
atomic64_t outbound_cnt; u32:32;
atomic64_t inbound_cnt; } __attribute__ ((packed));
#else /* CONFIG_64BIT */
atomic_t tl_runs; struct qdio_input_q {
atomic_t outbound_tl_runs; /* input buffer acknowledgement flag */
atomic_t outbound_tl_runs_resched; int polling;
atomic_t inbound_tl_runs;
atomic_t inbound_tl_runs_resched; /* last time of noticing incoming data */
atomic_t inbound_thin_tl_runs; u64 timestamp;
atomic_t inbound_thin_tl_runs_resched;
/* lock for clearing the acknowledgement */
atomic_t siga_outs; spinlock_t lock;
atomic_t siga_ins;
atomic_t siga_syncs;
atomic_t pcis;
atomic_t thinints;
atomic_t fast_reqs;
atomic_t outbound_cnt;
atomic_t inbound_cnt;
#endif /* CONFIG_64BIT */
}; };
/* unlikely as the later the better */ struct qdio_output_q {
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) /* failed siga-w attempts*/
#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ atomic_t busy_siga_counter;
qdio_siga_sync(q,~0U,~0U)
#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
qdio_siga_sync(q,~0U,0)
#define NOW qdio_get_micros() /* start time of busy condition */
#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW u64 timestamp;
#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
#define MY_MODULE_STRING(x) #x /* PCIs are enabled for the queue */
int pci_out_enabled;
#ifdef CONFIG_64BIT /* timer to check for more outbound work */
#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) struct timer_list timer;
#else /* CONFIG_64BIT */ };
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_64BIT */
struct qdio_q { struct qdio_q {
volatile struct slsb slsb; struct slsb slsb;
union {
struct qdio_input_q in;
struct qdio_output_q out;
} u;
char unused[QDIO_MAX_BUFFERS_PER_Q]; /* queue number */
int nr;
__u32 * dev_st_chg_ind; /* bitmask of queue number */
int mask;
/* input or output queue */
int is_input_q; int is_input_q;
struct subchannel_id schid;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
unsigned int is_thinint_q;
/* bit 0 means queue 0, bit 1 means queue 1, ... */ /* list of thinint input queues */
unsigned int mask; struct list_head entry;
unsigned int q_no;
/* upper-layer program handler */
qdio_handler_t (*handler); qdio_handler_t (*handler);
/* points to the next buffer to be checked for having /*
* been processed by the card (outbound) * inbound: next buffer the program should check for
* or to the next buffer the program should check for (inbound) */ * outbound: next buffer to check for having been processed
volatile int first_to_check; * by the card
/* and the last time it was: */ */
volatile int last_move_ftc; int first_to_check;
atomic_t number_of_buffers_used; /* first_to_check of the last time */
atomic_t polling; int last_move_ftc;
unsigned int siga_in; /* beginning position for calling the program */
unsigned int siga_out; int first_to_kick;
unsigned int siga_sync;
unsigned int siga_sync_done_on_thinints;
unsigned int siga_sync_done_on_outb_tis;
unsigned int hydra_gives_outbound_pcis;
/* used to save beginning position when calling dd_handlers */ /* number of buffers in use by the adapter */
int first_element_to_kick; atomic_t nr_buf_used;
atomic_t use_count; struct qdio_irq *irq_ptr;
atomic_t is_in_shutdown;
void *irq_ptr;
struct timer_list timer;
#ifdef QDIO_USE_TIMERS_FOR_POLLING
atomic_t timer_already_set;
spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
/* error condition during a data transfer */
enum qdio_irq_states state;
/* used to store the error condition during a data transfer */
unsigned int qdio_error; unsigned int qdio_error;
unsigned int siga_error;
unsigned int error_status_flags;
/* list of interesting queues */
volatile struct qdio_q *list_next;
volatile struct qdio_q *list_prev;
struct sl *sl; struct sl *sl;
volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; /*
* Warning: Leave this member at the end so it won't be cleared in
unsigned long int_parm; * qdio_fill_qs. A page is allocated under this pointer and used for
* slib and sl. slib is 2048 bytes big and sl points to offset
/*struct { * PAGE_SIZE / 2.
int in_bh_check_limit; */
int threshold; struct slib *slib;
} threshold_classes[QDIO_STATS_CLASSES];*/
struct {
/* inbound: the time to stop polling
outbound: the time to kick peer */
int threshold; /* the real value */
/* outbound: last time of do_QDIO
inbound: last time of noticing incoming data */
/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
int last_transfer_index; */
__u64 last_transfer_time;
__u64 busy_start;
} timing;
atomic_t busy_siga_counter;
unsigned int queue_type;
unsigned int is_pci_out;
/* leave this member at the end. won't be cleared in qdio_fill_qs */
struct slib *slib; /* a page is allocated under this pointer,
sl points into this page, offset PAGE_SIZE/2
(after slib) */
} __attribute__ ((aligned(256))); } __attribute__ ((aligned(256)));
struct qdio_irq { struct qdio_irq {
__u32 * volatile dev_st_chg_ind; struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
unsigned long int_parm; unsigned long int_parm;
struct subchannel_id schid; struct subchannel_id schid;
unsigned long sch_token; /* QEBSM facility */
unsigned int is_iqdio_irq;
unsigned int is_thinint_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
/* QEBSM facility */
unsigned int is_qebsm;
unsigned long sch_token;
enum qdio_irq_states state; enum qdio_irq_states state;
unsigned int no_input_qs; struct siga_flag siga_flag; /* siga sync information from qdioac */
unsigned int no_output_qs;
unsigned char qdioac; int nr_input_qs;
int nr_output_qs;
struct ccw1 ccw; struct ccw1 ccw;
struct ciw equeue; struct ciw equeue;
struct ciw aqueue; struct ciw aqueue;
struct qib qib; struct qdio_ssqd_desc ssqd_desc;
void (*original_int_handler) (struct ccw_device *, void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
unsigned long, struct irb *);
/* leave these four members together at the end. won't be cleared in qdio_fill_irq */ /*
* Warning: Leave these members together at the end so they won't be
* cleared in qdio_setup_irq.
*/
struct qdr *qdr; struct qdr *qdr;
unsigned long chsc_page;
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct semaphore setting_up_sema;
struct mutex setup_mutex;
}; };
#endif
/* helper functions */
#define queue_type(q) q->irq_ptr->qib.qfmt
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
return (q->irq_ptr->nr_output_qs > 1) &&
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
static inline unsigned long long get_usecs(void)
{
return monotonic_clock() >> 12;
}
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
#define for_each_input_queue(irq_ptr, q, i) \
for (i = 0, q = irq_ptr->input_qs[0]; \
i < irq_ptr->nr_input_qs; \
q = irq_ptr->input_qs[++i])
#define for_each_output_queue(irq_ptr, q, i) \
for (i = 0, q = irq_ptr->output_qs[0]; \
i < irq_ptr->nr_output_qs; \
q = irq_ptr->output_qs[++i])
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
#define next_buf(bufnr) \
((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
void qdio_sync_after_thinint(struct qdio_q *q);
int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
void qdio_check_outbound_after_thinint(struct qdio_q *q);
int qdio_inbound_q_moved(struct qdio_q *q);
void qdio_kick_inbound_handler(struct qdio_q *q);
void qdio_stop_polling(struct qdio_q *q);
int qdio_siga_sync_q(struct qdio_q *q);
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
void qdio_outbound_timer(unsigned long data);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
int qdio_setup_init(void);
void qdio_setup_exit(void);
#endif /* _CIO_QDIO_H */
/*
* drivers/s390/cio/qdio_debug.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_trace;
static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
/*
 * Trace the contents of a qdio_initialize request into the "setup" debug
 * feature area. Pure tracing helper: reads init_data only, no side effects
 * beyond the debug log.
 *
 * Scalar fields are formatted into a short text buffer first (the s390
 * debug facility stores fixed-size records); pointer-sized fields are
 * logged as raw hex dumps instead.
 */
void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
	char dbf_text[20];

	sprintf(dbf_text, "qfmt:%x", init_data->q_format);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
	sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
	QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
	QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
	sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
	QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
	QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
	QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
	QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
	QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
}
static void qdio_unregister_dbf_views(void)
{
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
}
/*
 * Register the two qdio debug feature areas ("qdio_setup" and
 * "qdio_trace"), attach a hex/ascii view to each and set the initial
 * trace level.
 *
 * Returns 0 on success or -ENOMEM if either registration fails; on
 * failure any area registered so far is torn down again.
 */
static int qdio_register_dbf_views(void)
{
	qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
					QDIO_DBF_SETUP_NR_AREAS,
					QDIO_DBF_SETUP_LEN);
	if (qdio_dbf_setup) {
		debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
		debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);

		qdio_dbf_trace = debug_register("qdio_trace",
						QDIO_DBF_TRACE_PAGES,
						QDIO_DBF_TRACE_NR_AREAS,
						QDIO_DBF_TRACE_LEN);
		if (qdio_dbf_trace) {
			debug_register_view(qdio_dbf_trace,
					    &debug_hex_ascii_view);
			debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
			return 0;
		}
	}
	/* out of memory: undo whatever was registered above */
	qdio_unregister_dbf_views();
	return -ENOMEM;
}
/*
 * debugfs read handler: dump the state of one qdio queue.
 *
 * Prints the device state change indicator, the buffer-in-use count, the
 * scan positions, the polling flag and a one-character-per-buffer map of
 * the SLSB states of all QDIO_MAX_BUFFERS_PER_Q buffers.
 */
static int qstat_show(struct seq_file *m, void *v)
{
	unsigned char state;
	struct qdio_q *q = m->private;
	int i;

	if (!q)
		return 0;

	seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
	seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
	seq_printf(m, "ftc: %d\n", q->first_to_check);
	seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
	/* NOTE(review): u.in.polling is printed even for output queues,
	 * where the union overlays qdio_output_q — value is then garbage */
	seq_printf(m, "polling: %d\n", q->u.in.polling);
	seq_printf(m, "slsb buffer states:\n");

	/* sync with the adapter so the SLSB states below are current */
	qdio_siga_sync_q(q);
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		get_buf_state(q, i, &state);
		switch (state) {
		case SLSB_P_INPUT_NOT_INIT:
		case SLSB_P_OUTPUT_NOT_INIT:
			seq_printf(m, "N");
			break;
		case SLSB_P_INPUT_PRIMED:
		case SLSB_CU_OUTPUT_PRIMED:
			seq_printf(m, "+");
			break;
		case SLSB_P_INPUT_ACK:
			seq_printf(m, "A");
			break;
		case SLSB_P_INPUT_ERROR:
		case SLSB_P_OUTPUT_ERROR:
			seq_printf(m, "x");
			break;
		case SLSB_CU_INPUT_EMPTY:
		case SLSB_P_OUTPUT_EMPTY:
			seq_printf(m, "-");
			break;
		case SLSB_P_INPUT_HALTED:
		case SLSB_P_OUTPUT_HALTED:
			seq_printf(m, ".");
			break;
		default:
			/* unknown/unexpected SLSB value */
			seq_printf(m, "?");
		}
		/* wrap the state map after the first 64 buffers */
		if (i == 63)
			seq_printf(m, "\n");
	}
	seq_printf(m, "\n");
	return 0;
}
/*
 * Any write to the debugfs file pokes the queue: for an input queue the
 * device state change indicator is set and the tasklet is scheduled.
 * The written data itself is ignored.
 */
static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *off)
{
	struct seq_file *sfile = file->private_data;
	struct qdio_q *q = sfile->private;

	if (!q)
		return 0;

	if (q->is_input_q)
		xchg(q->irq_ptr->dsci, 1);
	local_bh_disable();
	tasklet_schedule(&q->tasklet);
	local_bh_enable();
	return count;
}
/* open: hand the qdio_q stored in the inode's private data to qstat_show */
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
	void *q = filp->f_path.dentry->d_inode->i_private;

	return single_open(filp, qstat_show, q);
}
/*
 * Build the debugfs file name "<bus_id>_input|output_<nr>" for @q into
 * the caller-provided buffer @name (the caller passes a 40-byte buffer).
 *
 * Note: the old code did memset(name, 0, sizeof(name)); since @name is
 * a pointer parameter, sizeof(name) is the pointer size, not the buffer
 * size, so only 4/8 bytes were cleared. The memset is dropped entirely:
 * sprintf NUL-terminates the string, which is all that is needed.
 */
static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
{
	sprintf(name, "%s_%s_%d", cdev->dev.bus_id,
		q->is_input_q ? "input" : "output", q->nr);
}
static void remove_debugfs_entry(struct qdio_q *q)
{
int i;
for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
if (!debugfs_queues[i])
continue;
if (debugfs_queues[i]->d_inode->i_private == q) {
debugfs_remove(debugfs_queues[i]);
debugfs_queues[i] = NULL;
}
}
}
/*
 * File operations for the per-queue debugfs files: reads dump the queue
 * state via qstat_show, writes trigger queue processing (qstat_seq_write).
 */
static struct file_operations debugfs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qstat_seq_open,
	.read	 = seq_read,
	.write	 = qstat_seq_write,
	.llseek  = seq_lseek,
	.release = single_release,
};
/*
 * Create a debugfs file for @q in the first free slot of the
 * debugfs_queues table. Silently does nothing when the table is full.
 */
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
	char name[40];
	int i = 0;

	/* find the first free slot, bail out if there is none */
	while (debugfs_queues[i]) {
		if (++i >= MAX_DEBUGFS_QUEUES)
			return;
	}
	get_queue_name(q, cdev, name);
	debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
						debugfs_root, q, &debugfs_fops);
}
/* Create debugfs entries for all input and output queues of @irq_ptr. */
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct qdio_q *queue;
	int n;

	mutex_lock(&debugfs_mutex);
	for_each_input_queue(irq_ptr, queue, n)
		setup_debugfs_entry(queue, cdev);
	for_each_output_queue(irq_ptr, queue, n)
		setup_debugfs_entry(queue, cdev);
	mutex_unlock(&debugfs_mutex);
}
/* Remove the debugfs entries of all input and output queues of @irq_ptr. */
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct qdio_q *queue;
	int n;

	mutex_lock(&debugfs_mutex);
	for_each_input_queue(irq_ptr, queue, n)
		remove_debugfs_entry(queue);
	for_each_output_queue(irq_ptr, queue, n)
		remove_debugfs_entry(queue);
	mutex_unlock(&debugfs_mutex);
}
/* Module init: create the debugfs root directory and the s390dbf views. */
int __init qdio_debug_init(void)
{
	debugfs_root = debugfs_create_dir("qdio_queues", NULL);
	return qdio_register_dbf_views();
}
/* Module exit: remove the debugfs root and unregister the s390dbf views. */
void qdio_debug_exit(void)
{
	debugfs_remove(debugfs_root);
	qdio_unregister_dbf_views();
}
/*
 * drivers/s390/cio/qdio_debug.h
 *
 * Copyright IBM Corp. 2008
 *
 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
 */
#ifndef QDIO_DEBUG_H
#define QDIO_DEBUG_H

#include <asm/debug.h>
#include <asm/qdio.h>
#include "qdio.h"

/*
 * Log @len bytes at @addr to the s390 debug area qdio_dbf_<name> at
 * @level; with @ex != 0 the entry is recorded as an exception (raises
 * the area's level temporarily so it is always stored).
 */
#define QDIO_DBF_HEX(ex, name, level, addr, len) \
	do { \
	if (ex) \
		debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
	else \
		debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
	} while (0)

/* Same as QDIO_DBF_HEX but for a short text entry. */
#define QDIO_DBF_TEXT(ex, name, level, text) \
	do { \
	if (ex) \
		debug_text_exception(qdio_dbf_##name, level, text); \
	else \
		debug_text_event(qdio_dbf_##name, level, text); \
	} while (0)

/* Levels 0-2 are always compiled in. */
#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)

/* Levels 3-6 compile to no-ops unless CONFIG_QDIO_DEBUG is set. */
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
#else
#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */

#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)

#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
#else
#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */

/* s390dbf views: size/area/level parameters for the two debug areas */
#define QDIO_DBF_SETUP_LEN		8
#define QDIO_DBF_SETUP_PAGES		4
#define QDIO_DBF_SETUP_NR_AREAS		1

#define QDIO_DBF_TRACE_LEN		8
#define QDIO_DBF_TRACE_NR_AREAS		2

/* bigger buffers and higher default levels with CONFIG_QDIO_DEBUG */
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_PAGES		16
#define QDIO_DBF_SETUP_LEVEL		6
#define QDIO_DBF_TRACE_LEVEL		4
#else /* !CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_PAGES		4
#define QDIO_DBF_SETUP_LEVEL		2
#define QDIO_DBF_TRACE_LEVEL		2
#endif /* CONFIG_QDIO_DEBUG */

extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_trace;

void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
void debug_print_bstat(struct qdio_q *q);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
			      struct ccw_device *cdev);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
				 struct ccw_device *cdev);
int qdio_debug_init(void);
void qdio_debug_exit(void);
#endif
/*
* linux/drivers/s390/cio/qdio_main.c
*
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
* Copyright 2000,2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
/*
 * Issue the SIGA instruction with function code 2 (synchronize) for the
 * given subchannel; @out_mask/@in_mask select the output/input queues to
 * sync. The register variables pin the operands to the GPRs the
 * instruction expects (r0 = fc, r1 = schid, r2/r3 = masks). Returns the
 * condition code extracted via ipm/srl.
 */
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
/*
 * Issue SIGA with function code 1 (initiate input) for the input queues
 * selected by @mask. "memory" clobber: the instruction makes the adapter
 * update the SLSBs, so cached memory state must be considered stale.
 * Returns the condition code.
 */
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	/* preset cc: stays at ACCESS_EXCEPTION if siga faults and the
	 * exception table entry skips the ipm */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* the busy bit is returned by the instruction in bit 0 of r0 */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
/*
 * Classify a QEBSM condition code qualifier:
 * 0 on completion, 1 when the caller should retry, -EIO otherwise.
 */
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	char dbf_text[15];

	switch (ccq) {
	case 0:
	case 32:
		/* all done or next buffer state different */
		return 0;
	case 96:
	case 97:
		/* not all buffers processed */
		return 1;
	default:
		/* notify devices immediately */
		sprintf(dbf_text, "%d", ccq);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		return -EIO;
	}
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 *
 * Returns the number of successfull extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);

	/* output queues come after the input queues in the EQBS numbering */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_eqbs updates tmp_start/tmp_count to reflect remaining work */
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count))
		return (count - tmp_count);
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
		goto again;
	}

	if (rc < 0) {
		QDIO_DBF_TEXT2(1, trace, "eqberr");
		sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		/* report the failure to the upper layer and give up */
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);

	/* output queues come after the input queues in the SQBS numbering */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_sqbs updates tmp_start/tmp_count to reflect remaining work */
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
		goto again;
	}
	if (rc < 0) {
		QDIO_DBF_TEXT3(1, trace, "sqberr");
		sprintf(dbf_text, "%2x,%2x", count, tmp_count);
		QDIO_DBF_TEXT3(1, trace, dbf_text);
		sprintf(dbf_text, "%d,%d", ccq, nr);
		QDIO_DBF_TEXT3(1, trace, dbf_text);
		/* report the failure to the upper layer and give up */
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	/* on success all requested buffers must have been changed */
	WARN_ON(tmp_count);
	return count - tmp_count;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count)
{
unsigned char __state = 0;
int i;
BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count);
for (i = 0; i < count; i++) {
if (!__state)
__state = q->slsb.val[bufnr];
else if (q->slsb.val[bufnr] != __state)
break;
bufnr = next_buf(bufnr);
}
*state = __state;
return i;
}
/* convenience wrapper: fetch the state of a single buffer */
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state)
{
	return get_buf_states(q, bufnr, state, 1);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
unsigned char state, int count)
{
int i;
BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
for (i = 0; i < count; i++) {
xchg(&q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
return count;
}
/* convenience wrapper: set the state of a single buffer */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *queue;
	int n;

	for_each_input_queue(irq_ptr, queue, n)
		set_buf_states(queue, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, queue, n)
		set_buf_states(queue, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
/*
 * Issue SIGA-s for @q if the device requires it; @output/@input select
 * the queues to synchronize. Returns the condition code (0 on success).
 */
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc != 0) {
		QDIO_DBF_TEXT4(0, trace, "sigasync");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	}
	return cc;
}
/* sync only the queue @q belongs to (input or output direction) */
inline int qdio_siga_sync_q(struct qdio_q *q)
{
	return q->is_input_q ? qdio_siga_sync(q, 0, q->mask)
			     : qdio_siga_sync(q, q->mask, 0);
}
/* sync all output queues of the subchannel */
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}
/* sync all input and output queues of the subchannel */
static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
/*
 * Issue SIGA-w for @q. With QEBSM the subchannel token is used instead
 * of the subchannel id and bit 0x80 is set in the function code.
 */
static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = q->irq_ptr->sch_token;
	unsigned int fc = 0x80;

	if (!is_qebsm(q)) {
		schid = *((u32 *)&q->irq_ptr->schid);
		fc = 0;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}
/*
 * Issue SIGA-w for @q. For HiperSockets (IQDIO format) a cc=2 with busy
 * bit set is retried for up to QDIO_BUSY_BIT_PATIENCE microseconds;
 * if it persists, QDIO_ERROR_SIGA_BUSY is or'ed into the returned cc so
 * the caller can handle the busy condition.
 */
static int qdio_siga_output(struct qdio_q *q)
{
	int cc;
	u32 busy_bit;
	u64 start_time = 0;

	QDIO_DBF_TEXT5(0, trace, "sigaout");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		/* remember when the busy condition was first seen */
		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
/* issue SIGA-r for the input queue @q, returns the condition code */
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0, trace, "sigain");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	/* without outbound PCI support a plain per-queue sync suffices */
	if (!pci_out_supported(q)) {
		qdio_siga_sync_q(q);
		return;
	}

	if (need_siga_sync_thinint(q))
		qdio_siga_sync_all(q);
	else if (need_siga_sync_out_thinint(q))
		qdio_siga_sync_out(q);
}
/*
 * Leave polling mode on input queue @q: clear the polling flag and give
 * the ACK'ed buffer back to the adapter. The input lock serializes this
 * against the inbound processing path that sets u.in.polling.
 */
inline void qdio_stop_polling(struct qdio_q *q)
{
	spin_lock_bh(&q->u.in.lock);
	if (!q->u.in.polling) {
		spin_unlock_bh(&q->u.in.lock);
		return;
	}
	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	spin_unlock_bh(&q->u.in.lock);
}
/*
 * Record an SLSB error state in the trace area (buffer number plus the
 * SBAL error flags in elements 14/15) and flag the queue so the error
 * is reported to the upper-layer handler.
 */
static void announce_buffer_error(struct qdio_q *q)
{
	char dbf_text[15];

	if (q->is_input_q)
		QDIO_DBF_TEXT3(1, trace, "inperr");
	else
		QDIO_DBF_TEXT3(0, trace, "outperr");

	sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
		q->sbal[q->first_to_check]->element[14].flags,
		q->sbal[q->first_to_check]->element[15].flags);
	QDIO_DBF_TEXT3(1, trace, dbf_text);
	QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);

	q->qdio_error = QDIO_ERROR_SLSB_STATE;
}
/*
 * Advance first_to_check over all inbound buffers the adapter has
 * handed to us (PRIMED) or flagged as erroneous, updating the SLSB and
 * the used-buffer count on the way. Returns the new first_to_check.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * If we still poll don't update last_move_ftc, keep the
	 * previously ACK buffer there.
	 */
	if (!q->u.in.polling)
		q->last_move_ftc = q->first_to_check;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * will sync the queues.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	/* count/state now reflect a run of buffers in the same state */
	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0, trace, "inptprim");

		/*
		 * Only ACK the first buffer. The ACK will be removed in
		 * qdio_stop_polling.
		 */
		if (q->u.in.polling)
			state = SLSB_P_INPUT_NOT_INIT;
		else {
			q->u.in.polling = 1;
			state = SLSB_P_INPUT_ACK;
		}
		set_buf_state(q, q->first_to_check, state);

		/*
		 * Need to change all PRIMED buffers to NOT_INIT, otherwise
		 * we're loosing initiative in the thinint code.
		 */
		if (count > 1)
			set_buf_states(q, next_buf(q->first_to_check),
				       SLSB_P_INPUT_NOT_INIT, count - 1);

		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new from the adapter, stop scanning */
		QDIO_DBF_TEXT5(0, trace, "inpnipro");
		break;
	default:
		BUG();
	}
out:
	QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
	return q->first_to_check;
}
/*
 * Returns 1 if the inbound frontier moved or an error is pending,
 * 0 otherwise. On movement the polling timestamp is refreshed for
 * devices that neither need siga-sync nor support outbound PCI.
 */
int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr = get_inbound_buffer_frontier(q);

	if (bufnr == q->last_move_ftc && !q->qdio_error)
		return 0;

	if (!need_siga_sync(q) && !pci_out_supported(q))
		q->u.in.timestamp = get_usecs();

	QDIO_DBF_TEXT4(0, trace, "inhasmvd");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
	return 1;
}
/*
 * Decide whether inbound processing for @q is finished. Returns 1 when
 * no used buffers remain, when the device will tell us about new work
 * anyway (siga-sync or outbound PCI), or when the polling threshold
 * expired; returns 0 while we should keep polling.
 */
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisdon");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 1;
	} else {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisntd");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 0;
	}
}
/*
 * Report the buffers between first_to_kick and first_to_check to the
 * upper-layer handler and reset the error state for the next round.
 * Nothing is reported when the irq is no longer active.
 */
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	start = q->first_to_kick;
	end = q->first_to_check;
	/* handle the wrap-around of the circular buffer index */
	count = (end >= start) ? end - start
			       : end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
/*
 * Core of the inbound tasklet: report new buffers to the handler and
 * loop while the polling period has not expired; afterwards drop the
 * ACK state and re-check once to not miss buffers primed in between.
 */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}
/* inbound tasklet entry point; data is the struct qdio_q pointer */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
/*
 * Advance first_to_check over all outbound buffers the adapter has
 * finished (EMPTY) or flagged as erroneous. Returns the new
 * first_to_check.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/* sync queues that do not signal completion via PCI */
	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	/* count/state now reflect a run of buffers in the same state */
	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		QDIO_DBF_TEXT5(0, trace, "outpempt");

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		QDIO_DBF_TEXT5(0, trace, "outpprim");
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}
/*
 * Returns 1 if the outbound frontier moved or an error is pending,
 * 0 otherwise; remembers the new frontier in last_move_ftc.
 */
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr = get_outbound_buffer_frontier(q);

	if (bufnr == q->last_move_ftc && !q->qdio_error)
		return 0;

	q->last_move_ftc = bufnr;
	QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
	return 1;
}
/*
* VM could present us cc=2 and busy bit set on SIGA-write
* during reconfiguration of their Guest LAN (only in iqdio mode,
* otherwise qdio is asynchronous and cc=2 and busy bit there will take
* the queues down immediately).
*
* Therefore qdio_siga_output will try for a short time constantly,
* if such a condition occurs. If it doesn't change, it will
* increase the busy_siga_counter and save the timestamp, and
* schedule the queue for later processing. qdio_outbound_processing
* will check out the counter. If non-zero, it will call qdio_kick_outbound_q
* as often as the value of the counter. This will attempt further SIGA
* instructions. For each successful SIGA, the counter is
* decreased, for failing SIGAs the counter remains the same, after
* all. After some time of no movement, qdio_kick_outbound_q will
* finally fail and reflect corresponding error codes to call
* the upper layer module and have it take the queues down.
*
* Note that this is a change from the original HiperSockets design
* (saying cc=2 and busy bit means take the queues down), but in
* these days Guest LAN didn't exist... excessive cc=2 with busy bit
* conditions will still take the queues down, but the threshold is
* higher due to the Guest LAN environment.
*
* Called from outbound tasklet and do_QDIO handler.
*/
/*
 * Issue a SIGA-w for @q (if the device needs one) and handle the busy
 * condition: within QDIO_BUSY_BIT_GIVE_UP the queue tasklet is simply
 * rescheduled, afterwards the error is reported via q->qdio_error.
 */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "kickoutq");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
#endif /* CONFIG_QDIO_DEBUG */

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;

		/* TODO: improve error handling for CC=0 case */
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT3(0, trace, "cc2reslv");
		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
			atomic_read(&q->u.out.busy_siga_counter));
		QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
			atomic_read(&q->u.out.busy_siga_counter));
		QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		/* busy bit gave up: fall through to report the error */
	default:
		/* for plain cc=1, 2 or 3 */
		q->qdio_error = rc;
	}
}
/*
 * Report the completed outbound buffers between first_to_kick and
 * last_move_ftc to the upper-layer handler and reset the error state.
 * Nothing is reported when the irq is no longer active.
 */
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	start = q->first_to_kick;
	end = q->last_move_ftc;
	/* handle the wrap-around of the circular buffer index */
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "kickouth");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
/*
 * Core of the outbound tasklet: retry pending busy SIGAs, report
 * completed buffers and, depending on the queue type, reschedule the
 * tasklet or arm a timer to notice late completions.
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}
/* outbound tasklet entry point; data is the struct qdio_q pointer */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
/* outbound timer callback: just reschedule the queue tasklet */
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *queue = (struct qdio_q *)data;

	tasklet_schedule(&queue->tasklet);
}
/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
if (!pci_out_supported(q))
return;
for_each_output_queue(q->irq_ptr, out, i)
if (!qdio_outbound_q_done(out))
tasklet_schedule(&out->tasklet);
}
/*
 * Set the irq state and make the store visible to other CPUs before
 * any subsequent accesses (mb).
 */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "newstate");
	sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
	QDIO_DBF_TEXT5(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	irq_ptr->state = state;
	mb();
}
/* if the irb carries concurrent sense data, dump irb and ecw to the trace */
static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
{
	char dbf_text[15];

	if (!irb->esw.esw0.erw.cons)
		return;

	sprintf(dbf_text, "sens%4x", schid.sch_no);
	QDIO_DBF_TEXT2(1, trace, dbf_text);
	QDIO_DBF_HEX0(0, trace, irb, 64);
	QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	/* input processing always runs in tasklet context */
	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	/* kick outbound queues that still have used buffers */
	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		/* sync unless the SIGA-out already syncs with PCI */
		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
/*
 * Handle a channel/device check while ACTIVATE is in progress: trace
 * the status, report the condition through any available queue handler
 * and stop the irq.
 */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	char dbf_text[15];

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1, trace, dbf_text);
	QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));

	/* any queue's handler will do, they all belong to this device */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
/*
 * Workqueue callback: perform the qdio shutdown that could not be done
 * in interrupt context, then drop the device reference taken when the
 * work was queued.
 */
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv =
		container_of(work, struct ccw_device_private, kick_work);
	struct ccw_device *cdev = priv->cdev;

	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}
/*
 * Handle an I/O error reported through the interrupt handler. For an
 * established/active irq the shutdown is deferred to a workqueue since
 * qdio_shutdown must not run in interrupt context.
 */
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		/* the reference is dropped in qdio_call_shutdown */
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/*
 * Check the subchannel/device status of the establish ccw. Returns 0
 * when the status indicates clean completion (device end present, no
 * channel status, no unexpected device status bits), otherwise traces
 * the status, puts the irq into error state and returns 1.
 *
 * Note: the old code repeated the "dstat & ~(CHN_END|DEV_END)" test a
 * second time after the first check; since the first check already
 * branches to error for exactly that condition, the duplicate was
 * unreachable and has been removed.
 */
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1, setup, "eq:no de");
		goto error;
	}
	return 0;
error:
	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}
/*
 * Interrupt during establish: trace it and, if the status is clean,
 * move the irq to the ESTABLISHED state.
 */
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	char dbf_text[15];

	sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	if (qdio_establish_check_errors(cdev, cstat, dstat))
		return;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;
	char dbf_text[15];

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	/* ignore spurious interrupts for devices without qdio data */
	if (!intparm || !irq_ptr) {
		sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		return;
	}

	/* cio passes error conditions as an error pointer instead of an irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			sprintf(dbf_text, "ierr%4x",
				cdev->private->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			qdio_int_error(cdev);
			return;
		case -ETIMEDOUT:
			sprintf(dbf_text, "qtoh%4x",
				cdev->private->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr->schid, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
		/* no PCI and no error status: fall through to WARN_ON */
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 *
 * Returns a pointer to the saved qdio subchannel description,
 * or NULL for not setup qdio devices.
 */
struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	QDIO_DBF_TEXT0(0, setup, "getssqd");

	if (irq_ptr == NULL)
		return NULL;
	return &irq_ptr->ssqd_desc;
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	char dbf_text[15];
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT1(0, trace, dbf_text);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	rc = qdio_shutdown(cdev, how);
	/* only release the memory if the shutdown worked */
	return rc ? rc : qdio_free(cdev);
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
/* keep the queue tasklets (and the outbound timers) from running again */
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int nr;

	for_each_input_queue(irq_ptr, q, nr)
		tasklet_disable(&q->tasklet);

	for_each_output_queue(irq_ptr, q, nr) {
		/* disable first so the timer cannot be re-armed afterwards */
		tasklet_disable(&q->tasklet);
		del_timer(&q->u.out.timer);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long flags;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT1(0, trace, dbf_text);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	/* stop adapter-interrupt delivery and all queue processing first */
	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		sprintf(dbf_text, "rc=%d", rc);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	/* drop the lock so the interrupt handler can move the state */
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	/*
	 * NOTE(review): the return value of the interruptible wait is
	 * ignored; on a signal or timeout we proceed with teardown in
	 * whatever state the subchannel is in - confirm this is intended.
	 */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	/* balances the try_module_get() in qdio_establish() */
	module_put(THIS_MODULE);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 *
 * Detaches the qdio data from @cdev and releases all queue memory.
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	char dbf_text[15];

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT1(0, trace, dbf_text);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	/* detach under the mutex so concurrent users see NULL */
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	char dbf_text[15];
	int rc;

	sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	rc = qdio_allocate(init_data);
	if (rc == 0) {
		rc = qdio_establish(init_data);
		/* undo the allocation if the establish failed */
		if (rc)
			qdio_free(init_data->cdev);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 *
 * Returns 0 on success, -EINVAL for inconsistent @init_data and
 * -ENOMEM when an allocation fails.
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	/* a queue without a handler (and vice versa) makes no sense */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	qdio_allocate_do_dbf(init_data);

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;
	QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
	QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));

	mutex_init(&irq_ptr->setup_mutex);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	/* the hardware requires a 4k aligned qdr */
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	QDIO_DBF_TEXT0(0, setup, "qdr:");
	QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	/* frees everything allocated so far, including irq_ptr itself */
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 *
 * Requires a prior successful qdio_allocate() for the same cdev.
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	char dbf_text[20];
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	/* the reference is dropped again by qdio_shutdown() */
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "eq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* qdio_establish_handle_irq() moves the state to ESTABLISHED/ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
	QDIO_DBF_TEXT2(0, setup, dbf_text);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Requires a prior successful qdio_establish() for @cdev.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;
	char dbf_text[20];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	/* the subchannel must be established at this point */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT2(0, setup, dbf_text);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "aq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		/* drop the mutex first: qdio_shutdown takes it itself */
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
/*
 * Return 1 if @bufnr lies inside the half-open range of @count buffers
 * starting at @start, taking the circular buffer numbering into account.
 */
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start)
		return (bufnr >= start) && (bufnr < end);

	/* wrap-around case: the range is [start, max] plus [0, end) */
	return ((bufnr >= start) && (bufnr <= QDIO_MAX_BUFFERS_PER_Q)) ||
		(bufnr < end);
}
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Called from do_QDIO() when the upper layer returns emptied buffers.
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned long flags;
	int used, rc;

	/*
	 * do_QDIO could run in parallel with the queue tasklet so the
	 * upper-layer programm could empty the ACK'ed buffer here.
	 * If that happens we must clear the polling flag, otherwise
	 * qdio_stop_polling() could set the buffer to NOT_INIT after
	 * it was set to EMPTY which would kill us.
	 */
	spin_lock_irqsave(&q->u.in.lock, flags);
	if (q->u.in.polling)
		if (buf_in_between(q->last_move_ftc, bufnr, count))
			q->u.in.polling = 0;

	/* hand the buffers back to the adapter */
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	spin_unlock_irqrestore(&q->u.in.lock, flags);

	/* used = buffers that were still in use before this call */
	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 *
 * Called from do_QDIO() to pass filled buffers to the adapter.
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	/* remember whether the caller asked for a PCI on completion */
	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else
			/*
			 * One siga-w per buffer required for unicast
			 * HiperSockets.
			 */
			while (count--)
				qdio_kick_outbound_q(q);
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		QDIO_DBF_TEXT5(0, trace, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 *
 * Returns 0 on success, -EINVAL on invalid parameters, -EBUSY if the
 * queues are not active and -ENODEV if @cdev was not set up for qdio.
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[20];

	sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	/*
	 * Valid buffer numbers are 0..QDIO_MAX_BUFFERS_PER_Q - 1 and valid
	 * queue numbers are 0..QDIO_MAX_QUEUES_PER_IRQ - 1. The former
	 * ">" comparisons let the one-past-the-end values slip through
	 * and index beyond the queue arrays, so use ">=" for both.
	 * @count may legitimately equal QDIO_MAX_BUFFERS_PER_Q.
	 */
	if ((bufnr >= QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
			      sizeof(void *));
	else
		QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
			      sizeof(void *));

	sprintf(dbf_text, "flag%04x", callflags);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
	sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
	QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		handle_inbound(irq_ptr->input_qs[q_nr],
			       callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		handle_outbound(irq_ptr->output_qs[q_nr],
				callflags, bufnr, count);
	else {
		QDIO_DBF_TEXT3(1, trace, "doQD:inv");
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
/* module init: bring up all qdio subsystems, unwinding on any failure */
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto err_setup;
	rc = qdio_debug_init();
	if (rc)
		goto err_tiq_mem;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto err_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto err_perf;
	return 0;

err_perf:
	qdio_remove_perf_stats();
err_debug:
	qdio_debug_exit();
err_tiq_mem:
	tiqdio_free_memory();
err_setup:
	qdio_setup_exit();
	return rc;
}
/* module exit: tear everything down in reverse order of init_QDIO() */
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);
/*
* drivers/s390/cio/qdio_perf.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/ccwdev.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
/* toggled via the qdio_performance_stats bus attribute below; 0 = off */
int qdio_performance_stats;
/* global counter set, folded into /proc/qdio_perf */
struct qdio_perf_stats perf_stats;

#ifdef CONFIG_PROC_FS
/* handle of the /proc/qdio_perf entry, created in qdio_setup_perf_stats() */
static struct proc_dir_entry *qdio_perf_pde;
#endif
/**
 * qdio_perf_stat_inc - increment a performance counter
 * @count: counter to increment
 *
 * No-op unless statistics gathering was enabled via the
 * qdio_performance_stats attribute.
 *
 * This function has external linkage (declared extern in qdio_perf.h),
 * so the previous bare "inline" is dropped: a non-static inline at file
 * scope only emits an out-of-line definition under gnu89 inline
 * semantics and provides no external definition under C99 inline rules.
 */
void qdio_perf_stat_inc(atomic_long_t *count)
{
	if (qdio_performance_stats)
		atomic_long_inc(count);
}
/**
 * qdio_perf_stat_dec - decrement a performance counter
 * @count: counter to decrement
 *
 * No-op unless statistics gathering is enabled. As with
 * qdio_perf_stat_inc() the bare "inline" is dropped because the
 * function is declared extern in qdio_perf.h and a non-static inline
 * emits no external definition under C99 inline semantics.
 */
void qdio_perf_stat_dec(atomic_long_t *count)
{
	if (qdio_performance_stats)
		atomic_long_dec(count);
}
/*
 * procfs functions
 */

/* seq_file show callback: dump all counters into /proc/qdio_perf */
static int qdio_perf_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.qdio_int));
	seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.pci_int));
	seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.thin_int));
	seq_printf(m, "\n");
	seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_inbound));
	seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_outbound));
	seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_thinint),
		   (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
	seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
		   (long)atomic_long_read(&perf_stats.thinint_inbound),
		   (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
	seq_printf(m, "\n");
	seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_in));
	seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_out));
	seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_sync));
	seq_printf(m, "\n");
	seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.inbound_handler));
	seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.outbound_handler));
	seq_printf(m, "\n");
	seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
		   (long)atomic_long_read(&perf_stats.fast_requeue));
	seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
		   (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
	seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.debug_stop_polling));
	seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
		   (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
	seq_printf(m, "\n");
	return 0;
}
/* open callback for /proc/qdio_perf, wires up the seq_file show routine */
static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, qdio_perf_proc_show, NULL);
}
static struct file_operations qdio_perf_proc_fops = {
.owner = THIS_MODULE,
.open = qdio_perf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
 * sysfs functions
 */

/* show the current 0/1 state of statistics gathering */
static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
{
	int enabled = qdio_performance_stats ? 1 : 0;

	return sprintf(buf, "%i\n", enabled);
}
/* enable (1) or disable (0) statistics gathering; disabling resets all */
static ssize_t qdio_perf_stats_store(struct bus_type *bus,
				     const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 16, &val) != 0)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	if (val != qdio_performance_stats) {
		qdio_performance_stats = val;
		/* reset performance statistics when switching off */
		if (val == 0)
			memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
	}
	return count;
}
/* /sys/bus/ccw/qdio_performance_stats: world-readable, root-writable */
static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
		qdio_perf_stats_store);
/* called once from init_QDIO(): create the sysfs attribute and proc file */
int __init qdio_setup_perf_stats(void)
{
	int rc;

	rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
	if (rc)
		return rc;

#ifdef CONFIG_PROC_FS
	memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
	/*
	 * NOTE(review): proc_create() failure is silently ignored; the
	 * module then loads without /proc/qdio_perf - confirm intended.
	 */
	qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
				    NULL, &qdio_perf_proc_fops);
#endif
	return 0;
}
/* counterpart to qdio_setup_perf_stats(), called from exit_QDIO() */
void __exit qdio_remove_perf_stats(void)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("qdio_perf", NULL);
#endif
	bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
}
/*
* drivers/s390/cio/qdio_perf.h
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#ifndef QDIO_PERF_H
#define QDIO_PERF_H
#include <linux/types.h>
#include <linux/device.h>
#include <asm/atomic.h>
/*
 * Global performance counters, incremented via qdio_perf_stat_inc()
 * only while qdio_performance_stats is set; dumped in /proc/qdio_perf.
 */
struct qdio_perf_stats {
	/* interrupt handler calls */
	atomic_long_t qdio_int;
	atomic_long_t pci_int;
	atomic_long_t thin_int;

	/* tasklet runs */
	atomic_long_t tasklet_inbound;
	atomic_long_t tasklet_outbound;
	atomic_long_t tasklet_thinint;
	atomic_long_t tasklet_thinint_loop;
	atomic_long_t thinint_inbound;
	atomic_long_t thinint_inbound_loop;
	atomic_long_t thinint_inbound_loop2;

	/* signal adapter calls */
	atomic_long_t siga_out;
	atomic_long_t siga_in;
	atomic_long_t siga_sync;

	/* misc */
	atomic_long_t inbound_handler;
	atomic_long_t outbound_handler;
	atomic_long_t fast_requeue;

	/* for debugging */
	atomic_long_t debug_tl_out_timer;
	atomic_long_t debug_stop_polling;
};
extern struct qdio_perf_stats perf_stats;
extern int qdio_performance_stats;
int qdio_setup_perf_stats(void);
void qdio_remove_perf_stats(void);
extern void qdio_perf_stat_inc(atomic_long_t *count);
extern void qdio_perf_stat_dec(atomic_long_t *count);
#endif
/*
* driver/s390/cio/qdio_setup.c
*
* qdio queue initialization
*
* Copyright (C) IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/qdio.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"
/* slab cache for the per-queue qdio_q structures, set up in qdio_setup_init() */
static struct kmem_cache *qdio_q_cache;
/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
	return css_general_characteristics.qebsm;
#else
	return 0;
#endif
}
/*
 * Copy caller-supplied implementation parameters into qib and slibs.
 *
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * input_slib_elements / output_slib_elements:
 *	pointer to nr_queues*128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
			    unsigned int qib_param_field_format,
			    unsigned char *qib_param_field,
			    unsigned long *input_slib_elements,
			    unsigned long *output_slib_elements)
{
	struct qdio_q *q;
	int i, j;

	if (!irq_ptr)
		return;

	/* the qib must be 256-byte aligned for the hardware */
	WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
	irq_ptr->qib.pfmt = qib_param_field_format;
	if (qib_param_field)
		memcpy(irq_ptr->qib.parm, qib_param_field,
		       QDIO_MAX_BUFFERS_PER_Q);

	if (!input_slib_elements)
		goto output;

	/* one parameter word per buffer, consecutively per queue */
	for_each_input_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
output:
	if (!output_slib_elements)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
}
/*
 * Allocate @nr_queues qdio_q structures plus one slib page each and
 * store them in @irq_ptr_qs. On failure already-stored queues remain
 * in the array for qdio_release_memory() to clean up.
 */
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
	int i;

	for (i = 0; i < nr_queues; i++) {
		struct qdio_q *q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);

		if (!q)
			return -ENOMEM;
		/* queue structures must be 256-byte aligned */
		WARN_ON((unsigned long)q & 0xff);

		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
		if (!q->slib) {
			kmem_cache_free(qdio_q_cache, q);
			return -ENOMEM;
		}
		/* the slib must be 2k aligned */
		WARN_ON((unsigned long)q->slib & 0x7ff);

		irq_ptr_qs[i] = q;
	}
	return 0;
}
/* allocate both queue arrays; caller cleans up via qdio_release_memory() */
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
	int rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);

	if (rc == 0)
		rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
	return rc;
}
/* wipe and (re)initialize the per-queue bookkeeping fields */
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
			      qdio_handler_t *handler, int i)
{
	/* must be cleared by every qdio_establish */
	/*
	 * only wipe up to (excluding) the slib pointer: the slib page was
	 * allocated separately and is cleared on its own just below
	 */
	memset(q, 0, ((char *)&q->slib) - ((char *)q));
	memset(q->slib, 0, PAGE_SIZE);
	q->irq_ptr = irq_ptr;
	/* presumably queue i's bit in a 32-bit summary mask - verify at use sites */
	q->mask = 1 << (31 - i);
	q->nr = i;
	q->handler = handler;
}
/*
 * Wire up the storage lists of one queue: sbal pointers from the caller,
 * the sl inside the second half of the slib page, and the slib chaining
 * to the previous queue of the same direction.
 */
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
				void **sbals_array, char *dbf_text, int i)
{
	struct qdio_q *prev;
	int j;

	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));

	/* the sl lives in the upper half of the slib page */
	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

	/* fill in sbal */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
		q->sbal[j] = *sbals_array++;
		/* sbals must be 256-byte aligned */
		WARN_ON((unsigned long)q->sbal[j] & 0xff);
	}

	/* fill in slib */
	if (i > 0) {
		/* chain previous queue's slib to this one */
		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
			: irq_ptr->output_qs[i - 1];
		prev->slib->nsliba = (unsigned long)q->slib;
	}

	q->slib->sla = (unsigned long)q->sl;
	q->slib->slsba = (unsigned long)&q->slsb.val[0];

	/* fill in sl */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sl->element[j].sbal = (unsigned long)q->sbal[j];

	QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
	QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
	QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
	QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
}
/* initialize all input and output queues from the caller's init data */
static void setup_queues(struct qdio_irq *irq_ptr,
			 struct qdio_initialize *qdio_init)
{
	char dbf_text[20];
	struct qdio_q *q;
	void **input_sbal_array = qdio_init->input_sbal_addr_array;
	void **output_sbal_array = qdio_init->output_sbal_addr_array;
	int i;

	sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	for_each_input_queue(irq_ptr, q, i) {
		sprintf(dbf_text, "in-q%4x", i);
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;
		spin_lock_init(&q->u.in.lock);
		setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
		/* each queue consumes QDIO_MAX_BUFFERS_PER_Q sbal pointers */
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		/* thin-interrupt devices use the dedicated inbound tasklet */
		if (is_thinint_irq(irq_ptr))
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		else
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);
	}

	for_each_output_queue(irq_ptr, q, i) {
		sprintf(dbf_text, "outq%4x", i);
		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

		q->is_input_q = 0;
		setup_storage_lists(q, irq_ptr, output_sbal_array,
				    dbf_text, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		setup_timer(&q->u.out.timer, (void(*)(unsigned long))
			    &qdio_outbound_timer, (unsigned long)q);
	}
}
/*
 * Translate the adapter characteristics byte (qdioac1 from the ssqd)
 * into the siga_flag bits. Fields not set here keep their zeroed value
 * from qdio_setup_irq().
 */
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
	if (qdioac & AC1_SIGA_INPUT_NEEDED)
		irq_ptr->siga_flag.input = 1;
	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
		irq_ptr->siga_flag.output = 1;
	if (qdioac & AC1_SIGA_SYNC_NEEDED)
		irq_ptr->siga_flag.sync = 1;
	if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
		irq_ptr->siga_flag.no_sync_ti = 1;
	if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
		irq_ptr->siga_flag.no_sync_out_pci = 1;

	/* outbound-after-thinint sync is redundant only if both hold */
	if (irq_ptr->siga_flag.no_sync_out_pci &&
	    irq_ptr->siga_flag.no_sync_ti)
		irq_ptr->siga_flag.no_sync_out_ti = 1;
}
/*
 * Enable QEBSM (queue-start-address token) if both this driver requested
 * it (rflags set in setup_qib) and the adapter reports it available and
 * enabled; otherwise fall back and clear the request flag.
 */
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
				  unsigned char qdioac, unsigned long token)
{
	char dbf_text[15];

	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
		goto no_qebsm;
	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
		goto no_qebsm;

	irq_ptr->sch_token = token;

	QDIO_DBF_TEXT0(0, setup, "V=V:1");
	sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	return;

no_qebsm:
	irq_ptr->sch_token = 0;
	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
	QDIO_DBF_TEXT0(0, setup, "noV=V");
}
/*
 * Issue the chsc "store subchannel QDIO data" command (code 0x0024) for
 * this subchannel and copy the result into irq_ptr->ssqd_desc.
 * Uses the chsc_page pre-allocated in qdio_allocate().
 */
static int __get_ssqd_info(struct qdio_irq *irq_ptr)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	QDIO_DBF_TEXT0(0, setup, "getssqd");
	ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
	memset(ssqd, 0, PAGE_SIZE);

	ssqd->request = (struct chsc_header) {
		.length = 0x0010,
		.code	= 0x0024,
	};
	/* query exactly this one subchannel */
	ssqd->first_sch = irq_ptr->schid.sch_no;
	ssqd->last_sch = irq_ptr->schid.sch_no;
	ssqd->ssid = irq_ptr->schid.ssid;

	if (chsc(ssqd))
		return -EIO;
	rc = chsc_error_from_response(ssqd->response.code);
	if (rc)
		return rc;

	/* reject responses that are not valid qdio data for our schid */
	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
		return -EINVAL;

	memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
	       sizeof(struct qdio_ssqd_desc));
	return 0;
}
/*
 * Fetch the subchannel's qdio description and derive QEBSM and siga
 * settings from it. If the chsc fails, assume the worst case (all siga
 * operations required) so the device still works, just slower.
 */
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
	unsigned char qdioac;
	char dbf_text[15];
	int rc;

	rc = __get_ssqd_info(irq_ptr);
	if (rc) {
		QDIO_DBF_TEXT2(0, setup, "ssqdasig");
		sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(0, setup, dbf_text);
		sprintf(dbf_text, "rc:%d", rc);
		QDIO_DBF_TEXT2(0, setup, dbf_text);
		/* all flags set, worst case */
		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
			 AC1_SIGA_SYNC_NEEDED;
	} else
		qdioac = irq_ptr->ssqd_desc.qdioac1;

	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
	process_ac_flags(irq_ptr, qdioac);

	sprintf(dbf_text, "qdioac%2x", qdioac);
	QDIO_DBF_TEXT2(0, setup, dbf_text);
}
/*
 * Free everything qdio_allocate() obtained: the queue structures with
 * their slib pages, the qdr, the chsc page and the irq page itself.
 */
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/*
	 * Walk the full queue arrays: nr_input_qs / nr_output_qs may not
	 * have been set yet when an allocation failed half-way. Unused
	 * slots are NULL since irq_ptr came from a zeroed page.
	 */
	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->input_qs[i];
		if (!q)
			continue;
		free_page((unsigned long) q->slib);
		kmem_cache_free(qdio_q_cache, q);
	}

	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->output_qs[i];
		if (!q)
			continue;
		free_page((unsigned long) q->slib);
		kmem_cache_free(qdio_q_cache, q);
	}

	kfree(irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
}
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
struct qdio_q **irq_ptr_qs,
int i, int nr)
{
irq_ptr->qdr->qdf0[i + nr].sliba =
(unsigned long)irq_ptr_qs[i]->slib;
irq_ptr->qdr->qdf0[i + nr].sla =
(unsigned long)irq_ptr_qs[i]->sl;
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
}
/* build the queue descriptor record handed to the hardware at establish */
static void setup_qdr(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *qdio_init)
{
	int i;

	irq_ptr->qdr->qfmt = qdio_init->q_format;
	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;

	/* input descriptors first, output descriptors right behind them */
	for (i = 0; i < qdio_init->no_input_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

	for (i = 0; i < qdio_init->no_output_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
					 qdio_init->no_input_qs);
}
/* initialize the queue information block from the caller's init data */
static void setup_qib(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *init_data)
{
	/* request QEBSM; check_and_setup_qebsm() clears it again if
	 * the adapter does not support it */
	if (qebsm_possible())
		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

	irq_ptr->qib.qfmt = init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba =
			(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba =
			(unsigned long)(irq_ptr->output_qs[0]->slib);
	/* adapter name is expected as 8 bytes EBCDIC */
	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
/*
 * (Re)initialize an allocated qdio_irq for a new establish: wipe stale
 * state, set up queues/qib/qdr, fetch the subchannel ccw commands and
 * install the qdio interrupt handler.
 */
int qdio_setup_irq(struct qdio_initialize *init_data)
{
	struct ciw *ciw;
	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
	int rc;

	/* wipe only up to qdr: the pointers behind it must survive */
	memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr, 0, sizeof(struct qdr));

	irq_ptr->int_parm = init_data->int_parm;
	irq_ptr->nr_input_qs = init_data->no_input_qs;
	irq_ptr->nr_output_qs = init_data->no_output_qs;

	irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
	irq_ptr->cdev = init_data->cdev;
	setup_queues(irq_ptr, init_data);

	setup_qib(irq_ptr, init_data);
	qdio_setup_thinint(irq_ptr);
	set_impl_params(irq_ptr, init_data->qib_param_field_format,
			init_data->qib_param_field,
			init_data->input_slib_elements,
			init_data->output_slib_elements);

	/* fill input and output descriptors */
	setup_qdr(irq_ptr, init_data);

	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1, setup, "no eq");
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->equeue = *ciw;

	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1, setup, "no aq");
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->aqueue = *ciw;

	/* set new interrupt handler; restored in qdio_shutdown() */
	irq_ptr->orig_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_int_handler;
	return 0;
out_err:
	qdio_release_memory(irq_ptr);
	return rc;
}
/*
 * Log a two-line summary of the established subchannel's capabilities
 * and the required siga operations to the kernel log.
 */
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev)
{
	char s[80];

	sprintf(s, "%s ", cdev->dev.bus_id);

	switch (irq_ptr->qib.qfmt) {
	case QDIO_QETH_QFMT:
		sprintf(s + strlen(s), "OSADE ");
		break;
	case QDIO_ZFCP_QFMT:
		sprintf(s + strlen(s), "ZFCP ");
		break;
	case QDIO_IQDIO_QFMT:
		sprintf(s + strlen(s), "HiperSockets ");
		break;
	}
	sprintf(s + strlen(s), "using: ");
	/* each feature prints "no<Feature>" when absent, "<Feature>" when present */
	if (!is_thinint_irq(irq_ptr))
		sprintf(s + strlen(s), "no");
	sprintf(s + strlen(s), "AdapterInterrupts ");
	if (!(irq_ptr->sch_token != 0))
		sprintf(s + strlen(s), "no");
	sprintf(s + strlen(s), "QEBSM ");
	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		sprintf(s + strlen(s), "no");
	sprintf(s + strlen(s), "OutboundPCI ");
	if (!css_general_characteristics.aif_tdd)
		sprintf(s + strlen(s), "no");
	sprintf(s + strlen(s), "TDD\n");
	printk(KERN_INFO "qdio: %s", s);

	memset(s, 0, sizeof(s));
	sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
	if (irq_ptr->siga_flag.input)
		sprintf(s + strlen(s), "Read ");
	if (irq_ptr->siga_flag.output)
		sprintf(s + strlen(s), "Write ");
	if (irq_ptr->siga_flag.sync)
		sprintf(s + strlen(s), "Sync ");
	/* no_sync_* flags mean the sync is NOT needed, hence the negation */
	if (!irq_ptr->siga_flag.no_sync_ti)
		sprintf(s + strlen(s), "SyncAI ");
	if (!irq_ptr->siga_flag.no_sync_out_ti)
		sprintf(s + strlen(s), "SyncOutAI ");
	if (!irq_ptr->siga_flag.no_sync_out_pci)
		sprintf(s + strlen(s), "SyncOutPCI");
	sprintf(s + strlen(s), "\n");
	printk(KERN_INFO "qdio: %s", s);
}
/*
 * Module init for the setup code: create the slab cache used for the
 * qdio queue structures and record the relevant CSS characteristics
 * in the debug feature. Returns 0 on success, -ENOMEM otherwise.
 */
int __init qdio_setup_init(void)
{
	char text[15];

	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
					 256, 0, NULL);
	if (!qdio_q_cache)
		return -ENOMEM;

	/* Check for OSA/FCP thin interrupts (bit 67). */
	sprintf(text, "thini%1x", css_general_characteristics.aif_osa ? 1 : 0);
	QDIO_DBF_TEXT0(0, setup, text);

	/* Check for QEBSM support in general (bit 58). */
	sprintf(text, "cssQBS:%1x", qebsm_possible() ? 1 : 0);
	QDIO_DBF_TEXT0(0, setup, text);

	return 0;
}
/* counterpart of qdio_setup_init: release the qdio_q slab cache */
void __exit qdio_setup_exit(void)
{
	kmem_cache_destroy(qdio_q_cache);
}
/*
* linux/drivers/s390/cio/thinint_qdio.c
*
* thin interrupt support for qdio
*
* Copyright 2000-2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
/* array index of the single indicator shared by all further subchannels */
#define TIQDIO_SHARED_IND 63
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;
/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;
static void tiqdio_tasklet_fn(unsigned long data);
/* one tasklet scanning all thinint input queues, see tiqdio_tasklet_fn */
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
/* nonzero if the SVS operation may be omitted on thin interrupts */
static int css_qdio_omit_svs;
/*
 * Issue the adapter operation selected by function code 3 in register 1
 * via the .insn rre,0xb2650000 instruction and return the value the
 * hardware leaves in register 3.
 * NOTE(review): per the surrounding code this is the "clear global
 * summary" (SVS) operation and reg 3 holds a time value - confirm
 * against the s390 Principles of Operation.
 */
static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");
	asm volatile(
		" .insn rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}
/*
 * Hand out a device state change indicator: the first free dedicated
 * slot if one exists, otherwise the shared indicator (reference
 * counted). Release with put_indicator().
 * NOTE(review): the read-then-set on count is not atomic as a pair;
 * callers appear to be serialized - confirm.
 */
static u32 *get_indicator(void)
{
	int idx;

	for (idx = 0; idx < TIQDIO_NR_NONSHARED_IND; idx++) {
		if (atomic_read(&q_indicators[idx].count))
			continue;
		atomic_set(&q_indicators[idx].count, 1);
		return &q_indicators[idx].ind;
	}

	/* all dedicated indicators in use, fall back to the shared one */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}
/* release an indicator obtained from get_indicator(); NULL is a no-op */
static void put_indicator(u32 *addr)
{
	unsigned long offset;

	if (!addr)
		return;
	/* recover the array slot from the element address */
	offset = (unsigned long)addr - (unsigned long)q_indicators;
	atomic_dec(&q_indicators[offset / sizeof(struct indicator_t)].count);
}
/*
 * Add all input queues of @irq_ptr to the global thinint queue list
 * (RCU protected), mark the device state change indicator and kick the
 * thinint tasklet so any already-pending work is picked up.
 */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	for_each_input_queue(irq_ptr, q, i) {
		list_add_rcu(&q->entry, &tiq_list);
		/* NOTE(review): a grace period after each *add* looks
		 * unnecessary (needed for removal, not insertion) - confirm */
		synchronize_rcu();
	}
	xchg(irq_ptr->dsci, 1);
	tasklet_schedule(&tiqdio_tasklet);
}
/*
 * we cannot stop the tiqdio tasklet here since it is for all
 * thinint qdio devices and it must run as long as there is a
 * thinint device left
 */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i) {
		list_del_rcu(&q->entry);
		/* wait until no RCU reader can still see the queue */
		synchronize_rcu();
	}
}
/*
 * Return 1 if the thinint input queue has no further work: either no
 * buffers are in use, or (after syncing the queue state) the buffer at
 * first_to_check is not primed by the adapter.
 */
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state);

	/* a primed buffer means the adapter handed us more work */
	return (state == SLSB_P_INPUT_PRIMED) ? 0 : 1;
}
/* true if this irq uses the indicator shared by overflow subchannels */
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
/*
 * Core thinint inbound processing: sync queue states, give the PCI
 * capable outbound queues a chance, then hand completed input buffers
 * to the upper-layer handler. Re-checks after stopping polling so the
 * initiative is not lost when new buffers arrive concurrently.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		goto again;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		goto again;
	}
}
/* tasklet entry point: @data carries the queue pointer */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
/*
 * check for work on all inbound thinint queues: schedule the per-queue
 * tasklet for every queue whose device state change indicator is set,
 * then resolve races on the shared indicator and the adapter local
 * summary indicator before giving up the initiative.
 */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing: re-set if the summary fired meanwhile */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 *
 * Runs in interrupt context: clears the summary indicator and defers
 * the actual queue scanning to the tiqdio tasklet.
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now, the tasklet will clean all dsci's
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}
/*
 * Tell the channel subsystem where the summary and subchannel
 * indicators for @irq_ptr live, via CHSC command code 0x0021.
 * With @reset the indicator addresses are cleared, detaching the
 * subchannel from adapter interrupts.
 *
 * Returns 0 on success, -EIO if the chsc instruction fails, or the
 * error mapped from the CHSC response code.
 */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	char dbf_text[15];
	void *ptr;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
		QDIO_DBF_TEXT1(0, trace, dbf_text);
		QDIO_DBF_TEXT1(0, setup, dbf_text);
		ptr = &scssc_area->response;
		QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
		return rc;
	}

	QDIO_DBF_TEXT2(0, setup, "setscind");
	QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
		      sizeof(unsigned long));
	return 0;
}
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	/*
	 * kcalloc zero-initializes and guards the count * size
	 * multiplication against overflow, unlike the open-coded
	 * kzalloc(n * size) form it replaces.
	 */
	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
			       sizeof(struct indicator_t), GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}
/* counterpart of tiqdio_allocate_memory: free the indicator array */
void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}
/*
 * Register the qdio ISC and the adapter interrupt handler that drives
 * all thinint devices. On failure the ISC registration is rolled back
 * and the error reported by s390_register_adapter_interrupt is
 * propagated (the old code always returned -ENOMEM, hiding the cause).
 */
int __init tiqdio_register_thinints(void)
{
	char dbf_text[20];

	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		int rc = PTR_ERR(tiqdio_alsi);

		sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return rc;
	}
	return 0;
}
/*
 * Establish adapter interrupt delivery for a thinint irq by pointing
 * the channel subsystem at its indicators. A no-op (returning 0) for
 * non-thinint devices.
 */
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/*
	 * Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;

	return set_subchannel_ind(irq_ptr, 0);
}
/* assign a (possibly shared) device state change indicator to the irq */
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
}
/*
 * Undo qdio_setup_thinint/qdio_establish_thinint: release the device
 * state change indicator and detach the subchannel from adapter
 * interrupts.
 */
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	put_indicator(irq_ptr->dsci);
	set_subchannel_ind(irq_ptr, 1);
}
/*
 * Module exit: stop the thinint tasklet, then tear down the adapter
 * interrupt registration if it was ever established.
 */
void __exit tiqdio_unregister_thinints(void)
{
	tasklet_disable(&tiqdio_tasklet);

	if (!tiqdio_alsi)
		return;

	s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
	isc_unregister(QDIO_AIRQ_ISC);
}
...@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, ...@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/*not used unless the microcode gets patched*/ /*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3 #define QETH_PCI_TIMER_VALUE(card) 3
#define QETH_MIN_INPUT_THRESHOLD 1
#define QETH_MAX_INPUT_THRESHOLD 500
#define QETH_MIN_OUTPUT_THRESHOLD 1
#define QETH_MAX_OUTPUT_THRESHOLD 300
/* priority queing */ /* priority queing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING #define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2 #define QETH_DEFAULT_QUEUE 2
...@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, ...@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions); enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *); int qeth_query_setadapterparms(struct qeth_card *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int); void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *, struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **); struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *); void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int, void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
unsigned int, unsigned int, int, int, int, unsigned long);
unsigned int, int, int,
unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *); void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int); int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *); void qeth_clear_working_pool_list(struct qeth_card *);
......
...@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card, ...@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card) static int qeth_qdio_activate(struct qeth_card *card)
{ {
QETH_DBF_TEXT(SETUP, 3, "qdioact"); QETH_DBF_TEXT(SETUP, 3, "qdioact");
return qdio_activate(CARD_DDEV(card), 0); return qdio_activate(CARD_DDEV(card));
} }
static int qeth_dm_act(struct qeth_card *card) static int qeth_dm_act(struct qeth_card *card)
...@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card) ...@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
card->qdio.in_q->next_buf_to_init = card->qdio.in_q->next_buf_to_init =
card->qdio.in_buf_pool.buf_count - 1; card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
card->qdio.in_buf_pool.buf_count - 1, NULL); card->qdio.in_buf_pool.buf_count - 1);
if (rc) { if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
return rc; return rc;
} }
rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
/* outbound queue */ /* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) { for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0, memset(card->qdio.out_qs[i]->qdio_bufs, 0,
...@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card) ...@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
unsigned int siga_error, const char *dbftext) const char *dbftext)
{ {
if (qdio_error || siga_error) { if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext); QETH_DBF_TEXT(TRACE, 2, dbftext);
QETH_DBF_TEXT(QERR, 2, dbftext); QETH_DBF_TEXT(QERR, 2, dbftext);
QETH_DBF_TEXT_(QERR, 2, " F15=%02X", QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
...@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, ...@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X", QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff); buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
return 1; return 1;
} }
return 0; return 0;
...@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) ...@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
card->perf_stats.inbound_do_qdio_start_time = card->perf_stats.inbound_do_qdio_start_time =
qeth_get_micros(); qeth_get_micros();
} }
rc = do_QDIO(CARD_DDEV(card), rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, queue->next_buf_to_init, count);
0, queue->next_buf_to_init, count, NULL);
if (card->options.performance_stats) if (card->options.performance_stats)
card->perf_stats.inbound_do_qdio_time += card->perf_stats.inbound_do_qdio_time +=
qeth_get_micros() - qeth_get_micros() -
...@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) ...@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
static int qeth_handle_send_error(struct qeth_card *card, static int qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
unsigned int siga_err)
{ {
int sbalf15 = buffer->buffer->element[15].flags & 0xff; int sbalf15 = buffer->buffer->element[15].flags & 0xff;
int cc = siga_err & 3; int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
switch (cc) { switch (cc) {
case 0: case 0:
if (qdio_err) { if (qdio_err) {
...@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card, ...@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
} }
return QETH_SEND_ERROR_NONE; return QETH_SEND_ERROR_NONE;
case 2: case 2:
if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
return QETH_SEND_ERROR_KICK_IT; return QETH_SEND_ERROR_KICK_IT;
...@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) ...@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
return 0; return 0;
} }
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int index, int count) int count)
{ {
struct qeth_qdio_out_buffer *buf; struct qeth_qdio_out_buffer *buf;
int rc; int rc;
...@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, ...@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
qeth_get_micros(); qeth_get_micros();
} }
qdio_flags = QDIO_FLAG_SYNC_OUTPUT; qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (under_int)
qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
if (atomic_read(&queue->set_pci_flags_count)) if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT; qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count, NULL); queue->queue_no, index, count);
if (queue->card->options.performance_stats) if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time += queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() - qeth_get_micros() -
...@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) ...@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
queue->card->perf_stats.bufs_sent_pack += queue->card->perf_stats.bufs_sent_pack +=
flush_cnt; flush_cnt;
if (flush_cnt) if (flush_cnt)
qeth_flush_buffers(queue, 1, index, flush_cnt); qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
} }
} }
} }
void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned int qdio_error, unsigned int siga_error, unsigned int qdio_error, int __queue, int first_element,
unsigned int __queue, int first_element, int count, int count, unsigned long card_ptr)
unsigned long card_ptr)
{ {
struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
...@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, ...@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
int i; int i;
QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
if (status & QDIO_STATUS_LOOK_FOR_ERROR) { if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { QETH_DBF_TEXT(TRACE, 2, "achkcond");
QETH_DBF_TEXT(TRACE, 2, "achkcond"); QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); netif_stop_queue(card->dev);
QETH_DBF_TEXT_(TRACE, 2, "%08x", status); qeth_schedule_recovery(card);
netif_stop_queue(card->dev); return;
qeth_schedule_recovery(card);
return;
}
} }
if (card->options.performance_stats) { if (card->options.performance_stats) {
card->perf_stats.outbound_handler_cnt++; card->perf_stats.outbound_handler_cnt++;
...@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, ...@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
for (i = first_element; i < (first_element + count); ++i) { for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */ /*we only handle the KICK_IT error by doing a recovery */
if (qeth_handle_send_error(card, buffer, if (qeth_handle_send_error(card, buffer, qdio_error)
qdio_error, siga_error)
== QETH_SEND_ERROR_KICK_IT){ == QETH_SEND_ERROR_KICK_IT){
netif_stop_queue(card->dev); netif_stop_queue(card->dev);
qeth_schedule_recovery(card); qeth_schedule_recovery(card);
...@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card, ...@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (ctx == NULL) { if (ctx == NULL) {
qeth_fill_buffer(queue, buffer, skb); qeth_fill_buffer(queue, buffer, skb);
qeth_flush_buffers(queue, 0, index, 1); qeth_flush_buffers(queue, index, 1);
} else { } else {
flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
WARN_ON(buffers_needed != flush_cnt); WARN_ON(buffers_needed != flush_cnt);
qeth_flush_buffers(queue, 0, index, flush_cnt); qeth_flush_buffers(queue, index, flush_cnt);
} }
return 0; return 0;
out: out:
...@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, ...@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* again */ * again */
if (atomic_read(&buffer->state) != if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){ QETH_QDIO_BUF_EMPTY){
qeth_flush_buffers(queue, 0, qeth_flush_buffers(queue, start_index,
start_index, flush_count); flush_count);
atomic_set(&queue->state, atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED); QETH_OUT_Q_UNLOCKED);
return -EBUSY; return -EBUSY;
...@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, ...@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
flush_count += tmp; flush_count += tmp;
out: out:
if (flush_count) if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count); qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count)) else if (!atomic_read(&queue->set_pci_flags_count))
atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/* /*
...@@ -3274,7 +3259,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, ...@@ -3274,7 +3259,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
flush_count += qeth_flush_buffers_on_no_pci(queue); flush_count += qeth_flush_buffers_on_no_pci(queue);
if (flush_count) if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count); qeth_flush_buffers(queue, start_index, flush_count);
} }
/* at this point the queue is UNLOCKED again */ /* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack) if (queue->card->options.performance_stats && do_pack)
...@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card) ...@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = qeth_get_qdio_q_format(card); init_data.q_format = qeth_get_qdio_q_format(card);
init_data.qib_param_field_format = 0; init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field; init_data.qib_param_field = qib_param_field;
init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
init_data.no_input_qs = 1; init_data.no_input_qs = 1;
init_data.no_output_qs = card->qdio.no_out_queues; init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler; init_data.input_handler = card->discipline.input_handler;
...@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, ...@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card) int qeth_core_hardsetup_card(struct qeth_card *card)
{ {
struct qdio_ssqd_desc *qdio_ssqd;
int retries = 3; int retries = 3;
int mpno; int mpno = 0;
int rc; int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
...@@ -3784,7 +3766,10 @@ int qeth_core_hardsetup_card(struct qeth_card *card) ...@@ -3784,7 +3766,10 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc; return rc;
} }
mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
if (qdio_ssqd)
mpno = qdio_ssqd->pcnt;
if (mpno) if (mpno)
mpno = min(mpno - 1, QETH_MAX_PORTNO); mpno = min(mpno - 1, QETH_MAX_PORTNO);
if (card->info.portno > mpno) { if (card->info.portno > mpno) {
......
...@@ -726,8 +726,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -726,8 +726,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
} }
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err, unsigned int qdio_err, unsigned int queue,
unsigned int siga_err, unsigned int queue,
int first_element, int count, unsigned long card_ptr) int first_element, int count, unsigned long card_ptr)
{ {
struct net_device *net_dev; struct net_device *net_dev;
...@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, ...@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++; card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros(); card->perf_stats.inbound_start_time = qeth_get_micros();
} }
if (status & QDIO_STATUS_LOOK_FOR_ERROR) { if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT(TRACE, 1, "qdinchk"); QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, count);
count); QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); qeth_schedule_recovery(card);
qeth_schedule_recovery(card); return;
return;
}
} }
for (i = first_element; i < (first_element + count); ++i) { for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q; index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index]; buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer, qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
qdio_err, siga_err, "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index); qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */ /* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry); qeth_put_buffer_pool_entry(card, buffer->pool_entry);
......
...@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) ...@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
} }
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err, unsigned int qdio_err, unsigned int queue, int first_element,
unsigned int siga_err, unsigned int queue, int first_element,
int count, unsigned long card_ptr) int count, unsigned long card_ptr)
{ {
struct net_device *net_dev; struct net_device *net_dev;
...@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, ...@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++; card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros(); card->perf_stats.inbound_start_time = qeth_get_micros();
} }
if (status & QDIO_STATUS_LOOK_FOR_ERROR) { if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT(TRACE, 1, "qdinchk"); QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, count);
first_element, count); QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); qeth_schedule_recovery(card);
qeth_schedule_recovery(card); return;
return;
}
} }
for (i = first_element; i < (first_element + count); ++i) { for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q; index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index]; buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer, qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err, "qinerr"))) qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index); qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */ /* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry); qeth_put_buffer_pool_entry(card, buffer->pool_entry);
......
...@@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, ...@@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
/** /**
* zfcp_hba_dbf_event_qdio - trace event for QDIO related failure * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
* @adapter: adapter affected by this QDIO related event * @adapter: adapter affected by this QDIO related event
* @status: as passed by qdio module
* @qdio_error: as passed by qdio module * @qdio_error: as passed by qdio module
* @siga_error: as passed by qdio module
* @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module
* @sbal_count: number of buffers affected, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module
*/ */
void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
unsigned int qdio_error, unsigned int siga_error, unsigned int qdio_error, int sbal_index,
int sbal_index, int sbal_count) int sbal_count)
{ {
struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
unsigned long flags; unsigned long flags;
...@@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, ...@@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
spin_lock_irqsave(&adapter->hba_dbf_lock, flags); spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
memset(r, 0, sizeof(*r)); memset(r, 0, sizeof(*r));
strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
r->u.qdio.status = status;
r->u.qdio.qdio_error = qdio_error; r->u.qdio.qdio_error = qdio_error;
r->u.qdio.siga_error = siga_error;
r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_index = sbal_index;
r->u.qdio.sbal_count = sbal_count; r->u.qdio.sbal_count = sbal_count;
debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
...@@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p, ...@@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
{ {
zfcp_dbf_out(p, "status", "0x%08x", r->status);
zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
} }
......
...@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status { ...@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
} __attribute__ ((packed)); } __attribute__ ((packed));
struct zfcp_hba_dbf_record_qdio { struct zfcp_hba_dbf_record_qdio {
u32 status;
u32 qdio_error; u32 qdio_error;
u32 siga_error;
u8 sbal_index; u8 sbal_index;
u8 sbal_count; u8 sbal_count;
} __attribute__ ((packed)); } __attribute__ ((packed));
......
...@@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *); ...@@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *); struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
unsigned int, unsigned int, unsigned int, int);
int, int);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
......
...@@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) ...@@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
} }
} }
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
unsigned int qdio_err, unsigned int siga_err, int queue_no, int first, int count,
unsigned int queue_no, int first, int count,
unsigned long parm) unsigned long parm)
{ {
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->req_q; struct zfcp_qdio_queue *queue = &adapter->req_q;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
first, count);
zfcp_qdio_handler_error(adapter, 140); zfcp_qdio_handler_error(adapter, 140);
return; return;
} }
...@@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) ...@@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
count = atomic_read(&queue->count) + processed; count = atomic_read(&queue->count) + processed;
retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
0, start, count, NULL);
if (unlikely(retval)) { if (unlikely(retval)) {
atomic_set(&queue->count, count); atomic_set(&queue->count, count);
...@@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) ...@@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
} }
} }
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
unsigned int qdio_err, unsigned int siga_err, int queue_no, int first, int count,
unsigned int queue_no, int first, int count,
unsigned long parm) unsigned long parm)
{ {
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
...@@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, ...@@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
volatile struct qdio_buffer_element *sbale; volatile struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no; int sbal_idx, sbale_idx, sbal_no;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
first, count);
zfcp_qdio_handler_error(adapter, 147); zfcp_qdio_handler_error(adapter, 147);
return; return;
} }
...@@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) ...@@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
} }
retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
count, NULL); count);
if (unlikely(retval)) { if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count); zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval; return retval;
...@@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter) ...@@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
init_data->qib_param_field = NULL; init_data->qib_param_field = NULL;
init_data->input_slib_elements = NULL; init_data->input_slib_elements = NULL;
init_data->output_slib_elements = NULL; init_data->output_slib_elements = NULL;
init_data->min_input_threshold = 1;
init_data->max_input_threshold = 5000;
init_data->min_output_threshold = 1;
init_data->max_output_threshold = 1000;
init_data->no_input_qs = 1; init_data->no_input_qs = 1;
init_data->no_output_qs = 1; init_data->no_output_qs = 1;
init_data->input_handler = zfcp_qdio_int_resp; init_data->input_handler = zfcp_qdio_int_resp;
...@@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) ...@@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock(&req_q->lock); spin_unlock(&req_q->lock);
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
== -EINPROGRESS)
ssleep(1);
/* cleanup used outbound sbals */ /* cleanup used outbound sbals */
count = atomic_read(&req_q->count); count = atomic_read(&req_q->count);
...@@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) ...@@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return -EIO; return -EIO;
} }
if (qdio_activate(adapter->ccw_device, 0)) { if (qdio_activate(adapter->ccw_device)) {
dev_err(&adapter->ccw_device->dev, dev_err(&adapter->ccw_device->dev,
"Activate of QDIO queues failed.\n"); "Activate of QDIO queues failed.\n");
goto failed_qdio; goto failed_qdio;
...@@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) ...@@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
} }
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q, NULL)) { QDIO_MAX_BUFFERS_PER_Q)) {
dev_err(&adapter->ccw_device->dev, dev_err(&adapter->ccw_device->dev,
"Init of QDIO response queue failed.\n"); "Init of QDIO response queue failed.\n");
goto failed_qdio; goto failed_qdio;
...@@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) ...@@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return 0; return 0;
failed_qdio: failed_qdio:
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
== -EINPROGRESS)
ssleep(1);
return -EIO; return -EIO;
} }
/* /*
* linux/include/asm-s390/qdio.h * linux/include/asm-s390/qdio.h
* *
* Linux for S/390 QDIO base support, Hipersocket base support * Copyright 2000,2008 IBM Corp.
* version 2
*
* Copyright 2000,2002 IBM Corporation
* Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* *
*/ */
#ifndef __QDIO_H__ #ifndef __QDIO_H__
#define __QDIO_H__ #define __QDIO_H__
/* note, that most of the typedef's are from ingo. */
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/cio.h> #include <asm/cio.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#define QDIO_NAME "qdio " #define QDIO_MAX_QUEUES_PER_IRQ 32
#define QDIO_MAX_BUFFERS_PER_Q 128
#ifndef __s390x__ #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_32_BIT #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#endif /* __s390x__ */ #define QDIO_SBAL_SIZE 256
/**** CONSTANTS, that are relied on without using these symbols *****/ #define QDIO_QETH_QFMT 0
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* used in width of unsigned int */ #define QDIO_ZFCP_QFMT 1
/************************ END of CONSTANTS **************************/ #define QDIO_IQDIO_QFMT 2
#define QDIO_MAX_BUFFERS_PER_Q 128 /* must be a power of 2 (%x=&(x-1)*/
#define QDIO_BUF_ORDER 7 /* 2**this == number of pages used for sbals in 1 q */ /**
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16 * struct qdesfmt0 - queue descriptor, format 0
#define SBAL_SIZE 256 * @sliba: storage list information block address
* @sla: storage list address
#define QDIO_QETH_QFMT 0 * @slsba: storage list state block address
#define QDIO_ZFCP_QFMT 1 * @akey: access key for DLIB
#define QDIO_IQDIO_QFMT 2 * @bkey: access key for SL
#define QDIO_IQDIO_QFMT_ASYNCH 3 * @ckey: access key for SBALs
* @dkey: access key for SLSB
struct qdio_buffer_element{ */
unsigned int flags;
unsigned int length;
#ifdef QDIO_32_BIT
void *reserved;
#endif /* QDIO_32_BIT */
void *addr;
} __attribute__ ((packed,aligned(16)));
struct qdio_buffer{
volatile struct qdio_buffer_element element[16];
} __attribute__ ((packed,aligned(256)));
/* params are: ccw_device, status, qdio_error, siga_error,
queue_number, first element processed, number of elements processed,
int_parm */
typedef void qdio_handler_t(struct ccw_device *,unsigned int,unsigned int,
unsigned int,unsigned int,int,int,unsigned long);
#define QDIO_STATUS_INBOUND_INT 0x01
#define QDIO_STATUS_OUTBOUND_INT 0x02
#define QDIO_STATUS_LOOK_FOR_ERROR 0x04
#define QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR 0x08
#define QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR 0x10
#define QDIO_STATUS_ACTIVATE_CHECK_CONDITION 0x20
#define QDIO_SIGA_ERROR_ACCESS_EXCEPTION 0x10
#define QDIO_SIGA_ERROR_B_BIT_SET 0x20
/* for qdio_initialize */
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
struct qdio_initialize {
struct ccw_device *cdev;
unsigned char q_format;
unsigned char adapter_name[8];
unsigned int qib_param_field_format; /*adapter dependent*/
/* pointer to 128 bytes or NULL, if no param field */
unsigned char *qib_param_field; /* adapter dependent */
/* pointer to no_queues*128 words of data or NULL */
unsigned long *input_slib_elements;
unsigned long *output_slib_elements;
unsigned int min_input_threshold;
unsigned int max_input_threshold;
unsigned int min_output_threshold;
unsigned int max_output_threshold;
unsigned int no_input_qs;
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
unsigned long int_parm;
unsigned long flags;
void **input_sbal_addr_array; /* addr of n*128 void ptrs */
void **output_sbal_addr_array; /* addr of n*128 void ptrs */
};
extern int qdio_initialize(struct qdio_initialize *init_data);
extern int qdio_allocate(struct qdio_initialize *init_data);
extern int qdio_establish(struct qdio_initialize *init_data);
extern int qdio_activate(struct ccw_device *,int flags);
#define QDIO_STATE_MUST_USE_OUTB_PCI 0x00000001
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_initialize */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
extern unsigned long qdio_get_status(int irq);
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_UNDER_INTERRUPT 0x04
#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
adapter interrupts */
#define QDIO_FLAG_DONT_SIGA 0x10
#define QDIO_FLAG_PCI_OUT 0x20
extern int do_QDIO(struct ccw_device*, unsigned int flags,
unsigned int queue_number,
unsigned int qidx,unsigned int count,
struct qdio_buffer *buffers);
extern int qdio_get_ssqd_pct(struct ccw_device*);
extern int qdio_synchronize(struct ccw_device*, unsigned int flags,
unsigned int queue_number);
extern int qdio_cleanup(struct ccw_device*, int how);
extern int qdio_shutdown(struct ccw_device*, int how);
extern int qdio_free(struct ccw_device*);
unsigned char qdio_get_slsb_state(struct ccw_device*, unsigned int flag,
unsigned int queue_number,
unsigned int qidx);
extern void qdio_init_scrubber(void);
struct qdesfmt0 { struct qdesfmt0 {
#ifdef QDIO_32_BIT u64 sliba;
unsigned long res1; /* reserved */ u64 sla;
#endif /* QDIO_32_BIT */ u64 slsba;
unsigned long sliba; /* storage-list-information-block u32 : 32;
address */ u32 akey : 4;
#ifdef QDIO_32_BIT u32 bkey : 4;
unsigned long res2; /* reserved */ u32 ckey : 4;
#endif /* QDIO_32_BIT */ u32 dkey : 4;
unsigned long sla; /* storage-list address */ u32 : 16;
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long slsba; /* storage-list-state-block address */
unsigned int res4; /* reserved */
unsigned int akey : 4; /* access key for DLIB */
unsigned int bkey : 4; /* access key for SL */
unsigned int ckey : 4; /* access key for SBALs */
unsigned int dkey : 4; /* access key for SLSB */
unsigned int res5 : 16; /* reserved */
} __attribute__ ((packed)); } __attribute__ ((packed));
/* /**
* Queue-Description record (QDR) * struct qdr - queue description record (QDR)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @ac: adapter characteristics
* @iqdcnt: input queue descriptor count
* @oqdcnt: output queue descriptor count
* @iqdsz: inpout queue descriptor size
* @oqdsz: output queue descriptor size
* @qiba: queue information block address
* @qkey: queue information block key
* @qdf0: queue descriptions
*/ */
struct qdr { struct qdr {
unsigned int qfmt : 8; /* queue format */ u32 qfmt : 8;
unsigned int pfmt : 8; /* impl. dep. parameter format */ u32 pfmt : 8;
unsigned int res1 : 8; /* reserved */ u32 : 8;
unsigned int ac : 8; /* adapter characteristics */ u32 ac : 8;
unsigned int res2 : 8; /* reserved */ u32 : 8;
unsigned int iqdcnt : 8; /* input-queue-descriptor count */ u32 iqdcnt : 8;
unsigned int res3 : 8; /* reserved */ u32 : 8;
unsigned int oqdcnt : 8; /* output-queue-descriptor count */ u32 oqdcnt : 8;
unsigned int res4 : 8; /* reserved */ u32 : 8;
unsigned int iqdsz : 8; /* input-queue-descriptor size */ u32 iqdsz : 8;
unsigned int res5 : 8; /* reserved */ u32 : 8;
unsigned int oqdsz : 8; /* output-queue-descriptor size */ u32 oqdsz : 8;
unsigned int res6[9]; /* reserved */ /* private: */
#ifdef QDIO_32_BIT u32 res[9];
unsigned long res7; /* reserved */ /* public: */
#endif /* QDIO_32_BIT */ u64 qiba;
unsigned long qiba; /* queue-information-block address */ u32 : 32;
unsigned int res8; /* reserved */ u32 qkey : 4;
unsigned int qkey : 4; /* queue-information-block key */ u32 : 28;
unsigned int res9 : 28; /* reserved */ struct qdesfmt0 qdf0[126];
/* union _qd {*/ /* why this? */ } __attribute__ ((packed, aligned(4096)));
struct qdesfmt0 qdf0[126];
/* } qd;*/ #define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
} __attribute__ ((packed,aligned(4096)));
/*
* queue information block (QIB)
*/
#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80 #define QIB_RFLAGS_ENABLE_QEBSM 0x80
/**
* struct qib - queue information block (QIB)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @rflags: QEBSM
* @ac: adapter characteristics
* @isliba: absolute address of first input SLIB
* @osliba: absolute address of first output SLIB
* @ebcnam: adapter identifier in EBCDIC
* @parm: implementation dependent parameters
*/
struct qib { struct qib {
unsigned int qfmt : 8; /* queue format */ u32 qfmt : 8;
unsigned int pfmt : 8; /* impl. dep. parameter format */ u32 pfmt : 8;
unsigned int rflags : 8; /* QEBSM */ u32 rflags : 8;
unsigned int ac : 8; /* adapter characteristics */ u32 ac : 8;
unsigned int res2; /* reserved */ u32 : 32;
#ifdef QDIO_32_BIT u64 isliba;
unsigned long res3; /* reserved */ u64 osliba;
#endif /* QDIO_32_BIT */ u32 : 32;
unsigned long isliba; /* absolute address of 1st u32 : 32;
input SLIB */ u8 ebcnam[8];
#ifdef QDIO_32_BIT /* private: */
unsigned long res4; /* reserved */ u8 res[88];
#endif /* QDIO_32_BIT */ /* public: */
unsigned long osliba; /* absolute address of 1st u8 parm[QDIO_MAX_BUFFERS_PER_Q];
output SLIB */ } __attribute__ ((packed, aligned(256)));
unsigned int res5; /* reserved */
unsigned int res6; /* reserved */ /**
unsigned char ebcnam[8]; /* adapter identifier in EBCDIC */ * struct slibe - storage list information block element (SLIBE)
unsigned char res7[88]; /* reserved */ * @parms: implementation dependent parameters
unsigned char parm[QDIO_MAX_BUFFERS_PER_Q];
/* implementation dependent
parameters */
} __attribute__ ((packed,aligned(256)));
/*
* storage-list-information block element (SLIBE)
*/ */
struct slibe { struct slibe {
#ifdef QDIO_32_BIT u64 parms;
unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long parms; /* implementation dependent
parameters */
}; };
/* /**
* storage-list-information block (SLIB) * struct slib - storage list information block (SLIB)
* @nsliba: next SLIB address (if any)
* @sla: SL address
* @slsba: SLSB address
* @slibe: SLIB elements
*/ */
struct slib { struct slib {
#ifdef QDIO_32_BIT u64 nsliba;
unsigned long res1; /* reserved */ u64 sla;
#endif /* QDIO_32_BIT */ u64 slsba;
unsigned long nsliba; /* next SLIB address (if any) */ /* private: */
#ifdef QDIO_32_BIT u8 res[1000];
unsigned long res2; /* reserved */ /* public: */
#endif /* QDIO_32_BIT */ struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
unsigned long sla; /* SL address */ } __attribute__ ((packed, aligned(2048)));
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */ /**
#endif /* QDIO_32_BIT */ * struct sbal_flags - storage block address list flags
unsigned long slsba; /* SLSB address */ * @last: last entry
unsigned char res4[1000]; /* reserved */ * @cont: contiguous storage
struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; /* SLIB elements */ * @frag: fragmentation
} __attribute__ ((packed,aligned(2048))); */
struct sbal_flags { struct sbal_flags {
unsigned char res1 : 1; /* reserved */ u8 : 1;
unsigned char last : 1; /* last entry */ u8 last : 1;
unsigned char cont : 1; /* contiguous storage */ u8 cont : 1;
unsigned char res2 : 1; /* reserved */ u8 : 1;
unsigned char frag : 2; /* fragmentation (s.below) */ u8 frag : 2;
unsigned char res3 : 2; /* reserved */ u8 : 2;
} __attribute__ ((packed)); } __attribute__ ((packed));
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL #define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL #define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL #define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL #define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL #define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL #define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
/* Awesome OpenFCP extensions */ /* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL #define SBAL_FLAGS0_TYPE_STATUS 0x00UL
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL #define SBAL_FLAGS0_TYPE_WRITE 0x08UL
#define SBAL_FLAGS0_TYPE_READ 0x10UL #define SBAL_FLAGS0_TYPE_READ 0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL #define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
#define SBAL_FLAGS0_MORE_SBALS 0x04UL #define SBAL_FLAGS0_MORE_SBALS 0x04UL
#define SBAL_FLAGS0_COMMAND 0x02UL #define SBAL_FLAGS0_COMMAND 0x02UL
#define SBAL_FLAGS0_LAST_SBAL 0x00UL #define SBAL_FLAGS0_LAST_SBAL 0x00UL
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND #define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS #define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND #define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
/* Naught of interest beyond this point */ #define SBAL_FLAGS0_PCI 0x40
#define SBAL_FLAGS0_PCI 0x40 /**
* struct sbal_sbalf_0 - sbal flags for sbale 0
* @pci: PCI indicator
* @cont: data continuation
* @sbtype: storage-block type (FCP)
*/
struct sbal_sbalf_0 { struct sbal_sbalf_0 {
unsigned char res1 : 1; /* reserved */ u8 : 1;
unsigned char pci : 1; /* PCI indicator */ u8 pci : 1;
unsigned char cont : 1; /* data continuation */ u8 cont : 1;
unsigned char sbtype: 2; /* storage-block type (OpenFCP) */ u8 sbtype : 2;
unsigned char res2 : 3; /* reserved */ u8 : 3;
} __attribute__ ((packed)); } __attribute__ ((packed));
/**
* struct sbal_sbalf_1 - sbal flags for sbale 1
* @key: storage key
*/
struct sbal_sbalf_1 { struct sbal_sbalf_1 {
unsigned char res1 : 4; /* reserved */ u8 : 4;
unsigned char key : 4; /* storage key */ u8 key : 4;
} __attribute__ ((packed)); } __attribute__ ((packed));
/**
* struct sbal_sbalf_14 - sbal flags for sbale 14
* @erridx: error index
*/
struct sbal_sbalf_14 { struct sbal_sbalf_14 {
unsigned char res1 : 4; /* reserved */ u8 : 4;
unsigned char erridx : 4; /* error index */ u8 erridx : 4;
} __attribute__ ((packed)); } __attribute__ ((packed));
/**
* struct sbal_sbalf_15 - sbal flags for sbale 15
* @reason: reason for error state
*/
struct sbal_sbalf_15 { struct sbal_sbalf_15 {
unsigned char reason; /* reserved */ u8 reason;
} __attribute__ ((packed)); } __attribute__ ((packed));
/**
* union sbal_sbalf - storage block address list flags
* @i0: sbalf0
* @i1: sbalf1
* @i14: sbalf14
* @i15: sblaf15
* @value: raw value
*/
union sbal_sbalf { union sbal_sbalf {
struct sbal_sbalf_0 i0; struct sbal_sbalf_0 i0;
struct sbal_sbalf_1 i1; struct sbal_sbalf_1 i1;
struct sbal_sbalf_14 i14; struct sbal_sbalf_14 i14;
struct sbal_sbalf_15 i15; struct sbal_sbalf_15 i15;
unsigned char value; u8 value;
}; };
struct sbal_element { /**
union { * struct qdio_buffer_element - SBAL entry
struct sbal_flags bits; /* flags */ * @flags: flags
unsigned char value; * @length: length
} flags; * @addr: address
unsigned int res1 : 16; /* reserved */ */
union sbal_sbalf sbalf; /* SBAL flags */ struct qdio_buffer_element {
unsigned int res2 : 16; /* reserved */ u32 flags;
unsigned int count : 16; /* data count */ u32 length;
#ifdef QDIO_32_BIT #ifdef CONFIG_32BIT
unsigned long res3; /* reserved */ /* private: */
#endif /* QDIO_32_BIT */ void *reserved;
unsigned long addr; /* absolute data address */ /* public: */
} __attribute__ ((packed,aligned(16))); #endif
void *addr;
} __attribute__ ((packed, aligned(16)));
/* /**
* strorage-block access-list (SBAL) * struct qdio_buffer - storage block address list (SBAL)
* @element: SBAL entries
*/ */
struct sbal { struct qdio_buffer {
struct sbal_element element[QDIO_MAX_ELEMENTS_PER_BUFFER]; struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed,aligned(256))); } __attribute__ ((packed, aligned(256)));
/* /**
* storage-list (SL) * struct sl_element - storage list entry
* @sbal: absolute SBAL address
*/ */
struct sl_element { struct sl_element {
#ifdef QDIO_32_BIT #ifdef CONFIG_32BIT
unsigned long res; /* reserved */ /* private: */
#endif /* QDIO_32_BIT */ unsigned long reserved;
unsigned long sbal; /* absolute SBAL address */ /* public: */
#endif
unsigned long sbal;
} __attribute__ ((packed)); } __attribute__ ((packed));
/**
* struct sl - storage list (SL)
* @element: SL entries
*/
struct sl { struct sl {
struct sl_element element[QDIO_MAX_BUFFERS_PER_Q]; struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed,aligned(1024))); } __attribute__ ((packed, aligned(1024)));
/* /**
* storage-list-state block (SLSB) * struct slsb - storage list state block (SLSB)
* @val: state per buffer
*/ */
struct slsb_flags { struct slsb {
unsigned char owner : 2; /* SBAL owner */ u8 val[QDIO_MAX_BUFFERS_PER_Q];
unsigned char type : 1; /* buffer type */ } __attribute__ ((packed, aligned(256)));
unsigned char state : 5; /* processing state */
struct qdio_ssqd_desc {
u8 flags;
u8:8;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pcnt;
u8 icnt;
u8:8;
u8 ocnt;
u8:8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
u64:64;
} __attribute__ ((packed)); } __attribute__ ((packed));
/* params are: ccw_device, qdio_error, queue_number,
first element processed, number of elements processed, int_parm */
typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
int, int, unsigned long);
struct slsb { /* qdio errors reported to the upper-layer program */
union { #define QDIO_ERROR_SIGA_ACCESS_EXCEPTION 0x10
unsigned char val[QDIO_MAX_BUFFERS_PER_Q]; #define QDIO_ERROR_SIGA_BUSY 0x20
struct slsb_flags flags[QDIO_MAX_BUFFERS_PER_Q]; #define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40
} acc; #define QDIO_ERROR_SLSB_STATE 0x80
} __attribute__ ((packed,aligned(256)));
/* /* for qdio_initialize */
* SLSB values #define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/**
* struct qdio_initialize - qdio initalization data
* @cdev: associated ccw device
* @q_format: queue format
* @adapter_name: name for the adapter
* @qib_param_field_format: format for qib_parm_field
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
* @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
* @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
* @no_input_qs: number of input queues
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
* @int_parm: interruption parameter
* @flags: initialization flags
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
*/ */
#define SLSB_OWNER_PROG 1 struct qdio_initialize {
#define SLSB_OWNER_CU 2 struct ccw_device *cdev;
unsigned char q_format;
#define SLSB_TYPE_INPUT 0 unsigned char adapter_name[8];
#define SLSB_TYPE_OUTPUT 1 unsigned int qib_param_field_format;
unsigned char *qib_param_field;
#define SLSB_STATE_NOT_INIT 0 unsigned long *input_slib_elements;
#define SLSB_STATE_EMPTY 1 unsigned long *output_slib_elements;
#define SLSB_STATE_PRIMED 2 unsigned int no_input_qs;
#define SLSB_STATE_HALTED 0xe unsigned int no_output_qs;
#define SLSB_STATE_ERROR 0xf qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
#define SLSB_P_INPUT_NOT_INIT 0x80 unsigned long int_parm;
#define SLSB_P_INPUT_PROCESSING 0x81 unsigned long flags;
#define SLSB_CU_INPUT_EMPTY 0x41 void **input_sbal_addr_array;
#define SLSB_P_INPUT_PRIMED 0x82 void **output_sbal_addr_array;
#define SLSB_P_INPUT_HALTED 0x8E };
#define SLSB_P_INPUT_ERROR 0x8F
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define SLSB_P_OUTPUT_NOT_INIT 0xA0 #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define SLSB_P_OUTPUT_EMPTY 0xA1 #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define SLSB_CU_OUTPUT_PRIMED 0x62 #define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
#define SLSB_P_OUTPUT_HALTED 0xAE
#define SLSB_P_OUTPUT_ERROR 0xAF #define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define SLSB_ERROR_DURING_LOOKUP 0xFF #define QDIO_FLAG_PCI_OUT 0x10
extern int qdio_initialize(struct qdio_initialize *init_data);
extern int qdio_allocate(struct qdio_initialize *init_data);
extern int qdio_establish(struct qdio_initialize *init_data);
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device*, unsigned int flags,
int q_nr, int qidx, int count);
extern int qdio_cleanup(struct ccw_device*, int how);
extern int qdio_shutdown(struct ccw_device*, int how);
extern int qdio_free(struct ccw_device *);
extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev);
#endif /* __QDIO_H__ */ #endif /* __QDIO_H__ */
...@@ -65,6 +65,7 @@ extern unsigned long machine_flags; ...@@ -65,6 +65,7 @@ extern unsigned long machine_flags;
#define MACHINE_FLAG_VM (1UL << 0) #define MACHINE_FLAG_VM (1UL << 0)
#define MACHINE_FLAG_IEEE (1UL << 1) #define MACHINE_FLAG_IEEE (1UL << 1)
#define MACHINE_FLAG_P390 (1UL << 2)
#define MACHINE_FLAG_CSP (1UL << 3) #define MACHINE_FLAG_CSP (1UL << 3)
#define MACHINE_FLAG_MVPG (1UL << 4) #define MACHINE_FLAG_MVPG (1UL << 4)
#define MACHINE_FLAG_DIAG44 (1UL << 5) #define MACHINE_FLAG_DIAG44 (1UL << 5)
...@@ -77,7 +78,6 @@ extern unsigned long machine_flags; ...@@ -77,7 +78,6 @@ extern unsigned long machine_flags;
#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM) #define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_P390 (machine_flags & MACHINE_FLAG_P390)
#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C) #define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__ #ifndef __s390x__
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment