Commit e105d28a authored by Martin Schwidefsky's avatar Martin Schwidefsky Committed by Linus Torvalds

[PATCH] s/390 patches for 2.5.20 (2 of 4).

Second patch of the s/390 update. Contains all the include file changes in
include/asm-{s390,s390x}.
parent c1997c8d
......@@ -26,29 +26,17 @@ typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string) \
__asm__ __volatile__(" l %0,0(%2)\n" \
__asm__ __volatile__(" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,0(%2)\n" \
op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val) \
: "=&d" (old_val), "=&d" (new_val), \
"+m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val) : "cc" );
static __inline__ int atomic_read(atomic_t *v)
{
int retval;
__asm__ __volatile__("bcr 15,0\n\t"
"l %0,%1"
: "=d" (retval) : "m" (*v) );
return retval;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("st %1,%0\n\t"
"bcr 15,0"
: "=m" (*v) : "d" (i) );
}
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
......@@ -138,14 +126,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
int retval;
__asm__ __volatile__(
" lr 0,%2\n"
" cs 0,%3,0(%1)\n"
" lr %0,%3\n"
" cs %0,%4,0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
"0:"
: "=&d" (retval)
: "=&d" (retval), "+m" (v->counter)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "0", "cc");
: "cc" );
return retval;
}
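For readers unfamiliar with the cs pattern: the instruction sets condition code 0 on a successful swap and 1 when the comparison fails, and the ipm/srl pair turns that code into the return value, so atomic_compare_and_swap() returns 0 on success. A minimal user-space sketch of the same contract, using GCC's __sync builtin instead of the s390 instruction (illustrative only, not part of this header):

#include <stdio.h>

/* Sketch: return 0 if *v held expected_oldval and now holds new_val,
 * non-zero otherwise -- the same contract as the inline asm above. */
static int compare_and_swap_sketch(int expected_oldval, int new_val, int *v)
{
	int old = __sync_val_compare_and_swap(v, expected_oldval, new_val);
	return old != expected_oldval;
}

int main(void)
{
	int counter = 5;
	printf("%d\n", compare_and_swap_sketch(5, 6, &counter)); /* 0, counter is 6 */
	printf("%d\n", compare_and_swap_sketch(5, 7, &counter)); /* 1, counter stays 6 */
	return 0;
}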
......@@ -155,12 +143,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__(
"0: lr 0,%1\n"
" cs 0,%2,0(%0)\n"
"0: lr %1,%3\n"
" cs %1,%4,0(%2)\n"
" jl 0b\n"
: : "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc", "0" );
: "+m" (v->counter), "=&d" (tmp)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc" );
}
#define atomic_compare_and_swap_debug(where,from,to) \
......
This diff is collapsed.
......@@ -13,75 +13,48 @@
#ifdef __GNUC__
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
{
__u32 temp;
__asm__ __volatile__ (
" st %0,0(%1)\n"
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "+&d" (x) : "a" (&temp) : "cc" );
return x;
__u32 result;
__asm__ __volatile__ (
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "=&d" (result) : "a" (x) : "cc" );
return result;
}
static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
__u32 result;
__asm__ __volatile__ (
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "=&d" (result) : "a" (x) : "cc" );
return result;
return ___arch__swab32p(&x);
}
static __inline__ void ___arch__swab32s(__u32 *x)
{
__asm__ __volatile__ (
" icm 0,8,3(%0)\n"
" icm 0,4,2(%0)\n"
" icm 0,2,1(%0)\n"
" ic 0,0(%0)\n"
" st 0,0(%0)"
: : "a" (x) : "0", "memory", "cc");
*x = ___arch__swab32p(x);
}
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
{
__u16 temp;
__asm__ __volatile__ (
" sth %0,0(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "+&d" (x) : "a" (&temp) : "memory", "cc" );
return x;
__u16 result;
__asm__ __volatile__ (
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "=&d" (result) : "a" (x) : "cc" );
return result;
}
static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
__u16 result;
__asm__ __volatile__ (
" sr %0,%0\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "=&d" (result) : "a" (x) : "cc" );
return result;
return ___arch__swab16p(&x);
}
static __inline__ void ___arch__swab16s(__u16 *x)
{
__asm__ __volatile__(
" icm 0,2,1(%0)\n"
" ic 0,0(%0)\n"
" sth 0,0(%0)"
: : "a" (x) : "0", "memory", "cc" );
*x = ___arch__swab16p(x);
}
#define __arch__swab32(x) ___arch__swab32(x)
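The icm (insert characters under mask) sequence assembles the bytes of *x in reverse order into the result register. A portable C equivalent of ___arch__swab32p, shown only to illustrate what the assembler does:

#include <stdio.h>

typedef unsigned int __u32;

/* Portable sketch of ___arch__swab32p: byte-reverse a 32-bit value. */
static __u32 swab32p_sketch(const __u32 *x)
{
	__u32 v = *x;
	return ((v & 0x000000ffU) << 24) |
	       ((v & 0x0000ff00U) <<  8) |
	       ((v & 0x00ff0000U) >>  8) |
	       ((v & 0xff000000U) >> 24);
}

int main(void)
{
	__u32 v = 0x12345678;
	printf("%08x\n", swab32p_sketch(&v)); /* prints 78563412 */
	return 0;
}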
......
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on the s390. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#endif /* _S390_CACHEFLUSH_H */
/*
* File...........: linux/include/asm-s390/ccwcache.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
*/
#ifndef CCWCACHE_H
#define CCWCACHE_H
#include <linux/slab.h>
#include <asm/irq.h>
#ifndef __KERNEL__
#define kmem_cache_t void
#endif /* __KERNEL__ */
typedef struct ccw_req_t {
/* eye catcher plus queueing information */
unsigned int magic;
struct ccw_req_t *next; /* pointer to next ccw_req_t in queue */
struct ccw_req_t *int_next; /* for internal queueing */
struct ccw_req_t *int_prev; /* for internal queueing */
/* Where to execute what... */
void *device; /* index of the device the req is for */
void *req; /* pointer to originating request */
ccw1_t *cpaddr; /* address of channel program */
char status; /* reflecting the status of this request */
char flags; /* see below */
short retries; /* A retry counter to be set when filling */
/* ... and how */
int options; /* options for execution */
char lpm; /* logical path mask */
void *data; /* pointer to data area */
devstat_t *dstat; /* The device status in case of an error */
/* these are important for recovering erroneous requests */
struct ccw_req_t *refers; /* Does this request refer to another one? */
void *function; /* refers to the originating ERP action */
unsigned long long expires; /* expiration period */
/* these are for profiling purposes */
unsigned long long buildclk; /* TOD-clock of request generation */
unsigned long long startclk; /* TOD-clock of request start */
unsigned long long stopclk; /* TOD-clock of request interrupt */
unsigned long long endclk; /* TOD-clock of request termination */
/* these are for internal use */
int cplength; /* length of the channel program in CCWs */
int datasize; /* amount of additional data in bytes */
kmem_cache_t *cache; /* the cache this data comes from */
} __attribute__ ((aligned(4))) ccw_req_t;
/*
* ccw_req_t -> status can be:
*/
#define CQR_STATUS_EMPTY 0x00 /* request is empty */
#define CQR_STATUS_FILLED 0x01 /* request is ready to be processed */
#define CQR_STATUS_QUEUED 0x02 /* request is queued to be processed */
#define CQR_STATUS_IN_IO 0x03 /* request is currently in IO */
#define CQR_STATUS_DONE 0x04 /* request is completed successfully */
#define CQR_STATUS_ERROR 0x05 /* request is completed with error */
#define CQR_STATUS_FAILED 0x06 /* request is finally failed */
#define CQR_STATUS_PENDING 0x07 /* request is waiting for interrupt - ERP only */
#define CQR_FLAGS_CHAINED 0x01 /* request is chained by another (last CCW is TIC) */
#ifdef __KERNEL__
#define SMALLEST_SLAB (sizeof(struct ccw_req_t) <= 128 ? 128 :\
sizeof(struct ccw_req_t) <= 256 ? 256 : 512 )
/* SMALLEST_SLAB(1),... PAGE_SIZE(CCW_NUMBER_CACHES) */
#define CCW_NUMBER_CACHES (sizeof(struct ccw_req_t) <= 128 ? 6 :\
sizeof(struct ccw_req_t) <= 256 ? 5 : 4 )
int ccwcache_init (void);
ccw_req_t *ccw_alloc_request (char *magic, int cplength, int additional_data);
void ccw_free_request (ccw_req_t * request);
#endif /* __KERNEL__ */
#endif /* CCWCACHE_H */
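A driver built on this interface would allocate a request with room for its channel program and payload, fill it in, and mark it CQR_STATUS_FILLED before queueing it. A hypothetical sketch based only on the declarations above ("XDSK" is an invented magic string, not a real discipline):

/* Hypothetical usage sketch; assumes <asm/ccwcache.h> and <asm/irq.h>. */
static ccw_req_t *build_request_sketch(void)
{
	/* room for a 2-CCW channel program plus 64 bytes of data */
	ccw_req_t *cqr = ccw_alloc_request("XDSK", 2, 64);

	if (cqr == NULL)
		return NULL;
	cqr->retries = 3;
	cqr->lpm = LPM_ANYPATH;			/* any logical path */
	cqr->status = CQR_STATUS_FILLED;	/* ready to be queued */
	return cqr;
}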
......@@ -13,16 +13,14 @@
#ifdef __KERNEL__
#include <asm/thread_info.h>
struct task_struct;
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
__asm__("lhi %0,-8192\n\t"
"al %0,0xc40"
: "=&r" (current) : : "cc" );
return current;
}
return current_thread_info()->task;
}
#define current get_current()
......
......@@ -10,6 +10,10 @@
*
* History of changes (starts July 2000)
* 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h
* 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2)
* 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD (and BIODASDENAPAV) IOCTL
* 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL
*
*/
#ifndef DASD_H
......@@ -18,10 +22,125 @@
#define DASD_IOCTL_LETTER 'D'
#if (DASD_API_VERSION == 0)
#define DASD_API_VERSION 4
/*
* struct dasd_information2_t
* represents any data about the device which is visible to userspace,
* including format and features.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len; /* length of chanq */
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
unsigned int format; /* format info like formatted/cdl/ldl/... */
unsigned int features; /* dasd features like 'ro',... */
unsigned int reserved0; /* reserved for further use ,... */
unsigned int reserved1; /* reserved for further use ,... */
unsigned int reserved2; /* reserved for further use ,... */
unsigned int reserved3; /* reserved for further use ,... */
unsigned int reserved4; /* reserved for further use ,... */
unsigned int reserved5; /* reserved for further use ,... */
unsigned int reserved6; /* reserved for further use ,... */
unsigned int reserved7; /* reserved for further use ,... */
} dasd_information2_t;
/*
* values to be used for dasd_information_t.format
* 0x00: NOT formatted
* 0x01: Linux disc layout
* 0x02: Common disc layout
*/
#define DASD_FORMAT_NONE 0
#define DASD_FORMAT_LDL 1
#define DASD_FORMAT_CDL 2
/*
* values to be used for dasd_information_t.features
* 0x00: default features
* 0x01: readonly (ro)
*/
#define DASD_FEATURE_DEFAULT 0
#define DASD_FEATURE_READONLY 1
#define DASD_PARTN_BITS 2
/*
* struct dasd_information_t
* represents any data about the device which is visible to userspace
*/
typedef struct dasd_information_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len; /* length of chanq */
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
/*
* Read Subsystem Data - Performance Statistics
*/
typedef struct dasd_rssd_perf_stats_t {
unsigned char invalid:1;
unsigned char format:3;
unsigned char data_format:4;
unsigned char unit_address;
unsigned short device_status;
unsigned int nr_read_normal;
unsigned int nr_read_normal_hits;
unsigned int nr_write_normal;
unsigned int nr_write_fast_normal_hits;
unsigned int nr_read_seq;
unsigned int nr_read_seq_hits;
unsigned int nr_write_seq;
unsigned int nr_write_fast_seq_hits;
unsigned int nr_read_cache;
unsigned int nr_read_cache_hits;
unsigned int nr_write_cache;
unsigned int nr_write_fast_cache_hits;
unsigned int nr_inhibit_cache;
unsigned int nr_bybass_cache;
unsigned int nr_seq_dasd_to_cache;
unsigned int nr_dasd_to_cache;
unsigned int nr_cache_to_dasd;
unsigned int nr_delayed_fast_write;
unsigned int nr_normal_fast_write;
unsigned int nr_seq_fast_write;
unsigned int nr_cache_miss;
unsigned char status2;
unsigned int nr_quick_write_promotes;
unsigned char reserved;
unsigned short ssid;
unsigned char reseved2[96];
} __attribute__((packed)) dasd_rssd_perf_stats_t;
/*
* struct profile_info_t
* holds the profiling information
......@@ -62,30 +181,36 @@ typedef struct format_data_t {
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct dasd_information_t
* represents any data about the data, which is visible to userspace
* struct attrib_data_t
* represents the operation (cache) bits for the device.
* Used in DE to influence caching of the DASD.
*/
typedef struct dasd_information_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len;
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
typedef struct attrib_data_t {
unsigned char operation:3; /* cache operation mode */
unsigned char reserved:5; /* reserved */
__u16 nr_cyl; /* number of cylinders for read ahead */
__u8 reserved2[29]; /* for future use */
} __attribute__ ((packed)) attrib_data_t;
/* definition of operation (cache) bits within attributes of DE */
#define DASD_NORMAL_CACHE 0x0
#define DASD_BYPASS_CACHE 0x1
#define DASD_INHIBIT_LOAD 0x2
#define DASD_SEQ_ACCESS 0x3
#define DASD_SEQ_PRESTAGE 0x4
#define DASD_REC_ACCESS 0x5
/********************************************************************************
* SECTION: Definition of IOCTLs
*
* Here is how the ioctl numbers should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
*******************************************************************************/
/* Disable the volume (for Linux) */
#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0)
......@@ -97,15 +222,28 @@ typedef struct dasd_information_t {
#define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */
/* reset profiling information of a device */
#define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5)
/* enable PAV */
#define BIODASDENAPAV _IO(DASD_IOCTL_LETTER,6)
/* retrieve API version number */
#define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int)
/* Get information on a dasd device */
#define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t)
/* retrieve profiling information of a device */
#define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t)
/* Get information on a dasd device (enhanced) */
#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t)
/* Performance Statistics Read */
#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
#endif /* DASD_API_VERSION */
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
#endif /* DASD_H */
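From user space the new ioctls are issued against an open DASD block device node. A hedged sketch of querying the enhanced device information (the device path is an example, not a guarantee):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* assumes the definitions above, e.g. via <asm/dasd.h> */

int main(void)
{
	dasd_information2_t info;
	int fd = open("/dev/dasda", O_RDONLY);	/* example device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BIODASDINFO2, &info) == 0)
		printf("devno %04x type %.4s ro=%d\n", info.devno, info.type,
		       (info.features & DASD_FEATURE_READONLY) != 0);
	close(fd);
	return 0;
}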
/*
......
......@@ -54,7 +54,7 @@ struct __debug_entry{
#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
/* the entry information */
#define STCK(x) asm volatile ("STCK %0" : "=m" (x) : : "cc" )
#define STCK(x) asm volatile ("STCK 0(%1)" : "=m" (x) : "a" (&(x)) : "cc")
typedef struct __debug_entry debug_entry_t;
......
......@@ -24,7 +24,7 @@ extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
extern __inline__
void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
{
if (nr <= 0)
if (nr-- <= 0)
return;
__asm__ __volatile__(
" bras 1,1f\n"
......@@ -34,7 +34,7 @@ void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
"1: ahi %1,-256\n"
" jp 0b\n"
" ex %1,0(1)"
: "+&a" (addr), "+&a" (nr-1)
: "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1" );
}
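The bras/ex sequence above executes a tr (translate) instruction over up to 256 bytes at a time; the fix passes nr-1 through the register operand itself instead of writing into the constraint expression. A plain C sketch of the same table-driven translation, for illustration only:

typedef unsigned char __u8;

/* Plain C sketch of codepage_convert: translate nr bytes in place
 * through a 256-byte lookup table (what tr does in hardware). */
static void codepage_convert_sketch(const __u8 *codepage, __u8 *addr, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		addr[i] = codepage[addr[i]];
}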
......
This diff is collapsed.
......@@ -16,6 +16,7 @@
#include <linux/threads.h>
#include <asm/lowcore.h>
#include <linux/sched.h>
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
......
/*
* File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
* History of changes
* 07/24/00 new file
* File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
*/
#ifndef _S390_IDALS_H
#define _S390_IDALS_H
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/irq.h>
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
static inline addr_t *
idal_alloc ( int nridaws )
/*
* Test if an address/length pair needs an idal list.
*/
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
if ( nridaws > 33 )
BUG();
return kmalloc(nridaws * sizeof(addr_t), GFP_ATOMIC | GFP_DMA );
#if defined(CONFIG_ARCH_S390X)
return ((__pa(vaddr) + length) >> 31) != 0;
#else
return 0;
#endif
}
static inline void
idal_free ( addr_t *idal )
/*
* Return the number of idal words needed for an address/length pair.
*/
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
kfree (idal);
#if defined(CONFIG_ARCH_S390X)
if (idal_is_needed(vaddr, length))
return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
#endif
return 0;
}
/*
* Create the list of idal words for an address/length pair.
*/
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
extern unsigned long __create_idal(unsigned long address, int count);
unsigned long paddr;
unsigned int cidaw;
paddr = __pa(vaddr);
cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
*idaws++ = paddr;
paddr &= -IDA_BLOCK_SIZE;
while (--cidaw > 0) {
paddr += IDA_BLOCK_SIZE;
*idaws++ = paddr;
}
#endif
return idaws;
}
/*
* Function: set_normalized_cda
* sets the address of the data in CCW
* if necessary it allocates an IDAL and sets sthe appropriate flags
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
static inline int
set_normalized_cda(ccw1_t * ccw, unsigned long address)
set_normalized_cda(ccw1_t * ccw, void *vaddr)
{
int ret = 0;
#if defined (CONFIG_ARCH_S390X)
if (((address + ccw->count) >> 31) != 0) {
if (ccw->flags & CCW_FLAG_IDA)
BUG();
address = __create_idal(address, ccw->count);
if (address)
ccw->flags |= CCW_FLAG_IDA;
else
ret = -ENOMEM;
unsigned int nridaws;
unsigned long *idal;
if (ccw->flags & CCW_FLAG_IDA)
return -EINVAL;
nridaws = idal_nr_words(vaddr, ccw->count);
if (nridaws > 0) {
idal = kmalloc(nridaws * sizeof(unsigned long),
GFP_ATOMIC | GFP_DMA );
if (idal == NULL)
return -ENOMEM;
idal_create_words(idal, vaddr, ccw->count);
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
#endif
ccw->cda = (__u32) address;
return ret;
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
/*
* Function: clear_normalized_cda
* releases any allocated IDAL related to the CCW
* Releases any allocated IDAL related to the CCW.
*/
static inline void
clear_normalized_cda ( ccw1_t * ccw )
clear_normalized_cda(ccw1_t * ccw)
{
#if defined(CONFIG_ARCH_S390X)
if ( ccw -> flags & CCW_FLAG_IDA ) {
idal_free ( (addr_t *)(unsigned long) (ccw -> cda ));
ccw -> flags &= ~CCW_FLAG_IDA;
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
#endif
ccw -> cda = 0;
ccw->cda = 0;
}
#endif
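Putting the restructured helpers together: a caller sets the CCW byte count, normalizes the data address (which may allocate an IDAL behind the scenes), and releases the IDAL when the I/O is done. A hedged sketch assuming a prepared ccw1_t and buffer:

/* Hypothetical sketch: attach a data buffer to a CCW, falling back to
 * an IDAL when the buffer crosses the 31-bit 2 GB line. */
static int attach_buffer_sketch(ccw1_t *ccw, void *buf, unsigned short count)
{
	int rc;

	ccw->count = count;
	rc = set_normalized_cda(ccw, buf);	/* may kmalloc an IDAL */
	if (rc != 0)
		return rc;			/* -EINVAL or -ENOMEM */
	/* ... start the I/O and wait for completion ... */
	clear_normalized_cda(ccw);		/* frees the IDAL, if any */
	return 0;
}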
......@@ -4,26 +4,4 @@
* S390 version
*/
#ifndef _S390_INIT_H
#define _S390_INIT_H
#define __init __attribute__ ((constructor))
/* don't know, if need on S390 */
#define __initdata
#define __initfunc(__arginit) \
__arginit __init; \
__arginit
/* For assembly routines
* need to define ?
*/
/*
#define __INIT .section ".text.init",#alloc,#execinstr
#define __FINIT .previous
#define __INITDATA .section ".data.init",#alloc,#write
*/
#define __cacheline_aligned __attribute__ ((__aligned__(256)))
#endif
#error "<asm/init.h> should never be used - use <linux/init.h> instead"
......@@ -40,6 +40,11 @@ extern inline void * phys_to_virt(unsigned long address)
return __io_virt(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap (unsigned long offset, unsigned long size)
......
......@@ -10,14 +10,12 @@
*/
#define __MAX_SUBCHANNELS 65536
#define NR_IRQS __MAX_SUBCHANNELS
#define NR_CHPIDS 256
#define LPM_ANYPATH 0xff /* doesn't really belong here, Ingo? */
#define INVALID_STORAGE_AREA ((void *)(-1 - 0x3FFF ))
extern int disable_irq(unsigned int);
extern int enable_irq(unsigned int);
/*
* path management control word
*/
......@@ -362,6 +360,92 @@ typedef struct {
/* extended part */
ciw_t ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed,aligned(4))) senseid_t;
/*
* where we put the ssd info
*/
typedef struct _ssd_info {
__u8 valid:1;
__u8 type:7; /* subchannel type */
__u8 chpid[8]; /* chpids */
__u16 fla[8]; /* full link addresses */
} __attribute__ ((packed)) ssd_info_t;
/*
* area for store event information
*/
typedef struct chsc_area_t {
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
union {
struct {
/* word 1 */
__u32 reserved1;
/* word 2 */
__u32 reserved2;
} __attribute__ ((packed,aligned(8))) sei_req;
struct {
/* word 1 */
__u16 reserved1;
__u16 f_sch; /* first subchannel */
/* word 2 */
__u16 reserved2;
__u16 l_sch; /* last subchannel */
} __attribute__ ((packed,aligned(8))) ssd_req;
} request_block_data;
/* word 3 */
__u32 reserved3;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
union {
struct {
/* word 2 */
__u8 flags;
__u8 vf; /* validity flags */
__u8 rs; /* reporting source */
__u8 cc; /* content code */
/* word 3 */
__u16 fla; /* full link address */
__u16 rsid; /* reporting source id */
/* word 4 */
__u32 reserved2;
/* word 5 */
__u32 reserved3;
/* word 6 */
__u32 ccdf; /* content-code dependent field */
/* word 7 */
__u32 reserved4;
/* word 8 */
__u32 reserved5;
/* word 9 */
__u32 reserved6;
} __attribute__ ((packed,aligned(8))) sei_res;
struct {
/* word 2 */
__u8 sch_valid : 1;
__u8 dev_valid : 1;
__u8 st : 3; /* subchannel type */
__u8 zeroes : 3;
__u8 unit_addr; /* unit address */
__u16 devno; /* device number */
/* word 3 */
__u8 path_mask;
__u8 fla_valid_mask;
__u16 sch; /* subchannel */
/* words 4-5 */
__u8 chpid[8]; /* chpids 0-7 */
/* words 6-9 */
__u16 fla[8]; /* full link addresses 0-7 */
} __attribute__ ((packed,aligned(8))) ssd_res;
} response_block_data;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE))) chsc_area_t;
#endif /* __KERNEL__ */
/*
......@@ -491,6 +575,7 @@ typedef struct {
/* ... for suspended CCWs */
#define DOIO_TIMEOUT 0x0080 /* 3 secs. timeout for sync. I/O */
#define DOIO_DONT_CALL_INTHDLR 0x0100 /* don't call interrupt handler */
#define DOIO_CANCEL_ON_TIMEOUT 0x0200 /* cancel I/O if it timed out */
/*
* do_IO()
......@@ -513,11 +598,6 @@ int do_IO( int irq, /* IRQ aka. subchannel number */
__u8 lpm, /* logical path mask */
unsigned long flag); /* flags : see above */
int start_IO( int irq, /* IRQ aka. subchannel number */
ccw1_t *cpa, /* logical channel program address */
unsigned long intparm, /* interruption parameter */
__u8 lpm, /* logical path mask */
unsigned int flag); /* flags : see above */
void do_crw_pending( void ); /* CRW handler */
......@@ -531,14 +611,6 @@ int clear_IO( int irq, /* IRQ aka. subchannel number */
unsigned long intparm, /* dummy intparm */
unsigned long flag); /* possible DOIO_WAIT_FOR_INTERRUPT */
int process_IRQ( struct pt_regs regs,
unsigned int irq,
unsigned int intparm);
int enable_cpu_sync_isc ( int irq );
int disable_cpu_sync_isc( int irq );
typedef struct {
int irq; /* irq, aka. subchannel */
__u16 devno; /* device number */
......@@ -546,8 +618,6 @@ typedef struct {
senseid_t sid_data; /* senseID data */
} s390_dev_info_t;
int get_dev_info( int irq, s390_dev_info_t *); /* to be eliminated - don't use */
int get_dev_info_by_irq ( int irq, s390_dev_info_t *pdi);
int get_dev_info_by_devno( __u16 devno, s390_dev_info_t *pdi);
......@@ -560,8 +630,6 @@ int get_irq_next ( int irq );
int read_dev_chars( int irq, void **buffer, int length );
int read_conf_data( int irq, void **buffer, int *length, __u8 lpm );
int s390_DevicePathVerification( int irq, __u8 domask );
int s390_request_irq_special( int irq,
io_handler_func_t io_handler,
not_oper_handler_func_t not_oper_handler,
......@@ -570,7 +638,6 @@ int s390_request_irq_special( int irq,
void *dev_id);
extern int set_cons_dev(int irq);
extern int reset_cons_dev(int irq);
extern int wait_cons_dev(int irq);
extern schib_t *s390_get_schib( int irq );
......@@ -630,11 +697,6 @@ extern __inline__ int msch_err(int irq, volatile schib_t *addr)
" .align 8\n"
" .quad 0b,2b\n"
".previous"
" lr 1,%1\n"
" msch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
#else
".section .fixup,\"ax\"\n"
"2: l %0,%3\n"
......@@ -743,6 +805,21 @@ extern __inline__ int hsch(int irq)
return ccode;
}
extern __inline__ int xsch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" .insn rre,0xb2760000,%1,0\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int iac( void)
{
int ccode;
......@@ -805,6 +882,20 @@ extern __inline__ int diag210( diag210_t * addr)
: "cc" );
return ccode;
}
extern __inline__ int chsc( chsc_area_t * chsc_area)
{
int cc;
__asm__ __volatile__ (
".insn rre,0xb25f0000,%1,0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (chsc_area)
: "cc" );
return cc;
}
/*
* Various low-level irq details needed by irq.c, process.c,
......@@ -813,13 +904,6 @@ extern __inline__ int diag210( diag210_t * addr)
* Interrupt entry/exit code at both C and assembly level
*/
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
#define MAX_IRQ_SOURCES 128
extern spinlock_t irq_controller_lock;
#ifdef CONFIG_SMP
#include <asm/atomic.h>
......@@ -849,17 +933,10 @@ static inline void irq_exit(int cpu, unsigned int irq)
#define __STR(x) #x
#define STR(x) __STR(x)
#ifdef CONFIG_SMP
/*
* SMP has a few special interrupts for IPI messages
*/
#endif /* CONFIG_SMP */
/*
* x86 profiling function, SMP safe. We might want to do this in
* assembly totally?
* is this ever used anyway?
*/
extern char _stext;
static inline void s390_do_profile (unsigned long addr)
......@@ -883,16 +960,19 @@ static inline void s390_do_profile (unsigned long addr)
#include <asm/s390io.h>
#define get_irq_lock(irq) &ioinfo[irq]->irq_lock
#define s390irq_spin_lock(irq) \
spin_lock(&(ioinfo[irq]->irq_lock))
spin_lock(get_irq_lock(irq))
#define s390irq_spin_unlock(irq) \
spin_unlock(&(ioinfo[irq]->irq_lock))
spin_unlock(get_irq_lock(irq))
#define s390irq_spin_lock_irqsave(irq,flags) \
spin_lock_irqsave(&(ioinfo[irq]->irq_lock), flags)
spin_lock_irqsave(get_irq_lock(irq), flags)
#define s390irq_spin_unlock_irqrestore(irq,flags) \
spin_unlock_irqrestore(&(ioinfo[irq]->irq_lock), flags)
spin_unlock_irqrestore(get_irq_lock(irq), flags)
#define touch_nmi_watchdog() do { } while(0)
......
......@@ -45,6 +45,9 @@
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
#define __LC_JIFFY_TIMER 0xC80
#define __LC_PANIC_MAGIC 0xE00
#define __LC_PFAULT_INTPARM 0x080
......@@ -161,7 +164,7 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer_cc; /* 0xc80 */
__u64 jiffy_timer; /* 0xc80 */
atomic_t ext_call_fast; /* 0xc88 */
__u8 pad11[0xe00-0xc8c]; /* 0xc8c */
......@@ -182,12 +185,12 @@ extern __inline__ void set_prefix(__u32 address)
extern struct _lowcore *lowcore_ptr[];
#ifndef CONFIG_SMP
#define get_cpu_lowcore(cpu) S390_lowcore
#define safe_get_cpu_lowcore(cpu) S390_lowcore
#define get_cpu_lowcore(cpu) (&S390_lowcore)
#define safe_get_cpu_lowcore(cpu) (&S390_lowcore)
#else
#define get_cpu_lowcore(cpu) (*lowcore_ptr[cpu])
#define get_cpu_lowcore(cpu) (lowcore_ptr[(cpu)])
#define safe_get_cpu_lowcore(cpu) \
((cpu)==smp_processor_id() ? S390_lowcore:(*lowcore_ptr[(cpu)]))
((cpu) == smp_processor_id() ? &S390_lowcore : lowcore_ptr[(cpu)])
#endif
#endif /* __ASSEMBLY__ */
......
......@@ -12,6 +12,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
......
......@@ -59,8 +59,8 @@ static inline void copy_page(void *to, void *from)
: "memory" );
}
#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
......@@ -116,9 +116,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(x)
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define __va(x) (void *)(unsigned long)(x)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
......@@ -4,6 +4,7 @@
/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
* includes it even if CONFIG_PCI is not set.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* __ASM_S390_PCI_H */
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
......@@ -17,10 +17,7 @@
#include <asm/processor.h>
#include <linux/threads.h>
#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
#define check_pgt_cache() do {} while (0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
......@@ -28,67 +25,35 @@
* if any.
*/
extern __inline__ pgd_t* get_pgd_slow(void)
{
pgd_t *ret;
int i;
ret = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
if (ret != NULL)
for (i = 0; i < USER_PTRS_PER_PGD; i++)
pmd_clear(pmd_offset(ret + i, i*PGDIR_SIZE));
return ret;
}
extern __inline__ pgd_t* get_pgd_fast(void)
{
unsigned long *ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size -= 2;
}
return (pgd_t *)ret;
}
extern __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
int i;
pgd = get_pgd_fast();
if (!pgd)
pgd = get_pgd_slow();
pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
if (pgd != NULL)
for (i = 0; i < USER_PTRS_PER_PGD; i++)
pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
return pgd;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size += 2;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
static inline void pgd_free(pgd_t *pgd)
{
free_pages((unsigned long) pgd, 1);
}
#define pgd_free(pgd) free_pgd_fast(pgd)
/*
* page middle directory allocation/free routines.
* We don't use pmd cache, so these are dummy routines. This
* code never triggers because the pgd will always be present.
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pmd_free_slow(x) do { } while (0)
#define pmd_free_fast(x) do { } while (0)
#define pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
......@@ -96,50 +61,53 @@ extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}
/*
* page table entry allocation/free routines.
*/
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
pte_t *pte;
int count;
int i;
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i);
}
count = 0;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i);
} else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
extern __inline__ pte_t *
pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *ret = (unsigned long *) pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
return (pte_t *)ret;
return virt_to_page(pte_alloc_one_kernel(mm, vmaddr));
}
extern __inline__ void pte_free_fast(pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
free_page((unsigned long) pte);
}
extern __inline__ void pte_free_slow(pte_t *pte)
static inline void pte_free(struct page *pte)
{
free_page((unsigned long) pte);
__free_page(pte);
}
#define pte_free(pte) pte_free_fast(pte)
extern int do_check_pgt_cache(int, int);
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* This establishes kernel virtual mappings (e.g., as a result of a
......@@ -148,151 +116,6 @@ extern int do_check_pgt_cache(int, int);
*/
#define set_pgdir(addr,entry) do { } while(0)
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* called only from vmalloc/vfree
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
* We always need to flush, since s390 does not flush tlb
* on each context switch
*/
static inline void flush_tlb(void)
{
local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
local_flush_tlb();
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
local_flush_tlb();
}
#else
#include <asm/smp.h>
extern void smp_ptlb_all(void);
static inline void global_flush_tlb_csp(void)
{
int cs1=0,dum=0;
int *adr;
long long dummy=0;
adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
__asm__ __volatile__("lr 2,%0\n\t"
"lr 3,%1\n\t"
"lr 4,%2\n\t"
"csp 2,4" :
: "d" (cs1), "d" (dum), "d" (adr)
: "2", "3", "4");
}
static inline void global_flush_tlb(void)
{
if (MACHINE_HAS_CSP)
global_flush_tlb_csp();
else
smp_ptlb_all();
}
/*
* We only have to do a global TLB flush if the process has run on any
* PU other than the current one since the last flush.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
if ((smp_num_cpus > 1) &&
((atomic_read(&mm->mm_count) != 1) ||
(mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
global_flush_tlb();
} else {
local_flush_tlb();
}
}
static inline void flush_tlb(void)
{
__flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_mm(vma->vm_mm);
}
#endif
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* S/390 does not keep any page table caches in TLB */
}
static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_young(ptep);
}
static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_dirty(ptep);
}
static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
......
......@@ -33,17 +33,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
/* Caches aren't brain-dead on S390. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
/*
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
......@@ -156,7 +145,8 @@ extern char empty_zero_page[PAGE_SIZE];
/* Bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* Software */
#define _PAGE_MKCLEAR 0x002 /* Software */
#define _PAGE_MKCLEAN 0x002 /* Software */
#define _PAGE_ISCLEAN 0x004 /* Software */
#define _PAGE_RO 0x200 /* HW read-only */
#define _PAGE_INVALID 0x400 /* HW invalid */
......@@ -189,12 +179,14 @@ extern char empty_zero_page[PAGE_SIZE];
/*
* No mapping available
*/
#define PAGE_INVALID __pgprot(_PAGE_INVALID)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT)
#define PAGE_INVALID __pgprot(_PAGE_INVALID)
#define PAGE_NONE_SHARED __pgprot(_PAGE_PRESENT|_PAGE_INVALID)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_INVALID|_PAGE_ISCLEAN)
#define PAGE_RO_SHARED __pgprot(_PAGE_PRESENT|_PAGE_RO)
#define PAGE_RO_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_COPY __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT)
/*
* The S390 can't do page protection for execute, and considers that the
......@@ -202,21 +194,21 @@ extern char empty_zero_page[PAGE_SIZE];
* the closest we can get..
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P000 PAGE_NONE_PRIVATE
#define __P001 PAGE_RO_PRIVATE
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P100 PAGE_RO_PRIVATE
#define __P101 PAGE_RO_PRIVATE
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S000 PAGE_NONE_SHARED
#define __S001 PAGE_RO_SHARED
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S100 PAGE_RO_SHARED
#define __S101 PAGE_RO_SHARED
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
......@@ -227,10 +219,10 @@ extern char empty_zero_page[PAGE_SIZE];
*/
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
if ((pte_val(pteval) & (_PAGE_MKCLEAR|_PAGE_INVALID))
== _PAGE_MKCLEAR)
if ((pte_val(pteval) & (_PAGE_MKCLEAN|_PAGE_INVALID))
== _PAGE_MKCLEAN)
{
pte_val(pteval) &= ~_PAGE_MKCLEAR;
pte_val(pteval) &= ~_PAGE_MKCLEAN;
asm volatile ("sske %0,%1"
: : "d" (0), "a" (pte_val(pteval)));
......@@ -239,8 +231,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pteval;
}
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* pgd/pmd/pte query functions
*/
......@@ -277,6 +267,8 @@ extern inline int pte_dirty(pte_t pte)
{
int skey;
if (pte_val(pte) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
return skey & _PAGE_CHANGED;
}
......@@ -307,15 +299,14 @@ extern inline void pte_clear(pte_t *ptep)
pte_val(*ptep) = _PAGE_INVALID;
}
#define PTE_INIT(x) pte_clear(x)
/*
* The following pte modification functions only work if
* pte_present() is true. Undefined behaviour if not..
*/
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot);
pte_val(pte) &= PAGE_MASK | _PAGE_ISCLEAN;
pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_ISCLEAN;
return pte;
}
......@@ -342,13 +333,11 @@ extern inline pte_t pte_mkclean(pte_t pte)
extern inline pte_t pte_mkdirty(pte_t pte)
{
/* We can't set the changed bit atomically. For now we
* set (!) the page referenced bit. */
asm volatile ("sske %0,%1"
: : "d" (_PAGE_CHANGED|_PAGE_REFERENCED),
"a" (pte_val(pte)));
pte_val(pte) &= ~_PAGE_MKCLEAR;
/* We do not explicitly set the dirty bit because the
* sske instruction is slow. It is faster to let the
* next instruction set the dirty bit.
*/
pte_val(pte) &= ~(_PAGE_MKCLEAN | _PAGE_ISCLEAN);
return pte;
}
......@@ -382,6 +371,8 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
int skey;
if (pte_val(*ptep) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (*ptep));
if ((skey & _PAGE_CHANGED) == 0)
return 0;
......@@ -414,7 +405,7 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
......@@ -424,24 +415,41 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define mk_pte(pg, pgprot) \
({ \
struct page *__page = (pg); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, (pgprot)); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (__page != ZERO_PAGE(__physpage)) { \
int __users = page_count(__page); \
__users -= !!PagePrivate(__page) + !!__page->mapping; \
\
if (__users == 1) \
pte_val(__pte) |= _PAGE_MKCLEAR; \
} \
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pfn_pte(pfn, pgprot) \
({ \
struct page *__page = mem_map+(pfn); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
......@@ -457,8 +465,13 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
}
/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_kernel(*(pmd)) + __pte_offset(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
* A page-table entry has some bits we have to treat in a special way.
......
......@@ -16,6 +16,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#ifdef __KERNEL__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
......@@ -61,11 +62,8 @@ extern struct task_struct *last_task_used_math;
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
#define THREAD_SIZE (2*PAGE_SIZE)
typedef struct {
unsigned long seg;
unsigned long acc4;
__u32 ar4;
} mm_segment_t;
/* if you change the thread_struct structure, you must
......@@ -74,8 +72,6 @@ typedef struct {
struct thread_struct
{
struct pt_regs *regs; /* the user registers can be found on*/
s390_fp_regs fp_regs;
__u32 ar2; /* kernel access register 2 */
__u32 ar4; /* kernel access register 4 */
......@@ -84,8 +80,6 @@ struct thread_struct
__u32 error_code; /* error-code of last prog-excep. */
__u32 prot_addr; /* address of protection-excep. */
__u32 trap_no;
/* perform syscall argument validation (get/set_fs) */
mm_segment_t fs;
per_struct per_info;/* Must be aligned on an 4 byte boundary*/
/* Used to give failing instruction back to user for ieee exceptions */
addr_t ieee_instruction_pointer;
......@@ -95,14 +89,12 @@ struct thread_struct
typedef struct thread_struct thread_struct;
#define INIT_THREAD { (struct pt_regs *) 0, \
{ 0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
{0},{0},{0},{0},{0},{0}}}, \
0, 0, \
sizeof(init_stack) + (__u32) &init_stack, \
(__pa((__u32) &swapper_pg_dir[0]) + _SEGMENT_TABLE),\
0,0,0, \
(mm_segment_t) { 0,1}, \
(per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
0, 0 \
}
......@@ -115,6 +107,7 @@ typedef struct thread_struct thread_struct;
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
......@@ -126,28 +119,20 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
/*
* Return saved PC of a blocked thread. used in kernel/sched
* Return saved PC of a blocked thread.
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
return (t->regs) ? ((unsigned long)t->regs->psw.addr) : 0;
}
extern unsigned long thread_saved_pc(struct task_struct *t);
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs->psw.addr)
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
/* Allocation and freeing of basic task resources. */
/*
* NOTE! The task struct and the stack go together
* Print registers of a task into a buffer. Used in fs/proc/array.c.
*/
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
extern char *task_show_regs(struct task_struct *task, char *buffer);
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
(((addr_t) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)) & -8L))
#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
#define cpu_relax() do { } while (0)
......@@ -164,6 +149,46 @@ unsigned long get_wchan(struct task_struct *p);
#define USER_STD_MASK 0x00000080UL
#define PSW_PROBLEM_STATE 0x00010000UL
/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static inline void __load_psw_mask (unsigned long mask)
{
unsigned long addr;
psw_t psw;
psw.mask = mask;
asm volatile (
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
" st %0,4(%1)\n"
" lpsw 0(%1)\n"
"1:"
: "=&d" (addr) : "a" (&psw) : "memory", "cc" );
}
/*
* Function to stop a processor until an interruption occurs
*/
static inline void enabled_wait(void)
{
unsigned long reg;
psw_t wait_psw;
wait_psw.mask = 0x070e0000;
asm volatile (
" basr %0,0\n"
"0: la %0,1f-0b(%0)\n"
" st %0,4(%1)\n"
" oi 4(%1),0x80\n"
" lpsw 0(%1)\n"
"1:"
: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
}
/*
* Function to drop a processor into disabled wait state
*/
......@@ -200,4 +225,6 @@ static inline void disabled_wait(unsigned long code)
: : "a" (dw_psw), "a" (&ctl_buf) : "cc" );
}
#endif
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -105,12 +105,16 @@
#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
#define PTRACE_SETOPTIONS 21
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/current.h>
#include <asm/setup.h>
/* this typedef defines how a Program Status Word looks like */
......@@ -118,7 +122,7 @@ typedef struct
{
__u32 mask;
__u32 addr;
} psw_t __attribute__ ((aligned(8)));
} __attribute__ ((aligned(8))) psw_t;
#ifdef __KERNEL__
#define FIX_PSW(addr) ((unsigned long)(addr)|0x80000000UL)
......@@ -150,8 +154,8 @@ typedef struct
#define FPC_VALID_MASK 0xF8F8FF03
/*
* The first entries in pt_regs, gdb_pt_regs and user_regs_struct
* are common for all three structures. The s390_regs structure
* The first entries in pt_regs and user_regs_struct
* are common for the two structures. The s390_regs structure
* covers the common parts. It simplifies copying the common part
* between the two structures.
*/
......@@ -174,34 +178,15 @@ struct pt_regs
__u32 acrs[NUM_ACRS];
__u32 orig_gpr2;
__u32 trap;
__u32 old_ilc;
};
/*
* The gdb_pt_regs struct is used instead of the pt_regs structure
* if kernel remote debugging is used.
*/
#if CONFIG_REMOTE_DEBUG
struct gdb_pt_regs
{
psw_t psw;
__u32 gprs[NUM_GPRS];
__u32 acrs[NUM_ACRS];
__u32 orig_gpr2;
__u32 trap;
__u32 crs[16];
s390_fp_regs fp_regs;
__u32 old_ilc;
};
#endif
/*
* Now for the program event recording (trace) definitions.
*/
typedef struct
{
__u32 cr[3];
} per_cr_words __attribute__((packed));
} per_cr_words;
#define PER_EM_MASK 0xE8000000
......@@ -223,14 +208,14 @@ typedef struct
unsigned : 21;
addr_t starting_addr;
addr_t ending_addr;
} per_cr_bits __attribute__((packed));
} per_cr_bits;
typedef struct
{
__u16 perc_atmid; /* 0x096 */
__u32 address; /* 0x098 */
__u8 access_id; /* 0x0a1 */
} per_lowcore_words __attribute__((packed));
} per_lowcore_words;
typedef struct
{
......@@ -249,14 +234,14 @@ typedef struct
addr_t address; /* 0x098 */
unsigned : 4; /* 0x0a1 */
unsigned access_id : 4;
} per_lowcore_bits __attribute__((packed));
} per_lowcore_bits;
typedef struct
{
union {
per_cr_words words;
per_cr_bits bits;
} control_regs __attribute__((packed));
} control_regs;
/*
* Use these flags instead of setting em_instruction_fetch
* directly they are used so that single stepping can be
......@@ -275,7 +260,7 @@ typedef struct
per_lowcore_words words;
per_lowcore_bits bits;
} lowcore;
} per_struct __attribute__((packed));
} per_struct;
typedef struct
{
......@@ -294,6 +279,7 @@ typedef struct
#define PTRACE_PEEKDATA_AREA 0x5003
#define PTRACE_POKETEXT_AREA 0x5004
#define PTRACE_POKEDATA_AREA 0x5005
/*
* PT_PROT definition is loosely based on hppa bsd definition in
* gdb/hppab-nat.c
......@@ -345,7 +331,6 @@ struct user_regs_struct
#define user_mode(regs) (((regs)->psw.mask & PSW_PROBLEM_STATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr)
extern void show_regs(struct pt_regs * regs);
extern char *task_show_regs(struct task_struct *task, char *buffer);
#endif
#endif /* __ASSEMBLY__ */
......
This diff is collapsed.
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H
/*
* include/asm-s390/rwsem.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
*/
/*
*
* The MSW of the count is the negated number of active writers and waiting
* lockers, and the LSW is the total number of active locks
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
* uncontended lock. This can be determined because XADD returns the old value.
* Readers increment by 1 and see a positive value when uncontended, and a
* negative value if there are writers (and maybe readers) waiting (in which
* case the reader goes to sleep).
*
* The value of WAITING_BIAS supports up to 32766 waiting processes. This can
* be extended to 65534 by manually checking the whole MSW rather than relying
* on the S flag.
*
* The value of ACTIVE_BIAS supports up to 65535 active processes.
*
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
#endif
#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
struct rwsem_waiter;
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
/*
* the semaphore definition
*/
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
};
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
* initialisation
*/
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
static inline void init_rwsem(struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
/*
* lock for reading
*/
static inline void __down_read(struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" ahi %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory" );
if (old < 0)
rwsem_down_read_failed(sem);
}
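Each primitive in this file uses the same load / modify / COMPARE AND SWAP retry loop. As a rough C rendering of __down_read (a sketch; __cs32 is a hypothetical stand-in for the CS instruction, not a real kernel helper):

/* Hypothetical: set *ptr to new if *ptr == old; return the value
 * *ptr held beforehand, as CS leaves it in its first operand. */
static signed long __cs32(volatile signed long *ptr,
			  signed long old, signed long new);

static void __down_read_sketch(struct rw_semaphore *sem)
{
	signed long old, new, prev;

	old = sem->count;
	for (;;) {
		new = old + RWSEM_ACTIVE_READ_BIAS;
		prev = __cs32(&sem->count, old, new);
		if (prev == old)
			break;		/* nobody raced us: done */
		old = prev;		/* count changed under us: retry */
	}
	if (old < 0)			/* writer active or queued */
		rwsem_down_read_failed(sem);
}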
/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" a %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "m" (tmp)
: "cc", "memory" );
if (old != 0)
rwsem_down_write_failed(sem);
}
/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" ahi %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory" );
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* unlock after writing
*/
static inline void __up_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" a %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "m" (tmp)
: "cc", "memory" );
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" ar %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "d" (delta)
: "cc", "memory" );
}
/*
* implement exchange and add functionality
*/
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" l %0,0(%2)\n"
"0: lr %1,%0\n"
" ar %1,%3\n"
" cs %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "d" (delta)
: "cc", "memory" );
return new;
}
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
/*
* include/asm-s390/s390-gdbregs.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* used both by the Linux kernel (for remote debugging) and by gdb
*/
#ifndef _S390_GDBREGS_H
#define _S390_GDBREGS_H
#ifdef __KERNEL__
#include <asm/s390-regs-common.h>
#else
#include <s390/s390-regs-common.h>
#endif
#define S390_MAX_INSTR_SIZE 6
#define NUM_REGS (2+NUM_GPRS+NUM_ACRS+NUM_CRS+1+NUM_FPRS)
#define FIRST_ACR (2+NUM_GPRS)
#define LAST_ACR (FIRST_ACR+NUM_ACRS-1)
#define FIRST_CR (FIRST_ACR+NUM_ACRS)
#define LAST_CR (FIRST_CR+NUM_CRS-1)
#define PSWM_REGNUM 0
#define PC_REGNUM 1
#define GP0_REGNUM 2 /* GPR register 0 */
#define GP_LAST_REGNUM (GP0_REGNUM+NUM_GPRS-1)
#define RETADDR_REGNUM (GP0_REGNUM+14) /* Usually return address */
#define SP_REGNUM (GP0_REGNUM+15) /* Contains address of top of stack */
#define FP_REGNUM SP_REGNUM /* needed in findvar.c still */
#define FRAME_REGNUM (GP0_REGNUM+11)
#define FPC_REGNUM (GP0_REGNUM+NUM_GPRS+NUM_ACRS+NUM_CRS)
#define FP0_REGNUM (FPC_REGNUM+1) /* FPR (Floating point) register 0 */
#define FPLAST_REGNUM (FP0_REGNUM+NUM_FPRS-1) /* Last floating point register */
/* The top of this structure is kept as similar as possible to a pt_regs */
/* structure in order to simplify code. */
typedef struct
{
S390_REGS_COMMON
__u32 crs[NUM_CRS];
s390_fp_regs fp_regs;
} s390_gdb_regs __attribute__((packed));
#define REGISTER_NAMES \
{ \
"pswm","pswa", \
"gpr0","gpr1","gpr2","gpr3","gpr4","gpr5","gpr6","gpr7", \
"gpr8","gpr9","gpr10","gpr11","gpr12","gpr13","gpr14","gpr15", \
"acr0","acr1","acr2","acr3","acr4","acr5","acr6","acr7", \
"acr8","acr9","acr10","acr11","acr12","acr13","acr14","acr15", \
"cr0","cr1","cr2","cr3","cr4","cr5","cr6","cr7", \
"cr8","cr9","cr10","cr11","cr12","cr13","cr14","cr15", \
"fpc", \
"fpr0","fpr1","fpr2","fpr3","fpr4","fpr5","fpr6","fpr7", \
"fpr8","fpr9","fpr10","fpr11","fpr12","fpr13","fpr14","fpr15" \
}
/* Index within `registers' of the first byte of the space for
register N. */
#define FP0_OFFSET ((PSW_MASK_SIZE+PSW_ADDR_SIZE)+ \
(GPR_SIZE*NUM_GPRS)+(ACR_SIZE*NUM_ACRS)+ \
(CR_SIZE*NUM_CRS)+(FPC_SIZE+FPC_PAD_SIZE))
#define REGISTER_BYTES \
((FP0_OFFSET)+(FPR_SIZE*NUM_FPRS))
#define REGISTER_BYTE(N) ((N) < FP0_REGNUM ? (N)*4:(FP0_OFFSET+((N)-FP0_REGNUM)*FPR_SIZE))
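For orientation, the resulting buffer layout under the usual assumption of 4-byte PSW mask/address, GPR, ACR and CR slots and 8-byte FPRs (illustrative, not part of the header):

/*
 * REGISTER_BYTE(PSWM_REGNUM)	 ==  0
 * REGISTER_BYTE(PC_REGNUM)	 ==  4
 * REGISTER_BYTE(GP0_REGNUM)	 ==  8
 * REGISTER_BYTE(SP_REGNUM)	 ==  8 + 15*4 == 68
 * REGISTER_BYTE(FP0_REGNUM)	 == FP0_OFFSET
 * REGISTER_BYTE(FP0_REGNUM+1)	 == FP0_OFFSET + FPR_SIZE
 */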
#endif
......@@ -25,6 +25,10 @@ typedef struct ext_int_info_t {
extern ext_int_info_t *ext_int_hash[];
int register_external_interrupt(__u16 code, ext_int_handler_t handler);
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
#endif
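A usage sketch for the registration interface above (the external-interruption code 0x1234 is made up, and the handler signature shown is an assumption about ext_int_handler_t):

/* Sketch only: register a handler for a hypothetical external
 * interruption code.  The handler signature is assumed. */
static void my_ext_handler(struct pt_regs *regs, __u16 code)
{
	/* process the external interruption */
}

static int __init my_setup(void)
{
	/* 0 is assumed to indicate success */
	return register_external_interrupt(0x1234, my_ext_handler);
}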
......@@ -18,10 +18,13 @@
typedef struct _ioinfo {
unsigned int irq; /* aka. subchannel number */
spinlock_t irq_lock; /* irq lock */
void *private_data; /* pointer to private data */
struct _ioinfo *prev;
struct _ioinfo *next;
__u8 st; /* subchannel type */
union {
unsigned int info;
struct {
......@@ -50,10 +53,9 @@ typedef struct _ioinfo {
unsigned int esid : 1; /* Ext. SenseID supported by HW */
unsigned int rcd : 1; /* RCD supported by HW */
unsigned int repnone : 1; /* don't call IRQ handler on interrupt */
unsigned int newreq : 1; /* new register interface */
unsigned int dval : 1; /* device number valid */
unsigned int unknown : 1; /* unknown device - if SenseID failed */
unsigned int unused : (sizeof(unsigned int)*8 - 24); /* unused */
unsigned int unused : (sizeof(unsigned int)*8 - 23); /* unused */
} __attribute__ ((packed)) flags;
} ui;
......@@ -75,6 +77,7 @@ typedef struct _ioinfo {
unsigned long qintparm; /* queued interruption parameter */
unsigned long qflag; /* queued flags */
__u8 qlpm; /* queued logical path mask */
ssd_info_t ssd_info; /* subchannel description */
} __attribute__ ((aligned(8))) ioinfo_t;
......@@ -89,6 +92,12 @@ typedef struct _ioinfo {
#define IOINFO_FLAGS_REPALL 0x00800000
extern ioinfo_t *ioinfo[];
int s390_set_private_data(int irq, void * data);
void * s390_get_private_data(int irq);
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
#endif /* __s390io_h */
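The private-data accessors above would typically be used by a device driver to attach per-subchannel state; a minimal sketch (my_state and the surrounding functions are invented for illustration):

struct my_state {
	int busy;
};

static struct my_state state;

static void my_attach(int irq)
{
	s390_set_private_data(irq, &state);
}

static void my_irq_handler(int irq)
{
	struct my_state *p = s390_get_private_data(irq);

	if (p)
		p->busy = 0;
}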
......@@ -13,10 +13,23 @@
#include <asm/types.h>
typedef struct _mci {
__u32 to_be_defined_1 : 9;
__u32 cp : 1; /* channel-report pending */
__u32 to_be_defined_2 : 22;
__u32 to_be_defined_3;
__u32 sd : 1; /* 00 system damage */
__u32 pd : 1; /* 01 instruction-processing damage */
__u32 sr : 1; /* 02 system recovery */
__u32 to_be_defined_1 : 4; /* 03-06 */
__u32 dg : 1; /* 07 degradation */
__u32 w : 1; /* 08 warning pending */
__u32 cp : 1; /* 09 channel-report pending */
__u32 to_be_defined_2 : 6; /* 10-15 */
__u32 se : 1; /* 16 storage error uncorrected */
__u32 sc : 1; /* 17 storage error corrected */
__u32 ke : 1; /* 18 storage-key error uncorrected */
__u32 ds : 1; /* 19 storage degradation */
__u32 to_be_defined_3 : 4; /* 20-23 */
__u32 fa : 1; /* 24 failing storage address validity */
__u32 to_be_defined_4 : 7; /* 25-31 */
__u32 ie : 1; /* 32 indirect storage error */
__u32 to_be_defined_5 : 31; /* 33-63 */
} mci_t;
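A sketch of how a machine-check handler might consult these bits (the helper names are placeholders, not kernel functions):

static void mci_triage_sketch(mci_t *mci)
{
	if (mci->sd)			/* system damage: unrecoverable */
		handle_system_damage();		/* placeholder */
	if (mci->cp)			/* channel report pending */
		process_crw_queue();		/* placeholder */
	if (mci->se || mci->ke)		/* uncorrected storage/key error */
		handle_storage_error(mci->fa);	/* fa: failing address valid */
}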
//
......
#ifndef _ASMS390X_SCATTERLIST_H
#define _ASMS390X_SCATTERLIST_H
#ifndef _ASMS390_SCATTERLIST_H
#define _ASMS390_SCATTERLIST_H
struct scatterlist {
struct page *page;
......
......@@ -2,7 +2,7 @@
* include/asm-s390/semaphore.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/semaphore.h"
* (C) Copyright 1996 Linus Torvalds
......@@ -17,16 +17,17 @@
#include <linux/rwsem.h>
struct semaphore {
/*
* Note that any negative value of count is equivalent to 0,
* but additionally indicates that some process(es) might be
* sleeping on `wait'.
*/
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEM_DEBUG_INIT(name)
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1)
......@@ -39,7 +40,7 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
}
static inline void init_MUTEX (struct semaphore *sem)
......@@ -52,11 +53,6 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
......@@ -79,11 +75,28 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
int old_val, new_val;
/*
* This inline assembly atomically implements the equivalent
* of the following C code:
* old_val = sem->count.counter;
* if ((new_val = old_val) > 0)
* sem->count.counter = --new_val;
* In the ppc code this is called atomic_dec_if_positive.
*/
__asm__ __volatile__ (
" l %0,0(%3)\n"
"0: ltr %1,%0\n"
" jle 1f\n"
" ahi %1,-1\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
"1:"
: "=&d" (old_val), "=&d" (new_val),
"+m" (sem->count.counter)
: "a" (&sem->count.counter) : "cc" );
return old_val <= 0;
}
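Callers use the nonzero return of down_trylock() as "could not acquire"; a usage sketch (not from this patch):

static int try_do_work(struct semaphore *sem)
{
	if (down_trylock(sem))	/* nonzero: count was already <= 0 */
		return -EBUSY;	/* fall back instead of sleeping */
	/* ... critical section ... */
	up(sem);
	return 0;
}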
static inline void up(struct semaphore * sem)
......
......@@ -13,7 +13,7 @@
#define RAMDISK_ORIGIN 0x800000
#define RAMDISK_SIZE 0x800000
#ifndef __ASSEMBLER__
#ifndef __ASSEMBLY__
#define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C))
......
......@@ -13,6 +13,7 @@
/* Avoid too many header ordering problems. */
struct siginfo;
struct pt_regs;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
......
......@@ -59,9 +59,6 @@ typedef enum
typedef enum
{
ec_schedule=0,
ec_restart,
ec_halt,
ec_power_off,
ec_call_function,
ec_bit_last
} ec_bit_sig;
......@@ -129,6 +126,6 @@ signal_processor_ps(__u32 *statusptr, __u32 parameter,
return ccode;
}
#endif __SIGP__
#endif /* __SIGP__ */
......@@ -10,6 +10,8 @@
#define __ASM_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/ptrace.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
......@@ -26,7 +28,7 @@ typedef struct
__u16 cpu;
} sigp_info;
extern unsigned long cpu_online_map;
extern volatile unsigned long cpu_online_map;
#define NO_PROC_ID 0xFF /* No processor magic marker */
......@@ -42,7 +44,7 @@ extern unsigned long cpu_online_map;
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define smp_processor_id() (current->processor)
#define smp_processor_id() (current_thread_info()->cpu)
extern __inline__ int cpu_logical_map(int cpu)
{
......@@ -64,7 +66,5 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
void smp_local_timer_interrupt(struct pt_regs * regs);
#endif
#endif
#ifndef __ASM_S390_SUSPEND_H
#define __ASM_S390_SUSPEND_H
#endif
/*************************************************************************
*
* tape390.h
* enables user programs to display messages on the tape device
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
*
*************************************************************************/
#ifndef _TAPE390_H
#define _TAPE390_H
#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
/*
* The TAPE390_DISPLAY ioctl calls the Load Display command
* which transfers 17 bytes of data from the channel to the subsystem:
* - 1 format control byte, and
* - two 8-byte messages
*
* Format control byte:
* 0-2: New Message Overlay
* 3: Alternate Messages
* 4: Blink Message
* 5: Display Low/High Message
* 6: Reserved
* 7: Automatic Load Request
*
*/
typedef struct display_struct {
char cntrl;
char message1[8];
char message2[8];
} display_struct;
#endif
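For completeness, a user-space sketch of driving this ioctl; the device node name is an assumption, and the control byte follows the bit layout documented above (bit 3, counted from the most significant bit, selects alternating messages):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "tape390.h"		/* assumed include path */

int main(void)
{
	display_struct ds;
	int fd = open("/dev/ntibm0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(&ds, 0, sizeof(ds));
	ds.cntrl = 0x10;			/* alternate message1/message2 */
	memcpy(ds.message1, "BACKUP  ", 8);
	memcpy(ds.message2, "RUNNING ", 8);
	if (ioctl(fd, TAPE390_DISPLAY, &ds) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}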