Commit c768f081 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: tape driver.

Rework of tape driver

  Almost complete rewrite of the s390 tape driver code by
  Martin Schwidefsky. Skipping the 2.4 era, this makes the tape
  code ready for the future.

  This driver probably doesn't have to change much before 2.6.

Authors:
	Martin Schwidefsky <schwidefsky@de.ibm.com>
	Stefan Bader <shbader@de.ibm.com>
parent f7a9fa93
@@ -292,16 +292,6 @@ config S390_TAPE
comment "S/390 tape interface support"
depends on S390_TAPE
config S390_TAPE_CHAR
bool "Support for tape character devices"
depends on S390_TAPE
help
Select this option if you want to access your channel-attached
tape devices using the character device interface.
This interface is similar to other Linux tape devices like
SCSI-Tapes (st) and the floppy tape device (ftape).
If unsure, say "Y".
config S390_TAPE_BLOCK
bool "Support for tape block devices"
depends on S390_TAPE
@@ -316,21 +306,14 @@ config S390_TAPE_BLOCK
comment "S/390 tape hardware support"
depends on S390_TAPE
config S390_TAPE_3490
tristate "Support for 3490 tape hardware"
config S390_TAPE_34XX
tristate "Support for 3480/3490 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3480 magnetic
Select this option if you want to access IBM 3480/3490 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config S390_TAPE_3480
tristate "Support for 3480 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3490 magnetic
tape subsystems and 100% compatibles.
endmenu
/***************************************************************************
*
/*
* drivers/s390/char/tape.h
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_H
@@ -20,348 +18,338 @@
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mtio.h>
#include <linux/interrupt.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
#endif
#include "tape_idalbuf.h"
#include <asm/idals.h>
#define TAPE_VERSION_MAJOR 1
#define TAPE_VERSION_MINOR 10
/*
* macros s390 debug feature (dbf)
*/
#define DBF_EVENT(d_level, d_str...) \
do { \
debug_sprintf_event(tape_dbf_area, d_level, d_str); \
} while (0)
#define DBF_EXCEPTION(d_level, d_str...) \
do { \
debug_sprintf_exception(tape_dbf_area, d_level, d_str); \
} while (0)
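To illustrate how these wrappers are used, here is a minimal user-space sketch; the debug_sprintf_event() stub, the stand-in tape_dbf_area variable and the sample message are assumptions that replace the real s390 debug feature from <asm/debug.h>.
#include <stdio.h>
int tape_dbf_area;	/* stand-in for the registered debug_info_t area */
/* Stub replacing the real s390 debug feature call. */
#define debug_sprintf_event(area, level, fmt, args...) \
	fprintf(stderr, "dbf level %d: " fmt, (level), ##args)
#define DBF_EVENT(d_level, d_str...) \
do { \
	debug_sprintf_event(tape_dbf_area, d_level, d_str); \
} while (0)
int main(void)
{
	/* Typical call site: level 6 is a low-priority trace event. */
	DBF_EVENT(6, "device %04x: medium loaded\n", 0x0181);
	return 0;
}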
#define TAPE_VERSION_MAJOR 2
#define TAPE_VERSION_MINOR 0
#define TAPE_MAGIC "tape"
#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
#define TAPEBLOCK_HSEC_SIZE 2048
#define TAPEBLOCK_HSEC_S2B 2
#define TAPEBLOCK_RETRIES 5
#define TAPE_BUSY(td) (td->treq != NULL)
#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
#define TAPE_MERGE_RC(treq,rc) \
if( ((rc) == 0) && ((treq)->rc != 0) ) \
rc = (treq)->rc;
typedef enum{
TAPE_NO_WAIT,
TAPE_WAIT,
TAPE_WAIT_INTERRUPTIBLE,
TAPE_WAIT_INTERRUPTIBLE_NOHALTIO,
TAPE_REMOVE_REQ_ON_WAKEUP,
TAPE_SCHED_BLOCK,
} tape_wait_t;
typedef enum {
enum tape_medium_state {
MS_UNKNOWN,
MS_LOADED,
MS_UNLOADED,
MS_SIZE
} tape_medium_state_t;
};
typedef enum {
enum tape_state {
TS_UNUSED=0,
TS_IN_USE,
TS_INIT,
TS_NOT_OPER,
TS_SIZE
} tape_state_t;
typedef enum {
TO_BLOCK,
TO_BSB,
TO_BSF,
TO_DSE,
TO_EGA,
TO_FSB,
TO_FSF,
TO_LDI,
TO_LBL,
TO_MSE,
TO_NOP,
TO_RBA,
TO_RBI,
TO_RBU,
TO_RBL,
TO_RDC,
TO_RFO,
TO_RSD,
TO_REW,
TO_RUN,
TO_SEN,
TO_SID,
TO_SNP,
TO_SPG,
TO_SWI,
TO_SMR,
TO_SYN,
TO_TIO,
TO_UNA,
TO_WRI,
TO_WTM,
TO_MSEN,
TO_LOAD,
TO_READ_CONFIG, /* 3590 */
TO_READ_ATTMSG, /* 3590 */
TO_NOTHING,
TO_DIS,
TO_SIZE
} tape_op_t;
#define TAPE_INTERRUPTIBLE_OP(op) \
(op == MTEOM) || \
(op == MTRETEN)
struct _tape_dev_t; //Forward declaration
/* The tape device list lock */
extern rwlock_t tape_dev_lock;
};
enum tape_op {
TO_BLOCK, /* Block read */
TO_BSB, /* Backward space block */
TO_BSF, /* Backward space filemark */
TO_DSE, /* Data security erase */
TO_FSB, /* Forward space block */
TO_FSF, /* Forward space filemark */
TO_LBL, /* Locate block label */
TO_NOP, /* No operation */
TO_RBA, /* Read backward */
TO_RBI, /* Read block information */
TO_RFO, /* Read forward */
TO_REW, /* Rewind tape */
TO_RUN, /* Rewind and unload tape */
TO_WRI, /* Write block */
TO_WTM, /* Write tape mark */
TO_MSEN, /* Medium sense */
TO_LOAD, /* Load tape */
TO_READ_CONFIG, /* Read configuration data */
TO_READ_ATTMSG, /* Read attention message */
TO_DIS, /* Tape display */
TO_ASSIGN, /* Assign tape to channel path */
TO_UNASSIGN, /* Unassign tape from channel path */
TO_SIZE /* #entries in tape_op_t */
};
/* Forward declaration */
struct tape_device;
/* tape_request->status can be: */
enum tape_request_status {
TAPE_REQUEST_INIT, /* request is ready to be processed */
TAPE_REQUEST_QUEUED, /* request is queued to be processed */
TAPE_REQUEST_IN_IO, /* request is currently in IO */
TAPE_REQUEST_DONE, /* request is completed. */
};
/* Tape CCW request */
typedef struct _tape_ccw_req_t{
wait_queue_head_t wq;
ccw1_t* cpaddr;
size_t cplength;
int options;
void* kernbuf;
size_t kernbuf_size;
idalbuf_t* idal_buf;
void* userbuf;
size_t userbuf_size;
tape_op_t op;
void (*wakeup)(struct _tape_ccw_req_t* treq);
void (*wait)(struct _tape_ccw_req_t* treq);
struct _tape_dev_t* tape_dev; // Pointer for back reference
struct tape_request {
struct list_head list; /* list head for request queueing. */
struct tape_device *device; /* tape device of this request */
struct ccw1 *cpaddr; /* address of the channel program. */
void *cpdata; /* pointer to ccw data. */
enum tape_request_status status;/* status of this request */
int options; /* options for execution. */
int retries; /* retry counter for error recovery. */
int rescnt; /* residual count from devstat. */
/* Callback for delivering final status. */
void (*callback)(struct tape_request *, void *);
void *callback_data;
enum tape_op op;
int rc;
struct _tape_ccw_req_t* recover;
} tape_ccw_req_t;
/* Callback typedefs */
typedef void (*tape_disc_shutdown_t) (void);
typedef void (*tape_event_handler_t) (struct _tape_dev_t*);
typedef tape_ccw_req_t* (*tape_reqgen_ioctl_t)(struct _tape_dev_t* td,int op,int count,int* rc);
typedef tape_ccw_req_t* (*tape_reqgen_bread_t)(struct request* req,struct _tape_dev_t* td,int tapeblock_major);
typedef void (*tape_reqgen_enable_loc_t) (tape_ccw_req_t*);
typedef void (*tape_free_bread_t)(tape_ccw_req_t*);
typedef tape_ccw_req_t* (*tape_reqgen_rw_t)(const char* data,size_t count,struct _tape_dev_t* td);
typedef int (*tape_setup_device_t) (struct _tape_dev_t*);
typedef void (*tape_cleanup_device_t) (struct _tape_dev_t*);
typedef int (*tape_disc_ioctl_overl_t)(struct _tape_dev_t*, unsigned int,unsigned long);
#ifdef CONFIG_DEVFS_FS
typedef devfs_handle_t (*tape_devfs_constructor_t) (struct _tape_dev_t*);
typedef void (*tape_devfs_destructor_t) (struct _tape_dev_t*);
#endif
};
/* Tape Discipline */
/* Function type for magnetic tape commands */
typedef int (*tape_mtop_fn)(struct tape_device *, int);
typedef struct _tape_discipline_t {
struct module *owner;
unsigned int cu_type;
tape_setup_device_t setup_device;
tape_cleanup_device_t cleanup_device;
tape_event_handler_t init_device;
tape_event_handler_t process_eov;
tape_event_handler_t irq;
tape_reqgen_bread_t bread;
tape_free_bread_t free_bread;
tape_reqgen_enable_loc_t bread_enable_locate;
tape_reqgen_rw_t write_block;
tape_reqgen_rw_t read_block;
tape_reqgen_ioctl_t ioctl;
tape_disc_shutdown_t shutdown;
tape_disc_ioctl_overl_t discipline_ioctl_overload;
void* next;
} tape_discipline_t __attribute__ ((aligned(8)));
/* Frontend */
typedef struct _tape_frontend_t {
tape_setup_device_t device_setup;
#ifdef CONFIG_DEVFS_FS
tape_devfs_constructor_t mkdevfstree;
tape_devfs_destructor_t rmdevfstree;
/* Size of the array containing the mtops for a discipline */
#define TAPE_NR_MTOPS (MTMKPART+1)
/* Tape Discipline */
struct tape_discipline {
struct module *owner;
int (*setup_device)(struct tape_device *);
void (*cleanup_device)(struct tape_device *);
int (*assign)(struct tape_device *);
int (*unassign)(struct tape_device *);
int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
struct tape_request *(*read_block)(struct tape_device *, size_t);
struct tape_request *(*write_block)(struct tape_device *, size_t);
void (*process_eov)(struct tape_device*);
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block device stuff. */
struct tape_request *(*bread)(struct tape_device *, struct request *);
void (*check_locate)(struct tape_device *, struct tape_request *);
void (*free_bread)(struct tape_request *);
#endif
void* next;
} tape_frontend_t __attribute__ ((aligned(8)));
/* ioctl function for additional ioctls. */
int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
/* Array of tape commands with TAPE_NR_MTOPS entries */
tape_mtop_fn *mtop_array;
};
/* Char Frontend Data */
/*
* The discipline irq function either returns an error code (<0) which
* means that the request has failed with an error or one of the following:
*/
#define TAPE_IO_SUCCESS 0 /* request successful */
#define TAPE_IO_PENDING 1 /* request still running */
#define TAPE_IO_RETRY 2 /* retry to current request */
#define TAPE_IO_STOP 3 /* stop the running request */
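A toy, self-contained model of this return-code contract (assumption: the real consumer is the I/O completion path in tape_core.c, which is not part of this hunk); the fake handler below asks for two retries before reporting success.
#include <stdio.h>
#define TAPE_IO_SUCCESS 0	/* request successful */
#define TAPE_IO_PENDING 1	/* request still running */
#define TAPE_IO_RETRY	2	/* retry the current request */
#define TAPE_IO_STOP	3	/* stop the running request */
/* Fake discipline irq handler: demands two retries, then succeeds. */
static int fake_irq(int attempt)
{
	return (attempt < 3) ? TAPE_IO_RETRY : TAPE_IO_SUCCESS;
}
int main(void)
{
	int attempt, rc;
	for (attempt = 1; attempt <= 5; attempt++) {
		rc = fake_irq(attempt);
		if (rc == TAPE_IO_RETRY) {
			printf("attempt %d: restart channel program\n", attempt);
			continue;	/* the core would reissue the request */
		}
		printf("attempt %d: done, rc=%d\n", attempt, rc);
		break;
	}
	return 0;
}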
typedef struct _tape_char_front_data_t{
int block_size; /* block size of tape */
} tape_char_data_t;
/* Char Frontend Data */
struct tape_char_data {
struct idal_buffer *idal_buf; /* idal buffer for user char data */
int block_size; /* of size block_size. */
};
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block Frontend Data */
typedef struct _tape_blk_front_data_t{
struct tape_blk_data
{
/* Block device request queue. */
request_queue_t request_queue;
struct request* current_request;
int blk_retries;
long position;
atomic_t bh_scheduled;
struct tq_struct bh_tq;
} tape_blk_data_t;
spinlock_t request_queue_lock;
/* Block frontend tasklet */
struct tasklet_struct tasklet;
/* Current position on the tape. */
long block_position;
};
#endif
/* Tape Info */
typedef struct _tape_dev_t {
atomic_t use_count; /* Reference count, when == 0 delete */
int first_minor; /* each tape device has two minors */
s390_dev_info_t devinfo; /* device info from Common I/O */
devstat_t devstat; /* contains irq, devno, status */
struct file *filp; /* backpointer to file structure */
int tape_state; /* State of the device. See tape_stat */
int medium_state; /* loaded, unloaded, unkown etc. */
tape_discipline_t* discipline; /* The used discipline */
void* discdata; /* discipline specific data */
tape_ccw_req_t* treq; /* Active Tape request */
tape_op_t last_op; /* Last Tape operation */
void* next; /* ptr to next tape_dev */
tape_char_data_t char_data; /* Character dev frontend data */
tape_blk_data_t blk_data; /* Block dev frontend data */
} tape_dev_t __attribute__ ((aligned(8)));
/* tape functions */
#define TAPE_MEMB_IRQ 0
#define TAPE_MEMB_MINOR 1
#define TAPE_MEMB_QUEUE 2
tape_dev_t* __tape_get_device_by_member(unsigned long value, int member);
struct tape_device {
/* entry in tape_device_list */
struct list_head node;
/*
* Search for tape structure with specific minor number
*/
static inline tape_dev_t *
tape_get_device_by_minor(int minor)
{
return __tape_get_device_by_member(minor, TAPE_MEMB_MINOR);
}
struct ccw_device *cdev;
/*
* Search for tape structure with specific IRQ
*/
static inline tape_dev_t *
tape_get_device_by_irq(int irq)
{
return __tape_get_device_by_member(irq, TAPE_MEMB_IRQ);
}
/* Device discipline information. */
struct tape_discipline *discipline;
void *discdata;
/*
* Search for tape structure with specific queue
*/
/* Generic status flags */
long tape_generic_status;
static inline tape_dev_t*
tape_get_device_by_queue(void* queue)
{
return __tape_get_device_by_member((unsigned long)queue, TAPE_MEMB_QUEUE);
}
/* Device state information. */
wait_queue_head_t state_change_wq;
enum tape_state tape_state;
enum tape_medium_state medium_state;
unsigned char *modeset_byte;
/*
* Increment use count of tape structure
*/
static inline void
tape_get_device(tape_dev_t* td)
/* Reference count. */
atomic_t ref_count;
/* Request queue. */
struct list_head req_queue;
int first_minor; /* each tape device has two minors */
/* Character device frontend data */
struct tape_char_data char_data;
#ifdef CONFIG_S390_TAPE_BLOCK
/* Block dev frontend data */
struct tape_blk_data blk_data;
#endif
};
/* Externals from tape_core.c */
extern struct tape_request *tape_alloc_request(int cplength, int datasize);
extern void tape_free_request(struct tape_request *);
extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
{
if (td!=NULL)
atomic_inc(&(td->use_count));
}
int rc;
void tape_put_device(tape_dev_t* td);
rc = tape_do_io(device, request);
tape_free_request(request);
return rc;
}
/* Discipline functions */
int tape_register_discipline(tape_discipline_t* disc);
void tape_unregister_discipline(tape_discipline_t* disc);
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *);
extern int tape_assign(struct tape_device *);
extern int tape_unassign(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int);
extern int tape_enable_device(struct tape_device *, struct tape_discipline *);
extern void tape_disable_device(struct tape_device *device);
/* Externals from tape_devmap.c */
extern int tape_generic_probe(struct ccw_device *);
extern int tape_generic_remove(struct ccw_device *);
extern struct tape_device *tape_get_device(int devindex);
extern void tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
extern void tapechar_exit(void);
extern int tapechar_setup_device(struct tape_device *);
extern void tapechar_cleanup_device(struct tape_device *);
/* Externals from tape_block.c */
#ifdef CONFIG_S390_TAPE_BLOCK
extern int tapeblock_init (void);
extern void tapeblock_exit(void);
extern int tapeblock_setup_device(struct tape_device *);
extern void tapeblock_cleanup_device(struct tape_device *);
#else
static inline int tapeblock_init (void) {return 0;}
static inline void tapeblock_exit (void) {;}
static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
#endif
/* tape initialisation functions */
int tape_init(void);
#ifdef CONFIG_PROC_FS
extern void tape_proc_init (void);
extern void tape_proc_cleanup (void);
#else
static inline void tape_proc_init (void) {;}
static inline void tape_proc_cleanup (void) {;}
#endif
/* a function for dumping device sense info */
void tape_dump_sense (tape_dev_t* td);
void tape_dump_sense_dbf(tape_dev_t* td);
extern void tape_dump_sense(struct tape_device *, struct tape_request *,
struct irb *);
extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
struct irb *);
/* functions for handling the status of a device */
inline void tape_state_set (tape_dev_t* td, tape_state_t newstate);
inline tape_state_t tape_state_get (tape_dev_t* td);
inline void tape_med_state_set(tape_dev_t* td, tape_medium_state_t newstate);
/* functions for alloc'ing ccw and IO stuff */
inline tape_ccw_req_t* tape_alloc_ccw_req(int cplength,int datasize,int idal_buf_size, tape_op_t op);
void tape_free_ccw_req (tape_ccw_req_t * request);
int tape_do_io(tape_dev_t * td,tape_ccw_req_t *treq,tape_wait_t type);
int tape_do_io_irq(tape_dev_t * td,tape_ccw_req_t *treq,tape_wait_t type);
int tape_do_io_and_wait(tape_dev_t * td,tape_ccw_req_t *treq,tape_wait_t type);
int tape_do_wait_req(tape_dev_t * td,tape_ccw_req_t *treq,tape_wait_t type);
int tape_remove_ccw_req(tape_dev_t* td,tape_ccw_req_t* treq);
tape_ccw_req_t* tape_get_active_ccw_req(tape_dev_t* td);
extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
/* The debug area */
#ifdef TAPE_DEBUG
extern debug_info_t *tape_dbf_area;
#define tape_sprintf_event debug_sprintf_event
#define tape_sprintf_exception debug_sprintf_exception
#else
#define tape_sprintf_event
#define tape_sprintf_exception
#endif
extern debug_info_t *tape_dbf_area;
/* functions for building ccws */
static inline ccw1_t*
__ccwprep(ccw1_t* ccw, __u8 cmd_code, __u8 flags, __u16 memsize, void* cda,int ccw_count)
static inline struct ccw1 *
tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
int i;
#ifdef CONFIG_ARCH_S390X
if ((unsigned long)cda >= (1UL<<31)){
printk("cda: %p\n",cda);
BUG();
}
#endif /* CONFIG_ARCH_S390X */
for(i = 0 ; i < ccw_count; i++){
ccw[i].cmd_code = cmd_code;
ccw[i].flags |= CCW_FLAG_CC;
ccw[i].count = memsize;
if(cda == 0)
ccw[i].cda = (unsigned long)&(ccw[i].cmd_code);
else
ccw[i].cda = (unsigned long)cda;
}
ccw[ccw_count-1].flags = flags;
return &ccw[ccw_count];
};
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
static inline struct ccw1 *
tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
ccw->cda = (__u32)(addr_t) cda;
return ccw + 1;
}
extern inline ccw1_t*
tape_ccw_cc(ccw1_t *ccw,__u8 cmd_code,__u16 memsize,void* cda,int ccw_count)
static inline struct ccw1 *
tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
{
return __ccwprep(ccw,cmd_code,CCW_FLAG_CC,memsize,cda,ccw_count);
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
return ccw + 1;
}
extern inline ccw1_t*
tape_ccw_end(ccw1_t *ccw,__u8 cmd_code,__u16 memsize,void* cda,int ccw_count)
static inline struct ccw1 *
tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
{
return __ccwprep(ccw,cmd_code,0,memsize,cda,ccw_count);
while (count-- > 0) {
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
ccw++;
}
return ccw;
}
extern inline ccw1_t*
tape_ccw_cc_idal(ccw1_t *ccw,__u8 cmd_code,idalbuf_t* idal)
static inline struct ccw1 *
tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
idalbuf_set_normalized_cda(ccw,idal);
idal_buffer_set_cda(idal, ccw);
return ccw++;
}
extern inline ccw1_t*
tape_ccw_end_idal(ccw1_t *ccw,__u8 cmd_code,idalbuf_t* idal)
static inline struct ccw1 *
tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
ccw->cmd_code = cmd_code;
ccw->flags = 0;
idalbuf_set_normalized_cda(ccw,idal);
return ccw++;
ccw->cmd_code = cmd_code;
ccw->flags = 0;
idal_buffer_set_cda(idal, ccw);
return ccw++;
}
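A self-contained user-space mock of the chaining pattern these helpers implement: each call fills one CCW and returns a pointer to the next free slot, with CCW_FLAG_CC set on every CCW except the last. The struct layout, flag value and opcode values below are simplified stand-ins, not the definitions from <asm/ccwdev.h> or the discipline headers.
#include <stdio.h>
/* Simplified stand-ins for the real s390 definitions. */
struct ccw1 {
	unsigned char  cmd_code;
	unsigned char  flags;
	unsigned short count;
	unsigned int   cda;
};
#define CCW_FLAG_CC	0x40
#define MODE_SET_DB	0xeb	/* placeholder opcode values */
#define NOP		0x03
static struct ccw1 *ccw_cc(struct ccw1 *ccw, unsigned char cmd,
			   unsigned short count, void *cda)
{
	ccw->cmd_code = cmd;
	ccw->flags = CCW_FLAG_CC;	/* command-chain to the next CCW */
	ccw->count = count;
	ccw->cda = (unsigned int)(unsigned long) cda;
	return ccw + 1;
}
static struct ccw1 *ccw_end(struct ccw1 *ccw, unsigned char cmd,
			    unsigned short count, void *cda)
{
	ccw->cmd_code = cmd;
	ccw->flags = 0;			/* last CCW ends the program */
	ccw->count = count;
	ccw->cda = (unsigned int)(unsigned long) cda;
	return ccw + 1;
}
int main(void)
{
	static unsigned char modeset_byte = 0x08;	/* e.g. IDRC on */
	struct ccw1 cp[2], *ccw;
	int i;
	ccw = ccw_cc(cp, MODE_SET_DB, 1, &modeset_byte);
	ccw = ccw_end(ccw, NOP, 0, NULL);
	for (i = 0; i < 2; i++)
		printf("ccw[%d]: cmd=%02x flags=%02x count=%u\n",
		       i, cp[i].cmd_code, cp[i].flags, cp[i].count);
	return 0;
}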
/* Global vars */
extern const char* tape_state_verbose[TS_SIZE];
extern const char* tape_op_verbose[TO_SIZE];
/* Some linked lists for storing plugins and devices */
extern tape_dev_t *tape_first_dev;
extern tape_frontend_t *tape_first_front;
extern const char *tape_state_verbose[];
extern const char *tape_op_verbose[];
#endif /* for ifdef tape.h */
/***************************************************************************
*
* drivers/s390/char/tape3480.c
* tape device discipline for 3480 tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#include "tapedefs.h"
#include <linux/version.h>
#include <linux/compatmac.h>
#include "tape.h"
#include "tape34xx.h"
#include "tape3480.h"
#ifdef CONFIG_S390_TAPE_3480_MODULE
static tape_discipline_t* disc;
void
init_module(void)
{
disc = tape3480_init();
if (disc!= NULL)
tape_register_discipline(disc);
}
void
cleanup_module(void)
{
if (disc!=NULL){
tape_unregister_discipline(disc);
kfree(disc);
}
}
#endif /* CONFIG_S390_TAPE_3480_MODULE */
int
tape3480_setup_device(tape_dev_t * td)
{
tape3480_disc_data_t *data = NULL;
tape_sprintf_event (tape_dbf_area,6,"3480 dsetup: %x\n",td->first_minor);
data = kmalloc (sizeof (tape3480_disc_data_t), GFP_KERNEL | GFP_DMA);
if(data == NULL)
return -1;
data->modeset_byte = 0x00;
td->discdata = (void *) data;
return 0;
}
void
tape3480_cleanup_device(tape_dev_t * td)
{
if(td->discdata){
kfree(td->discdata);
td->discdata = NULL;
}
}
void
tape3480_shutdown (void) {
}
tape_discipline_t *
tape3480_init (void)
{
tape_discipline_t *disc;
tape_sprintf_event (tape_dbf_area,3,"3480 init\n");
disc = kmalloc (sizeof (tape_discipline_t), GFP_ATOMIC);
if (disc == NULL) {
tape_sprintf_exception (tape_dbf_area,3,"disc:nomem\n");
return disc;
}
disc->owner = THIS_MODULE;
disc->cu_type = 0x3480;
disc->setup_device = tape3480_setup_device;
disc->cleanup_device = tape3480_cleanup_device;
disc->init_device = NULL;
disc->process_eov = tape34xx_process_eov;
disc->irq = tape34xx_irq;
disc->write_block = tape34xx_write_block;
disc->read_block = tape34xx_read_block;
disc->ioctl = tape34xx_ioctl;
disc->shutdown = tape3480_shutdown;
disc->discipline_ioctl_overload = tape34xx_ioctl_overload;
disc->bread = tape34xx_bread;
disc->free_bread = tape34xx_free_bread;
disc->bread_enable_locate = tape34xx_bread_enable_locate;
disc->next = NULL;
tape_sprintf_event (tape_dbf_area,3,"3480 regis\n");
return disc;
}
/***************************************************************************
*
* drivers/s390/char/tape3480.h
* tape device discipline for 3480 tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#ifndef _TAPE3480_H
#define _TAPE3480_H
typedef struct _tape3480_disc_data_t {
__u8 modeset_byte;
} tape3480_disc_data_t __attribute__ ((packed, aligned(8)));
tape_discipline_t * tape3480_init (void);
#endif // _TAPE3480_H
/***************************************************************************
*
* drivers/s390/char/tape3490.c
* tape device discipline for 3490E tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#include "tapedefs.h"
#include <linux/version.h>
#include <linux/compatmac.h>
#include "tape.h"
#include "tape34xx.h"
#include "tape3490.h"
#ifdef CONFIG_S390_TAPE_3490_MODULE
static tape_discipline_t* disc;
void
init_module(void)
{
disc = tape3490_init();
if (disc!=NULL)
tape_register_discipline(disc);
}
void
cleanup_module(void)
{
if (disc!=NULL){
tape_unregister_discipline(disc);
kfree(disc);
}
}
#endif /* CONFIG_S390_TAPE_3490_MODULE */
int
tape3490_setup_device (tape_dev_t * td)
{
tape3490_disc_data_t *data = NULL;
tape_sprintf_event (tape_dbf_area,1,"3490 dsetup: %x\n",td->first_minor);
data = kmalloc (sizeof (tape3490_disc_data_t), GFP_KERNEL | GFP_DMA);
if(data == NULL)
return -1;
data->modeset_byte = 0x00;
td->discdata = (void *) data;
return 0;
}
void
tape3490_cleanup_device(tape_dev_t * td)
{
if(td->discdata){
kfree(td->discdata);
td->discdata = NULL;
}
}
void
tape3490_shutdown (void) {
}
tape_discipline_t *
tape3490_init (void)
{
tape_discipline_t *disc;
tape_sprintf_event (tape_dbf_area,3,"3490 init\n");
disc = kmalloc (sizeof (tape_discipline_t), GFP_ATOMIC);
if (disc == NULL) {
tape_sprintf_exception (tape_dbf_area,3,"disc:nomem\n");
return disc;
}
disc->owner = THIS_MODULE;
disc->cu_type = 0x3490;
disc->setup_device = tape3490_setup_device;
disc->cleanup_device = tape3490_cleanup_device;
disc->init_device = NULL;
disc->process_eov = tape34xx_process_eov;
disc->irq = tape34xx_irq;
disc->write_block = tape34xx_write_block;
disc->read_block = tape34xx_read_block;
disc->ioctl = tape34xx_ioctl;
disc->shutdown = tape3490_shutdown;
disc->discipline_ioctl_overload = tape34xx_ioctl_overload;
disc->bread = tape34xx_bread;
disc->free_bread = tape34xx_free_bread;
disc->bread_enable_locate = tape34xx_bread_enable_locate;
disc->next = NULL;
tape_sprintf_event (tape_dbf_area,3,"3490 regis\n");
return disc;
}
/***************************************************************************
*
* drivers/s390/char/tape3490.h
* tape device discipline for 3490E tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#ifndef _TAPE3490_H
#define _TAPE3490_H
typedef struct _tape3490_disc_data_t {
__u8 modeset_byte;
} tape3490_disc_data_t __attribute__ ((packed, aligned(8)));
tape_discipline_t * tape3490_init (void);
#endif // _TAPE3490_H
/***************************************************************************
*
* drivers/s390/char/tape34xx.c
* common tape device discipline for 34xx tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#include "tapedefs.h"
#include <linux/config.h>
#include <linux/version.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <linux/compatmac.h>
#include "tape.h"
#include "tape34xx.h"
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#define PRINTK_HEADER "T3xxx:"
/*
* Done Handler is called when dev stat = DEVICE-END (successful operation)
*/
void tape34xx_done_handler(tape_dev_t* td)
{
__u8 *data = NULL;
int i;
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
tape_sprintf_event (tape_dbf_area,6,"%s\n",tape_op_verbose[treq->op]);
tape_sprintf_event (tape_dbf_area,6,"done\n");
treq->rc = 0;
switch(treq->op){
case TO_BSB:
case TO_BSF:
case TO_DSE:
case TO_FSB:
case TO_FSF:
case TO_LBL:
case TO_RFO:
case TO_RBA:
case TO_REW:
case TO_WRI:
case TO_WTM:
case TO_BLOCK:
case TO_LOAD:
case TO_DIS:
tape_med_state_set(td,MS_LOADED);
break;
case TO_NOP:
break;
case TO_RUN:
tape_med_state_set(td,MS_UNLOADED);
break;
case TO_RBI:
data = treq->kernbuf;
tape_sprintf_event (tape_dbf_area,6,"data: %04x %04x\n",*((unsigned int*)&data[0]),*((unsigned int*)&data[4]));
i = 0;
i = data[3];
i += 256 * data[2];
i += 65536 * (data[1] & 0x3F);
memcpy(data,&i,4);
tape_med_state_set(td,MS_LOADED);
break;
default:
tape_sprintf_exception (tape_dbf_area,3,"TE UNEXPEC\n");
tape34xx_default_handler (td);
return;
}
if(treq->wakeup)
treq->wakeup (treq);
return;
}
/*
* This function is called when no request is outstanding and we get an
* interrupt
*/
void tape34xx_unsolicited_irq(tape_dev_t* td)
{
if(td->devstat.dstat == 0x85 /* READY */) {
// A medium was inserted in the drive!
tape_sprintf_event (tape_dbf_area,6,"xuud med\n");
tape_med_state_set(td,MS_LOADED);
} else {
tape_sprintf_event (tape_dbf_area,3,"unsol.irq! dev end: %x\n",td->devinfo.irq);
PRINT_WARN ("Unsolicited IRQ (Device End) caught.\n");
tape_dump_sense (td);
}
}
/*
* tape34xx_display
*/
int
tape34xx_display (tape_dev_t* td, unsigned long arg)
{
struct display_struct d_struct;
tape_ccw_req_t *treq = NULL;
ccw1_t *ccw = NULL;
int ds = 17; /* datasize */
int ccw_cnt = 2; /* ccw count */
int op = TO_DIS; /* tape operation */
int i = 0, rc = -1;
rc = copy_from_user(&d_struct, (char *)arg, sizeof(d_struct));
if (rc != 0)
goto error;
treq=tape_alloc_ccw_req(ccw_cnt, ds, 0, op);
if (!treq)
goto error;
((unsigned char *)treq->kernbuf)[0] = d_struct.cntrl;
for (i = 0; i < 8; i++) {
((unsigned char *)treq->kernbuf)[i+1] = d_struct.message1[i];
((unsigned char *)treq->kernbuf)[i+9] = d_struct.message2[i];
}
ASCEBC (((unsigned char*)treq->kernbuf) + 1, 16);
ccw = treq->cpaddr;
ccw = tape_ccw_cc(ccw, LOAD_DISPLAY, 17, treq->kernbuf, 1);
ccw = tape_ccw_end(ccw,NOP,0,0,1);
tape_do_io_and_wait(td,treq,TAPE_WAIT_INTERRUPTIBLE);
tape_free_ccw_req(treq);
return(0);
error:
return -EINVAL;
}
/*
* ioctl_overload
*/
int
tape34xx_ioctl_overload (tape_dev_t* td, unsigned int cmd, unsigned long arg)
{
if (cmd == TAPE390_DISPLAY)
return tape34xx_display(td, arg);
else
return -EINVAL; // no additional ioctls
}
/*******************************************************************
* Request creating functions:
*******************************************************************/
/*
* 34xx IOCTLS
*
* MTFSF: Forward space over 'count' file marks. The tape is positioned
* at the EOT (End of Tape) side of the file mark.
*
* MTBSF: Backward space over 'count' file marks. The tape is positioned at
* the EOT (End of Tape) side of the last skipped file mark.
*
* MTFSR: Forward space over 'count' tape blocks (blocksize is set
* via MTSETBLK).
*
* MTBSR: Backward space over 'count' tape blocks.
* (blocksize is set via MTSETBLK).
*
* MTWEOF: Write 'count' file marks at the current position.
*
* MTREW: Rewind the tape.
*
* MTOFFL: Rewind the tape and put the drive off-line.
* Implement 'rewind unload'
*
* MTNOP: 'No operation'.
*
* MTBSFM: Backward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side of the
* last skipped file mark.
*
* MTFSFM: Forward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side
* of the last skipped file mark.
*
* MTEOM: positions at the end of the portion of the tape already used
* for recording data. MTEOM positions after the last file mark, ready for
* appending another file.
* MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
*
* MTERASE: erases the tape.
*
* MTSETDENSITY: set tape density.
*
* MTSEEK: seek to the specified block.
*
* MTTELL: Tell block. Return the block number relative to the current file.
*
* MTSETDRVBUFFER: Set the tape drive buffer code to number.
* Implement NOP.
*
* MTLOCK: Locks the tape drive door.
* Implement NOP CCW command.
*
* MTUNLOCK: Unlocks the tape drive door.
* Implement the NOP CCW command.
*
* MTLOAD: Loads the tape.
* This function is not implemented and returns NULL, which causes the
* frontend to wait for a medium to be loaded.
* The 3480/3490 type Tapes do not support a load command
*
* MTUNLOAD: Rewind the tape and unload it.
*
* MTCOMPRESSION: used to enable compression.
* Sets the IDRC on/off.
*
* MTSTPART: Move the tape head at the partition with the number 'count'.
* Implement the NOP CCW command.
*
* MTMKPART: .... dummy .
* Implement the NOP CCW command.
*
* MTIOCGET: query the tape drive status.
*
* MTIOCPOS: query the tape position.
*
*/
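From user space these operations are reached through the standard mt interface; below is a small, hedged example that issues MTIOCTOP requests via <sys/mtio.h> (the device node name /dev/ntibm0 is an assumption and may differ on your system).
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
int main(void)
{
	struct mtop op;
	int fd;
	fd = open("/dev/ntibm0", O_RDONLY);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	op.mt_op = MTREW;		/* rewind the tape */
	op.mt_count = 1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTREW");
	op.mt_op = MTFSF;		/* forward space over one file mark */
	op.mt_count = 1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTFSF");
	close(fd);
	return 0;
}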
tape_ccw_req_t *
tape34xx_ioctl(tape_dev_t* td, int mtcmd,int count, int* rc)
{
tape_ccw_req_t *treq = NULL;
ccw1_t *ccw = NULL;
int ds = 0; /* datasize */
int ccw_cnt; /* ccw count */
int op = -1; /* tape operation */
tape_sprintf_event(tape_dbf_area,6,"34xxioctl: op(%x) count(%x)\n",mtcmd,count);
/* Preprocessing */
switch(mtcmd){
case MTLOAD: *rc = -EINVAL; goto error;
case MTIOCGET: *rc = -EINVAL; goto error;
case MTIOCPOS: *rc = -EINVAL; goto error;
case MTFSF: op=TO_FSF; ccw_cnt=count+2; ds=0; break;
case MTBSF: op=TO_BSF; ccw_cnt=count+2; ds=0; break;
case MTFSR: op=TO_FSB; ccw_cnt=count+2; ds=0; break;
case MTBSR: op=TO_BSB; ccw_cnt=count+2; ds=0; break;
case MTWEOF: op=TO_WTM; ccw_cnt=count+2; ds=0; break;
case MTREW: op=TO_REW; ccw_cnt=3; ds=0; break;
case MTOFFL: op=TO_RUN; ccw_cnt=3; ds=0; break;
case MTNOP: op=TO_NOP; ccw_cnt=2; ds=0; break;
case MTBSFM: op=TO_BSF; ccw_cnt=count+2; ds=0; break;
case MTFSFM: op=TO_FSF; ccw_cnt=count+2; ds=0; break;
case MTEOM: op=TO_FSF; ccw_cnt=4; ds=0; break;
case MTERASE: op=TO_DSE; ccw_cnt=5; ds=0; break;
case MTSETDENSITY: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTSEEK: op=TO_LBL; ccw_cnt=3; ds=4; break;
case MTTELL: op=TO_RBI; ccw_cnt=3; ds=8; break;
case MTSETDRVBUFFER: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTLOCK: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTUNLOCK: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTUNLOAD: op=TO_RUN; ccw_cnt=3; ds=32; break;
case MTCOMPRESSION: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTSETPART: op=TO_NOP; ccw_cnt=3; ds=0; break;
case MTMKPART: op=TO_NOP; ccw_cnt=3; ds=0; break;
default:
PRINT_ERR( "IOCTL %x not implemented\n",op );
*rc = -EINVAL;
goto error;
}
if (ccw_cnt > 510) {
tape_sprintf_exception (tape_dbf_area,6,"wrng parm\n");
*rc = -EINVAL;
goto error;
}
treq=tape_alloc_ccw_req(ccw_cnt,ds,0,op);
if (!treq){
*rc = -ENOSPC;
goto error;
}
ccw = treq->cpaddr;
/* setup first ccw */
ccw = tape_ccw_cc(ccw,MODE_SET_DB,1,&MOD_BYTE,1);
/* setup middle ccw(s) */
switch(mtcmd){
case MTFSF:
ccw = tape_ccw_cc(ccw,FORSPACEFILE,0,0,count);
break;
case MTBSF:
ccw = tape_ccw_cc(ccw,BACKSPACEFILE,0,0,count);
break;
case MTFSR:
ccw = tape_ccw_cc(ccw,FORSPACEBLOCK,0,0,count);
break;
case MTBSR:
ccw = tape_ccw_cc(ccw,BACKSPACEBLOCK,0,0,count);
break;
case MTWEOF:
ccw = tape_ccw_cc(ccw,WRITETAPEMARK,0,0,count); // this operation does _always_ write only one tape mark :(
break;
case MTREW:
ccw = tape_ccw_cc(ccw,REWIND,0,0,1);
break;
case MTOFFL:
ccw = tape_ccw_cc(ccw,REWIND_UNLOAD,0,0,1);
break;
case MTUNLOCK:
case MTLOCK:
case MTSETDRVBUFFER:
case MTSETDENSITY:
case MTSETPART:
case MTMKPART:
case MTNOP:
ccw = tape_ccw_cc(ccw,NOP,0,0,1);
break;
case MTBSFM:
ccw = tape_ccw_cc(ccw,BACKSPACEFILE,0,0,count);
break;
case MTFSFM:
ccw = tape_ccw_cc(ccw,FORSPACEFILE,0,0,count);
break;
case MTEOM:
ccw = tape_ccw_cc(ccw,FORSPACEFILE,0,0,1);
ccw = tape_ccw_cc(ccw,NOP,0,0,1);
break;
case MTERASE:
ccw = tape_ccw_cc(ccw,REWIND,0,0,1);
ccw = tape_ccw_cc(ccw,ERASE_GAP,0,0,1);
ccw = tape_ccw_cc(ccw,DATA_SEC_ERASE,0,0,1);
break;
case MTTELL:
ccw = tape_ccw_cc(ccw,READ_BLOCK_ID,8,treq->kernbuf,1);
break;
case MTUNLOAD:
ccw = tape_ccw_cc(ccw,REWIND_UNLOAD,0,0,1);
break;
case MTCOMPRESSION:
if((count < 0) || (count > 1)){
tape_sprintf_exception (tape_dbf_area,6,"xcom parm\n");
goto error;
}
if(count == 0){
PRINT_INFO( "(%x) Compression switched off\n", td->devstat.devno);
MOD_BYTE = 0x00; // IDRC off
} else {
PRINT_INFO( "(%x) Compression switched on\n", td->devstat.devno);
MOD_BYTE = 0x08; // IDRC on
}
ccw = tape_ccw_cc(ccw,NOP,0,0,1);
break; // Modset does the job
case MTSEEK:
{
__u8* data = treq->kernbuf;
data[0] = 0x01;
data[1] = data[2] = data[3] = 0x00;
if (count >= 4194304){
tape_sprintf_exception(tape_dbf_area,6,"xsee parm\n");
*rc = -EINVAL;
goto error;
}
if (MOD_BYTE & 0x08)
data[1] = data[1] | 0x80;
data[3] += count % 256;
data[2] += (count / 256) % 256;
data[1] += (count / 65536);
ccw = tape_ccw_cc(ccw,LOCATE,4,treq->kernbuf,1);
break;
}
default:
PRINT_WARN( "IOCTL %x not implemented\n",op );
*rc = -EINVAL;
goto error;
}
/* setup last ccw */
switch(mtcmd){
case MTEOM:
ccw = tape_ccw_end(ccw,CCW_CMD_TIC,0,treq->cpaddr,1);
break;
case MTUNLOAD:
ccw = tape_ccw_end(ccw,SENSE,32,treq->kernbuf,1);
break;
default:
ccw = tape_ccw_end(ccw,NOP,0,0,1);
break;
}
*rc = 0;
return treq;
error:
if (treq)
tape_free_ccw_req(treq);
return NULL;
}
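For clarity, a stand-alone sketch of the block-id packing used by the MTSEEK/LOCATE case above: the 22-bit block number goes big-endian into bytes 1..3 of the 4-byte locate argument, and bit 0x80 of byte 1 flags IDRC compression. pack_block_id is a made-up name; it only mirrors the arithmetic in tape34xx_ioctl().
#include <stdio.h>
static void pack_block_id(unsigned char data[4], unsigned int block, int idrc)
{
	data[0] = 0x01;					/* format byte, as above */
	data[1] = (idrc ? 0x80 : 0x00) | ((block >> 16) & 0x3f);
	data[2] = (block >> 8) & 0xff;
	data[3] = block & 0xff;
}
int main(void)
{
	unsigned char data[4];
	pack_block_id(data, 123456, 1);			/* block must be < 4194304 */
	printf("locate arg: %02x %02x %02x %02x\n",
	       data[0], data[1], data[2], data[3]);
	return 0;
}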
/*
* Write Block
*/
tape_ccw_req_t *
tape34xx_write_block (const char *data, size_t count, tape_dev_t* td)
{
tape_ccw_req_t *treq = NULL;
ccw1_t *ccw;
treq = tape_alloc_ccw_req (2, 0, count,TO_WRI);
if (!treq)
goto error;
if (idalbuf_copy_from_user (treq->idal_buf, data, count)) {
tape_sprintf_exception (tape_dbf_area,6,"xwbl segf.\n");
goto error;
}
ccw = treq->cpaddr;
ccw = tape_ccw_cc(ccw,MODE_SET_DB,1,&MOD_BYTE,1);
ccw = tape_ccw_end_idal(ccw,WRITE_CMD,treq->idal_buf);
treq->userbuf = (void *) data;
tape_sprintf_event (tape_dbf_area,6,"xwbl ccwg\n");
return treq;
error:
tape_sprintf_exception (tape_dbf_area,6,"xwbl fail\n");
if (treq)
tape_free_ccw_req(treq);
return NULL;
}
/*
* Read Block
*/
tape_ccw_req_t *
tape34xx_read_block (const char *data, size_t count, tape_dev_t* td)
{
tape_ccw_req_t *treq = NULL;
ccw1_t *ccw;
/* we have to alloc 4 ccws in order to be able to transform the request */
/* into a read backward request in the error case */
treq = tape_alloc_ccw_req (4, 0, count,TO_RFO);
if (!treq)
goto error;
treq->userbuf = (void*)data;
treq->userbuf_size = count;
ccw = treq->cpaddr;
ccw = tape_ccw_cc(ccw,MODE_SET_DB,1,&MOD_BYTE,1);
ccw = tape_ccw_end_idal(ccw,READ_FORWARD,treq->idal_buf);
tape_sprintf_event (tape_dbf_area,6,"xrbl ccwg\n");
return treq;
error:
tape_sprintf_exception (tape_dbf_area,6,"xrbl fail");
if (treq)
tape_free_ccw_req(treq);
return NULL;
}
/*
* Read Opposite Error Recovery Function:
* Used, when Read Forward does not work
*/
tape_ccw_req_t *
tape34xx_read_opposite (tape_dev_t* td)
{
ccw1_t *ccw;
tape_ccw_req_t* treq = tape_get_active_ccw_req(td);
if (treq==NULL) // no request to recover?
BUG();
// transform read forward request into read backward request.
ccw = treq->cpaddr;
ccw = tape_ccw_cc(ccw,MODE_SET_DB,1,&MOD_BYTE,1);
ccw = tape_ccw_cc_idal(ccw,READ_BACKWARD,treq->idal_buf);
ccw = tape_ccw_cc(ccw,FORSPACEBLOCK,0,0,1);
ccw = tape_ccw_end(ccw,NOP,0,0,1);
treq->op = TO_RBA;
tape_sprintf_event (tape_dbf_area,6,"xrop ccwg");
return treq;
}
/*
* Tape Block READ
*/
tape_ccw_req_t * tape34xx_bread (struct request *req,tape_dev_t* td,int tapeblock_major) {
tape_ccw_req_t *treq;
ccw1_t *ccw;
__u8 *data;
int s2b = blksize_size[tapeblock_major][td->first_minor]/hardsect_size[tapeblock_major][td->first_minor];
int realcount = 0;
int size,bhct = 0;
struct buffer_head* bh;
for (bh = req->bh; bh; bh = bh->b_reqnext) {
if (bh->b_size > blksize_size[tapeblock_major][td->first_minor])
for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][td->first_minor])
bhct++;
else
bhct++;
}
tape_sprintf_event (tape_dbf_area,6,"xBREDid:");
treq = tape_alloc_ccw_req (2+bhct+1, 4,0,TO_BLOCK);
if (!treq) {
tape_sprintf_exception (tape_dbf_area,6,"xBREDnomem\n");
goto error;
}
data = treq->kernbuf;
data[0] = 0x01;
data[1] = data[2] = data[3] = 0x00;
realcount=req->sector/s2b;
if (MOD_BYTE & 0x08) // IDRC on
data[1] = data[1] | 0x80;
data[3] += realcount % 256;
data[2] += (realcount / 256) % 256;
data[1] += (realcount / 65536);
tape_sprintf_event (tape_dbf_area,6,"realcount = %i\n",realcount);
ccw = treq->cpaddr;
ccw = tape_ccw_cc(ccw,MODE_SET_DB,1,&MOD_BYTE,1);
if (realcount!=td->blk_data.position)
ccw = tape_ccw_cc(ccw,LOCATE,4,treq->kernbuf,1);
else
ccw = tape_ccw_cc(ccw,NOP,0,0,1);
td->blk_data.position=realcount+req->nr_sectors/s2b;
for (bh=req->bh;bh!=NULL;) {
if (bh->b_size >= blksize_size[tapeblock_major][td->first_minor]) {
for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][td->first_minor]){
ccw->flags = CCW_FLAG_CC;
ccw->cmd_code = READ_FORWARD;
ccw->count = blksize_size[tapeblock_major][td->first_minor];
set_normalized_cda(ccw,__pa(bh->b_data+size));
ccw++;
}
bh = bh->b_reqnext;
} else { /* group N bhs to fit into byt_per_blk */
BUG();
}
}
ccw = tape_ccw_end(ccw,NOP,0,0,1);
tape_sprintf_event (tape_dbf_area,6,"xBREDccwg\n");
return treq;
error:
tape_sprintf_exception (tape_dbf_area,6,"xBREDccwg fail");
if (treq)
tape_free_ccw_req(treq);
return NULL;
}
void tape34xx_free_bread (tape_ccw_req_t* treq) {
ccw1_t* ccw;
for (ccw=(ccw1_t*)treq->cpaddr;ccw->flags & CCW_FLAG_CC;ccw++)
if (ccw->cmd_code == READ_FORWARD)
clear_normalized_cda(ccw);
tape_free_ccw_req(treq);
}
// FIXME: Comment?
void tape34xx_bread_enable_locate (tape_ccw_req_t * treq) {
ccw1_t *ccw;
if (treq==NULL) BUG();
ccw=treq->cpaddr;
ccw++;
ccw = tape_ccw_cc(ccw,LOCATE,4,treq->kernbuf,1);
return;
}
/*******************************************************************
* Event Handlers
*******************************************************************/
/*
* Default Handler is called when an unexpected IRQ comes in
*/
void
tape34xx_default_handler (tape_dev_t * td)
{
tape_ccw_req_t* treq = tape_get_active_ccw_req(td);
tape_sprintf_event (tape_dbf_area,6,"xdefhandle\n");
PRINT_ERR ("TAPE34XX: An unexpected Unit Check occurred.\n");
PRINT_ERR ("TAPE34XX: Please read Documentation/s390/TAPE and report it!\n");
PRINT_ERR ("TAPE34XX: Current op is: %s",tape_op_verbose[treq->op]);
tape_dump_sense (td);
treq->rc = -EIO;
if(treq->wakeup)
treq->wakeup (treq);
}
/* This function analyses the tape's sense-data in case of a unit-check. */
/* If possible, it tries to recover from the error. Else the user is */
/* informed about the problem. */
void
tape34xx_error_recovery (tape_dev_t* td)
{
__u8* sense=td->devstat.ii.sense.data;
int inhibit_cu_recovery=0;
int cu_type=td->discipline->cu_type;
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
if (treq==NULL) {
// Nothing to recover! Why call me?
BUG();
}
if (MOD_BYTE&0x80) inhibit_cu_recovery=1;
if (treq->op==TO_BLOCK) {
// no recovery for block device, bottom half will retry...
tape34xx_error_recovery_has_failed(td,EIO);
return;
}
if (sense[0]&SENSE_COMMAND_REJECT)
switch (treq->op) {
case TO_DSE:
case TO_EGA:
case TO_WRI:
case TO_WTM:
if (sense[1]&SENSE_WRITE_PROTECT) {
// trying to write, but medium is write protected
tape34xx_error_recovery_has_failed(td,EACCES);
return;
}
default:
tape34xx_error_recovery_HWBUG(td,1);
return;
}
// special cases for various tape-states when reaching end of recorded area
if (((sense[0]==0x08) || (sense[0]==0x10) || (sense[0]==0x12)) &&
((sense[1]==0x40) || (sense[1]==0x0c)))
switch (treq->op) {
case TO_FSF:
// Trying to seek beyond end of recorded area
tape34xx_error_recovery_has_failed(td,EIO);
return;
case TO_LBL:
// Block could not be located.
tape34xx_error_recovery_has_failed(td,EIO);
return;
case TO_RFO:
// Try to read beyond end of recorded area -> 0 bytes read
tape34xx_error_recovery_has_failed(td,0);
return;
default:
PRINT_ERR("Invalid op in %s:%i\n",__FUNCTION__,__LINE__);
tape34xx_error_recovery_has_failed(td,0);
return;
}
// Sensing special bits
if (sense[0]&SENSE_BUS_OUT_CHECK) {
tape34xx_error_recovery_do_retry(td);
return;
}
if (sense[0]&SENSE_DATA_CHECK) {
// hardware failure, damaged tape or improper operating conditions
switch (sense[3]) {
case 0x23:
// a read data check occurred
if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
(inhibit_cu_recovery)) {
// data check is not permanent, may be recovered.
// We always use async-mode with cu-recovery, so this should *never* happen.
tape34xx_error_recovery_HWBUG(td,2);
return;
} else {
// data check is permanent, CU recovery has failed
PRINT_WARN("Permanent read error, recovery failed!\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
}
case 0x25:
// a write data check occurred
if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
(inhibit_cu_recovery)) {
// data check is not permanent, may be recovered.
// We always use async-mode with cu-recovery, so this should *never* happen.
tape34xx_error_recovery_HWBUG(td,3);
return;
} else {
// data check is permanent, cu-recovery has failed
PRINT_WARN("Permanent write error, recovery failed!\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
}
case 0x26:
// Data Check (read opposite) occurred. We'll recover this.
tape34xx_error_recovery_read_opposite(td);
return;
case 0x28:
// The ID-Mark at the beginning of the tape could not be written. This is fatal, we'll report and exit.
PRINT_WARN("ID-Mark could not be written. Check your hardware!\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x31:
// Tape void. Tried to read beyond end of device. We'll report and exit.
PRINT_WARN("Try to read beyond end of recorded area!\n");
tape34xx_error_recovery_has_failed(td,ENOSPC);
return;
case 0x41:
// Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
PRINT_WARN("Illegal block-id sequence found!\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
default:
// well, all data checks for 3480 should result in one of the above erpa-codes. if not -> bug
// On 3490, other data-check conditions do exist.
if (cu_type==0x3480) {
tape34xx_error_recovery_HWBUG(td,4);
return;
}
}
}
if (sense[0]&SENSE_OVERRUN) {
// A data overrun between cu and drive occurred. The channel speed is too slow! We'll report this and exit!
switch (sense[3]) {
case 0x40: // overrun error
PRINT_WARN ("Data overrun error between control-unit and drive. Use a faster channel connection, if possible! \n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
default:
// Overrun bit is set, but erpa does not show overrun error. This is a bug.
tape34xx_error_recovery_HWBUG(td,5);
return;
}
}
if (sense[1]&SENSE_RECORD_SEQUENCE_ERR) {
switch (sense[3]) {
case 0x41:
// Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
PRINT_WARN("Illegal block-id sequence found!\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
default:
// Record sequence error bit is set, but erpa does not show record sequence error. This is a bug.
tape34xx_error_recovery_HWBUG(td,6);
return;
}
}
// Sensing erpa codes
switch (sense[3]) {
case 0x00:
// Everything is fine, but we got a unit check. Report and ignore!
PRINT_WARN ("Non-error sense was found. Unit-check will be ignored, expect errors...\n");
return;
case 0x21:
// Data streaming not operational. Cu switches to interlock mode, we reissue the command.
PRINT_WARN ("Data streaming not operational. Switching to interlock-mode! \n");
tape34xx_error_recovery_do_retry(td);
return;
case 0x22:
// Path equipment check. Might be drive adapter error, buffer error on the lower interface, internal path not useable, or error during cartridge load.
// All of the above are not recoverable
PRINT_WARN ("A path equipment check occurred. One of the following conditions occurred:\n");
PRINT_WARN ("drive adapter error,buffer error on the lower interface, internal path not useable, error during cartridge load.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x23:
// Read data check. Should have been covered earlier -> Bug!
tape34xx_error_recovery_HWBUG(td,7);
return;
case 0x24:
// Load display check. A load display command was issued, but the drive is displaying a drive check message. Can be treated as "device end".
tape34xx_error_recovery_succeded(td);
return;
case 0x25:
// Write data check. Should have been covered earlier -> Bug!
tape34xx_error_recovery_HWBUG(td,8);
return;
case 0x26:
// Data check (read opposite). Should have been covered earlier -> Bug!
tape34xx_error_recovery_HWBUG(td,9);
return;
case 0x27:
// Command reject. May indicate illegal channel program or buffer over/underrun.
// Since all channel programs are issued by this driver and ought to be correct,
// we assume an over/underrun situation and retry the channel program.
tape34xx_error_recovery_do_retry(td);
return;
case 0x28:
// Write id mark check. Should have been covered earlier -> bug!
tape34xx_error_recovery_HWBUG(td,10);
return;
case 0x29:
// Function incompatible. Either idrc is on but the hardware is not capable of doing idrc,
// or a perform subsystem function was issued and the cu is not online. Anyway, this
// cannot be recovered and is an I/O error.
PRINT_WARN ("Function incompatible. Try to switch off idrc! \n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x2a:
// Unsolicited environmental data. An internal counter overflowed; we can ignore
// this and reissue the cmd.
tape34xx_error_recovery_do_retry(td);
return;
case 0x2b:
// Environmental data present. Indicates either unload completed ok or read buffered
// log command completed ok.
if (treq->op==TO_RUN) {
// Rewind unload completed ok.
tape34xx_error_recovery_succeded(td);
return;
}
// Since we do not issue read buffered log commands, this should never occur -> bug.
tape34xx_error_recovery_HWBUG(td,11);
return;
case 0x2c:
// Permanent equipment check. cu has tried recovery, but did not succeed. This is an
// I/O error.
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x2d:
// Data security erase failure.
if (treq->op==TO_DSE) {
// report an I/O error
tape34xx_error_recovery_has_failed(td,EIO);
return;
}
// Data security erase failure, but no such command issued. This is a bug.
tape34xx_error_recovery_HWBUG(td,12);
return;
case 0x2e:
// Not capable. This indicates either that the drive fails reading the format id mark
// or that the format specified is not supported by the drive. We write a message and
// return an I/O error.
PRINT_WARN("Drive not capable processing the tape format!");
tape34xx_error_recovery_has_failed(td,EMEDIUMTYPE);
return;
case 0x2f:
// This erpa is reserved. This is a bug.
tape34xx_error_recovery_HWBUG(td,13);
return;
case 0x30:
// The medium is write protected, but we tried to write to it. We'll report this.
PRINT_WARN("Medium is write protected!\n");
tape34xx_error_recovery_has_failed(td,EACCES);
return;
case 0x31:
// Tape void. Should have been covered earlier -> bug
tape34xx_error_recovery_HWBUG(td,14);
return;
case 0x32:
// Tension loss. We cannot recover this, it's an I/O error.
PRINT_WARN("The drive lost tape tension.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x33:
// Load Failure. The cartridge was not inserted correctly or the tape is not threaded
// correctly. We cannot recover this, the user has to reload the cartridge.
PRINT_WARN("Cartridge load failure. Reload the cartridge and try again.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x34:
// Unload failure. The drive cannot maintain tape tension and control tape movement
// during an unload operation.
PRINT_WARN("Failure during cartridge unload. Please try manually.\n");
if (treq->op!=TO_RUN) {
tape34xx_error_recovery_HWBUG(td,15);
return;
}
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x35:
// Drive equipment check. One of the following:
// - cu cannot recover from a drive detected error
// - a check code message is displayed on drive message/load displays
// - the cartridge loader does not respond correctly
// - a failure occurs during an index, load, or unload cycle
PRINT_WARN("Equipment check! Please check the drive and the cartridge loader.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x36:
switch (cu_type) {
case 0x3480:
// This erpa is reserved for 3480 -> BUG
tape34xx_error_recovery_HWBUG(td,16);
return;
case 0x3490:
// End of data. This is a permanent I/O error, which cannot be recovered.
// A read-type command has reached the end-of-data mark.
tape34xx_error_recovery_has_failed(td,EIO);
return;
}
case 0x37:
// Tape length error. The tape is shorter than reported in the beginning-of-tape data.
PRINT_WARN("Tape length error.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x38:
// Physical end of tape. A read/write operation reached the physical end of tape.
if (treq->op==TO_WRI ||
treq->op==TO_DSE ||
treq->op==TO_EGA ||
treq->op==TO_WTM){
tape34xx_error_recovery_has_failed(td,ENOSPC);
} else {
tape34xx_error_recovery_has_failed(td,EIO);
}
return;
case 0x39:
// Backward at BOT. The drive is at BOT and is requested to move backward.
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x3a:
// Drive switched not ready, but the command needs the drive to be ready.
PRINT_WARN("Drive not ready. Turn the ready/not ready switch to ready position and try again.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x3b:
// Manual rewind or unload. This causes an I/O error.
PRINT_WARN("Medium was rewound or unloaded manually. Expect errors! Please do only use the mtoffl and mtrew ioctl to unload tapes or rewind tapes.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x3c:
case 0x3d:
case 0x3e:
case 0x3f:
// These erpas are reserved -> BUG
tape34xx_error_recovery_HWBUG(td,17);
return;
case 0x40:
// Overrun error. This should have been covered earlier -> bug.
tape34xx_error_recovery_HWBUG(td,18);
return;
case 0x41:
// Record sequence error. This should have been covered earlier -> bug.
tape34xx_error_recovery_HWBUG(td,19);
return;
case 0x42:
// Degraded mode. A condition that can cause degraded performance is detected.
PRINT_WARN("Subsystem is running in degraded mode. This may compromise your performance.\n");
tape34xx_error_recovery_do_retry(td);
return;
case 0x43:
// Drive not ready. Probably switch the ready/not ready switch to ready?
PRINT_WARN("The drive is not ready. Maybe no medium in?\n");
tape_med_state_set(td,MS_UNLOADED);
tape34xx_error_recovery_has_failed(td,ENOMEDIUM);
return;
case 0x44:
// Locate Block unsuccessful. We'll report this.
if ((treq->op!=TO_BLOCK) &&
(treq->op!=TO_LBL)) {
tape34xx_error_recovery_HWBUG(td,20); // No locate block was issued...
return;
}
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x45:
// The drive is assigned elsewhere [to a different channel path/computer].
PRINT_WARN("The drive is assigned elsewhere.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x46:
// Drive not online. Drive may be switched offline, the power supply may be switched off
// or the drive address may not be set correctly.
PRINT_WARN("The drive is not online.");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x47:
// Volume fenced. cu reports volume integrity is lost!
PRINT_WARN("Volume fenced. The volume integrity is lost! \n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x48:
// Log sense data and retry request. We'll do so...
tape34xx_error_recovery_do_retry(td);
return;
case 0x49:
// Bus out check. A parity check error on the bus was found.
PRINT_WARN("Bus out check. A data transfer over the bus was corrupted.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x4a:
// Control unit erp failed. We'll report this.
PRINT_WARN("The control unit failed recovering an I/O error.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x4b:
// Cu and drive incompatible. The drive requests micro-program patches, which are not available on the cu.
PRINT_WARN("The drive needs microprogram patches from the control unit, which are not available.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x4c:
// Recovered Check-One failure. The cu developed a hardware error, but was able to recover. We'll reissue the command.
tape34xx_error_recovery_do_retry(td);
return;
case 0x4d:
switch (cu_type) {
case 0x3480:
// This erpa is reserved for 3480 -> bug
tape34xx_error_recovery_HWBUG(td,21);
return;
case 0x3490:
// Resetting event received. Since the driver does not support resetting event recovery
// (which has to be handled by the I/O Layer), we'll report and retry our command.
tape34xx_error_recovery_do_retry(td);
return;
}
case 0x4e:
switch (cu_type) {
case 0x3480:
// This erpa is reserved for 3480 -> bug.
tape34xx_error_recovery_HWBUG(td,22);
return;
case 0x3490:
// Maximum block size exceeded. This indicates that the block to be written is larger
// than allowed for buffered mode. We'll report this...
PRINT_WARN("Maximum block size for buffered mode exceeded.\n");
tape34xx_error_recovery_has_failed(td,ENOBUFS);
return;
}
case 0x4f:
// These erpas are reserved -> bug
tape34xx_error_recovery_HWBUG(td,23);
return;
case 0x50:
// Read buffered log (Overflow). The cu is running in extended buffered log mode, and a counter overflowed.
// This should never happen, since we're never running in extended buffered log mode -> bug.
tape34xx_error_recovery_do_retry(td);
return;
case 0x51:
// Read buffered log (EOV). EOF processing occurs while the cu is in extended buffered log mode.
// This should never happen, since we're never running in extended buffered log mode -> bug.
tape34xx_error_recovery_do_retry(td);
return;
case 0x52:
// End of Volume complete. Rewind unload completed ok. We'll report to the user...
if (treq->op!=TO_RUN) {
tape34xx_error_recovery_HWBUG(td,24);
return;
}
tape34xx_error_recovery_succeded(td);
return;
case 0x53:
// Global command intercept. We'll have to reissue our command.
tape34xx_error_recovery_do_retry(td);
return;
case 0x54:
// Channel interface recovery (temporary). This can be recovered by reissuing the command.
tape34xx_error_recovery_do_retry(td);
return;
case 0x55:
// Channel interface recovery (permanent). This cannot be recovered, we'll inform the user.
PRINT_WARN("A permanent channel interface error occurred.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x56:
// Channel protocol error. This cannot be recovered.
PRINT_WARN("A channel protocol error occurred.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x57:
switch (cu_type) {
case 0x3480:
// Attention intercept. We have to reissue the command.
PRINT_WARN("An attention intercept occurred, which will be recovered.\n");
tape34xx_error_recovery_do_retry(td);
return;
case 0x3490:
// Global status intercept. We have to reissue the command.
PRINT_WARN("An global status intercept was received, which will be recovered.\n");
tape34xx_error_recovery_do_retry(td);
return;
}
case 0x58:
case 0x59:
// These erpas are reserved -> bug.
tape34xx_error_recovery_HWBUG(td,25);
return;
case 0x5a:
// Tape length incompatible. The tape inserted is too long,
// which could cause damage to the tape or the drive.
PRINT_WARN("Tape length incompatible [should be IBM Cartridge System Tape]. May cause damage to drive or tape.n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x5b:
// Format 3480 XF incompatible
if (sense[1]&SENSE_BEGINNING_OF_TAPE) {
// Everything is fine. The tape will be overwritten in a different format.
tape34xx_error_recovery_do_retry(td);
return;
}
PRINT_WARN("Tape format is incompatible to the drive, which writes 3480-2 XF.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x5c:
// Format 3480-2 XF incompatible
PRINT_WARN("Tape format is incompatible to the drive. The drive cannot access 3480-2 XF volumes.\n");
tape34xx_error_recovery_has_failed(td,EIO);
return;
case 0x5d:
// Tape length violation.
PRINT_WARN("Tape length violation [should be IBM Enhanced Capacity Cartridge System Tape]. May cause damage to drive or tape.\n");
tape34xx_error_recovery_has_failed(td,EMEDIUMTYPE);
return;
case 0x5e:
// Compaction algorithm incompatible.
PRINT_WARN("The volume is recorded using an incompatible compaction algorith, which is not supported by the control unit.\n");
tape34xx_error_recovery_has_failed(td,EMEDIUMTYPE);
return;
default:
// Reserved erpas -> bug
tape34xx_error_recovery_HWBUG(td,26);
return;
}
}
void tape34xx_error_recovery_has_failed (tape_dev_t* td,int error_id) {
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
tape_sprintf_event (tape_dbf_area,3,"Error Recovery failed for %s\n", tape_op_verbose[treq->op]);
treq->rc = -error_id;
if(treq->wakeup)
treq->wakeup (treq);
}
void tape34xx_error_recovery_succeded(tape_dev_t* td) {
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
tape_sprintf_event (tape_dbf_area,3,"Error Recovery successfull for %s\n", tape_op_verbose[treq->op]);
tape34xx_done_handler(td);
}
void tape34xx_error_recovery_do_retry(tape_dev_t* td) {
tape_ccw_req_t* treq = tape_get_active_ccw_req(td);
tape_sprintf_event (tape_dbf_area,3,"xerp retr\n");
tape_sprintf_event (tape_dbf_area,3, "%s\n",tape_op_verbose[treq->op]);
tape_remove_ccw_req(td,treq);
tape_do_io_irq(td, treq,TAPE_NO_WAIT);
}
void
tape34xx_error_recovery_read_opposite (tape_dev_t* td) {
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
switch (treq->op) {
case TO_RFO:
// We did read forward, but the data could not be read
// *correctly*. We will read backward and then skip
// forward again.
if(tape34xx_read_opposite(td))
tape34xx_error_recovery_do_retry(td);
else
tape34xx_error_recovery_has_failed(td,EIO);
break;
case TO_RBA:
// We tried to read forward and backward, but had no
// success -> failed.
tape34xx_error_recovery_has_failed(td,EIO);
break;
default:
PRINT_ERR("read_opposite_recovery_called_with_state:%s\n", tape_op_verbose[treq->op]);
tape34xx_error_recovery_has_failed(td,EIO);
}
}
void
tape34xx_error_recovery_HWBUG (tape_dev_t* td,int condno) {
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
PRINT_WARN("An unexpected condition #%d was caught in tape error recovery.\n",condno);
PRINT_WARN("Please report this incident.\n");
if(treq)
PRINT_WARN("Operation of tape:%s\n", tape_op_verbose[treq->op]);
tape_dump_sense(td);
tape34xx_error_recovery_has_failed(td,EIO);
}
/*
* This routine is called by frontend after an ENOSPC on write
*/
void tape34xx_process_eov(tape_dev_t* ti)
{
tape_ccw_req_t *treq;
int rc;
int tm_written = 0;
/* End of volume: We have to backspace the last written record, then */
/* we TRY to write a tapemark and then backspace over the written TM */
treq = tape34xx_ioctl(ti,MTBSR,1,&rc);
if(treq){
tape_do_io_and_wait(ti,treq,TAPE_WAIT);
tape_free_ccw_req(treq);
}
treq = tape34xx_ioctl(ti,MTWEOF,1,&rc);
if(treq){
rc = tape_do_io_and_wait(ti,treq,TAPE_WAIT);
if((rc == 0) && (treq->rc == 0))
tm_written = 1;
tape_free_ccw_req(treq);
}
if(tm_written){
treq = tape34xx_ioctl(ti,MTBSR,1,&rc);
if(treq){
tape_do_io_and_wait(ti,treq,TAPE_WAIT);
tape_free_ccw_req(treq);
}
}
}
/*
* 34xx first level interrupt handler
*/
void tape34xx_irq(tape_dev_t* td)
{
tape_ccw_req_t* treq = tape_get_active_ccw_req(td);
if (treq == NULL) {
tape34xx_unsolicited_irq(td);
} else if ((td->devstat.dstat & DEV_STAT_UNIT_EXCEP) &&
(td->devstat.dstat & DEV_STAT_DEV_END) &&
(treq->op == TO_WRI)){
/* Write at end of volume */
PRINT_INFO("End of volume\n"); /* XXX */
tape34xx_error_recovery_has_failed(td,ENOSPC);
} else if (td->devstat.dstat & DEV_STAT_UNIT_CHECK) {
tape34xx_error_recovery(td);
} else if (td->devstat.dstat & (DEV_STAT_DEV_END)) {
tape34xx_done_handler(td);
} else {
tape34xx_default_handler(td);
}
}
EXPORT_SYMBOL(tape34xx_irq);
EXPORT_SYMBOL(tape34xx_write_block);
EXPORT_SYMBOL(tape34xx_read_block);
EXPORT_SYMBOL(tape34xx_ioctl);
EXPORT_SYMBOL(tape34xx_ioctl_overload);
EXPORT_SYMBOL(tape34xx_bread);
EXPORT_SYMBOL(tape34xx_free_bread);
EXPORT_SYMBOL(tape34xx_process_eov);
EXPORT_SYMBOL(tape34xx_bread_enable_locate);
/***************************************************************************
*
* drivers/s390/char/tape34xx.h
* common tape device discipline for 34xx tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
****************************************************************************
*/
#ifndef _TAPE34XX_H
#define _TAPE34XX_H
/*
* The CCW commands for the Tape type of command.
*/
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT*/
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD 0x0C /* */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT*/
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT*/
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT*/
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT*/
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
#define COMMAND_CHAIN CCW_FLAG_CC /* redefine from irq.h */
#define CHANNEL_END DEV_STAT_CHN_END /* redefine from irq.h */
#define DEVICE_END DEV_STAT_DEV_END /* redefine from irq.h */
#define UNIT_CHECK DEV_STAT_UNIT_CHECK /* redefine from irq.h */
#define UNIT_EXCEPTION DEV_STAT_UNIT_EXCEP /* redefine from irq.h */
#define CONTROL_UNIT_END DEV_STAT_CU_END /* redefine from irq.h */
#define INCORR_LEN SCHN_STAT_INCORR_LEN /* redefine from irq.h */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01
typedef struct _tape34xx_disc_data_t {
__u8 modeset_byte;
} tape34xx_disc_data_t __attribute__ ((packed, aligned(8)));
#define MOD_BYTE ((tape34xx_disc_data_t *)td->discdata)->modeset_byte
/* discipline functions */
int tape34xx_ioctl_overload (tape_dev_t* td, unsigned int cmd, unsigned long arg);
tape_ccw_req_t * tape34xx_write_block (const char *data, size_t count, tape_dev_t* td);
tape_ccw_req_t * tape34xx_read_block (const char *data, size_t count, tape_dev_t* td);
tape_ccw_req_t * tape34xx_ioctl(tape_dev_t* td, int op,int count, int* rc);
tape_ccw_req_t * tape34xx_bread (struct request *req, tape_dev_t* td,int tapeblock_major);
void tape34xx_free_bread (tape_ccw_req_t* treq);
void tape34xx_bread_enable_locate (tape_ccw_req_t * treq);
tape_ccw_req_t * tape34xx_bwrite (struct request *req, tape_dev_t* td,int tapeblock_major);
/* Event handlers */
void tape34xx_default_handler (tape_dev_t * td);
void tape34xx_unexpect_uchk_handler (tape_dev_t * td);
void tape34xx_irq (tape_dev_t* td);
void tape34xx_process_eov(tape_dev_t* td);
// the error recovery stuff:
void tape34xx_error_recovery (tape_dev_t* td);
void tape34xx_error_recovery_has_failed (tape_dev_t* td,int error_id);
void tape34xx_error_recovery_succeded(tape_dev_t* td);
void tape34xx_error_recovery_do_retry(tape_dev_t* td);
void tape34xx_error_recovery_read_opposite (tape_dev_t* td);
void tape34xx_error_recovery_HWBUG (tape_dev_t* td,int condno);
#endif // _TAPE34XX_H
/*
* drivers/s390/char/tape_34xx.c
* tape device discipline for 3480/3490 tapes.
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <asm/tape390.h>
#include "tape.h"
#include "tape_std.h"
#define PRINTK_HEADER "T34xx:"
enum tape34xx_type {
tape_3480,
tape_3490,
};
/*
* Medium sense (asynchronous with callback) for 34xx tapes. There is no 'real'
* medium sense call. So we just do a normal sense.
*/
static void
__tape_34xx_medium_sense_callback(struct tape_request *request, void *data)
{
unsigned char *sense;
struct tape_device *device;
request->callback = NULL;
if(request->rc == 0 && (device = request->device) != NULL) {
sense = request->cpdata;
/*
* This isn't quite correct. But since INTERVENTION_REQUIRED
* means that the drive is 'neither ready nor online' it is
* only slightly inaccurate to say there is no tape loaded if
* the drive isn't online...
*/
if(sense[0] & SENSE_INTERVENTION_REQUIRED)
tape_med_state_set(device, MS_UNLOADED);
else
tape_med_state_set(device, MS_LOADED);
if(sense[1] & SENSE_WRITE_PROTECT)
device->tape_generic_status |= GMT_WR_PROT(~0);
else
device->tape_generic_status &= ~GMT_WR_PROT(~0);
}
tape_free_request(request);
}
static int
tape_34xx_medium_sense(struct tape_device *device)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(1, 32);
if(IS_ERR(request)) {
DBF_EXCEPTION(6, "MSEN fail\n");
return PTR_ERR(request);
}
request->op = TO_MSEN;
tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
request->callback = __tape_34xx_medium_sense_callback;
rc = tape_do_io_async(device, request);
return rc;
}
/*
* These functions are currently used only to schedule a medium_sense for
* later execution. This is because we get an interrupt whenever a medium
* is inserted but cannot call tape_do_io* from an interrupt context.
* Maybe that's useful for other actions we want to start from the
* interrupt handler.
*/
static void
tape_34xx_work_handler(void *data)
{
struct {
struct tape_device *device;
enum tape_op op;
struct work_struct work;
} *p = data;
switch(p->op) {
case TO_MSEN:
tape_34xx_medium_sense(p->device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
}
tape_put_device(p->device);
kfree(p);
}
static int
tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
struct {
struct tape_device *device;
enum tape_op op;
struct work_struct work;
} *p;
if((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
memset(p, 0, sizeof(*p));
INIT_WORK(&p->work, tape_34xx_work_handler, p);
atomic_inc(&device->ref_count);
p->device = device;
p->op = op;
schedule_work(&p->work);
return 0;
}
/*
* Done Handler is called when dev stat = DEVICE-END (successful operation)
*/
static int
tape_34xx_done(struct tape_device *device, struct tape_request *request)
{
DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
return TAPE_IO_SUCCESS;
}
static inline int
tape_34xx_erp_failed(struct tape_device *device,
struct tape_request *request, int rc)
{
DBF_EVENT(3, "Error recovery failed for %s\n",
tape_op_verbose[request->op]);
return rc;
}
static inline int
tape_34xx_erp_succeded(struct tape_device *device,
struct tape_request *request)
{
DBF_EVENT(3, "Error Recovery successfull for %s\n",
tape_op_verbose[request->op]);
return tape_34xx_done(device, request);
}
static inline int
tape_34xx_erp_retry(struct tape_device *device, struct tape_request *request)
{
DBF_EVENT(3, "xerp retr %s\n",
tape_op_verbose[request->op]);
return TAPE_IO_RETRY;
}
/*
* This function is called, when no request is outstanding and we get an
* interrupt
*/
static int
tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
{
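/* 0x85 = attention | device end | unit exception */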
if (irb->scsw.dstat == 0x85 /* READY */) {
/* A medium was inserted in the drive. */
DBF_EVENT(6, "xuud med\n");
tape_34xx_schedule_work(device, TO_MSEN);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %s\n",
device->cdev->dev.bus_id);
PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
tape_dump_sense(device, NULL, irb);
}
return TAPE_IO_SUCCESS;
}
/*
* Read Opposite Error Recovery Function:
* Used, when Read Forward does not work
*/
static int
tape_34xx_erp_read_opposite(struct tape_device *device,
struct tape_request *request)
{
if (request->op == TO_RFO) {
/*
* We did read forward, but the data could not be read
* *correctly*. We transform the request to a read backward
* and try again.
*/
tape_std_read_backward(device, request);
return tape_34xx_erp_retry(device, request);
}
if (request->op != TO_RBA)
PRINT_ERR("read_opposite called with state:%s\n",
tape_op_verbose[request->op]);
/*
* We tried to read forward and backward, but hat no
* success -> failed.
*/
return tape_34xx_erp_failed(device, request, -EIO);
}
static int
tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
struct irb *irb, int no)
{
if (request->op != TO_ASSIGN) {
PRINT_WARN("An unexpected condition #%d was caught in "
"tape error recovery.\n", no);
PRINT_WARN("Please report this incident.\n");
if (request)
PRINT_WARN("Operation of tape:%s\n",
tape_op_verbose[request->op]);
tape_dump_sense(device, request, irb);
}
return tape_34xx_erp_failed(device, request, -EIO);
}
/*
* Handle data overrun between cu and drive. The channel speed might
* be too slow.
*/
static int
tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (irb->ecw[3] == 0x40) {
PRINT_WARN ("Data overrun error between control-unit "
"and drive. Use a faster channel connection, "
"if possible! \n");
return tape_34xx_erp_failed(device, request, -EIO);
}
return tape_34xx_erp_bug(device, request, irb, -1);
}
/*
* Handle record sequence error.
*/
static int
tape_34xx_erp_sequence(struct tape_device *device,
struct tape_request *request, struct irb *irb)
{
if (irb->ecw[3] == 0x41) {
/*
* cu detected incorrect block-id sequence on tape.
*/
PRINT_WARN("Illegal block-id sequence found!\n");
return tape_34xx_erp_failed(device, request, -EIO);
}
/*
* Record sequence error bit is set, but erpa does not
* show record sequence error.
*/
return tape_34xx_erp_bug(device, request, irb, -2);
}
/*
* This function analyses the tape's sense-data in case of a unit-check.
* If possible, it tries to recover from the error. Else the user is
* informed about the problem.
*/
static int
tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
int inhibit_cu_recovery;
__u8* sense;
#ifdef CONFIG_S390_TAPE_BLOCK
if (request->op == TO_BLOCK) {
/*
* Recovery for block device requests. Set the block_position
* to something invalid and retry.
*/
device->blk_data.block_position = -1;
if (request->retries-- <= 0)
return tape_34xx_erp_failed(device, request, -EIO);
else
return tape_34xx_erp_retry(device, request);
}
#endif
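/*
 * Bit 0x80 of the modeset byte marks control-unit error recovery as
 * inhibited. The driver never sets it, so the data-check cases below
 * treat a set bit as an unexpected condition.
 */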
inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
sense = irb->ecw;
if (sense[0] & SENSE_COMMAND_REJECT) {
if ((sense[1] & SENSE_WRITE_PROTECT) &&
(request->op == TO_DSE ||
request->op == TO_WRI ||
request->op == TO_WTM))
/* medium is write protected */
return tape_34xx_erp_failed(device, request, -EACCES);
else
return tape_34xx_erp_bug(device, request, irb, -3);
}
/*
* special cases for various tape-states when reaching
* end of recorded area
*/
if ((sense[0] == 0x08 || sense[0] == 0x10 || sense[0] == 0x12) &&
(sense[1] == 0x40 || sense[1] == 0x0c))
switch (request->op) {
case TO_FSF:
/* Trying to seek beyond end of recorded area */
return tape_34xx_erp_failed(device, request, -ENOSPC);
case TO_LBL:
/* Block could not be located. */
return tape_34xx_erp_failed(device, request, -EIO);
case TO_RFO:
/* Read beyond end of recorded area -> 0 bytes read */
return tape_34xx_erp_failed(device, request, 0);
default:
PRINT_ERR("Invalid op in %s:%i\n",
__FUNCTION__, __LINE__);
return tape_34xx_erp_failed(device, request, 0);
}
/* Sensing special bits */
if (sense[0] & SENSE_BUS_OUT_CHECK)
return tape_34xx_erp_retry(device, request);
if (sense[0] & SENSE_DATA_CHECK) {
/*
* hardware failure, damaged tape or improper
* operating conditions
*/
switch (sense[3]) {
case 0x23:
/* a read data check occurred */
if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
inhibit_cu_recovery)
// data check is not permanent, may be
// recovered. We always use async-mode with
// cu-recovery, so this should *never* happen.
return tape_34xx_erp_bug(device, request,
irb, -4);
/* data check is permanent, CU recovery has failed */
PRINT_WARN("Permanent read error\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x25:
// a write data check occurred
if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
inhibit_cu_recovery)
// data check is not permanent, may be
// recovered. We always use async-mode with
// cu-recovery, so this should *never* happen.
return tape_34xx_erp_bug(device, request,
irb, -5);
// data check is permanent, cu-recovery has failed
PRINT_WARN("Permanent write error\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x26:
/* Data Check (read opposite) occurred. */
return tape_34xx_erp_read_opposite(device, request);
case 0x28:
/* ID-Mark at tape start couldn't be written */
PRINT_WARN("ID-Mark could not be written.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x31:
/* Tape void. Tried to read beyond end of device. */
PRINT_WARN("Read beyond end of recorded area.\n");
return tape_34xx_erp_failed(device, request, -ENOSPC);
case 0x41:
/* Record sequence error. */
PRINT_WARN("Invalid block-id sequence found.\n");
return tape_34xx_erp_failed(device, request, -EIO);
default:
/* all data checks for 3480 should result in one of
* the above erpa-codes. For 3490, other data-check
* conditions do exist. */
if (device->cdev->id.driver_info == tape_3480)
return tape_34xx_erp_bug(device, request,
irb, -6);
}
}
if (sense[0] & SENSE_OVERRUN)
return tape_34xx_erp_overrun(device, request, irb);
if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
return tape_34xx_erp_sequence(device, request, irb);
/* Sensing erpa codes */
switch (sense[3]) {
case 0x00:
/* Unit check with erpa code 0. Report and ignore. */
PRINT_WARN("Non-error sense was found. "
"Unit-check will be ignored.\n");
return TAPE_IO_SUCCESS;
case 0x21:
/*
* Data streaming not operational. CU will switch to
* interlock mode. Reissue the command.
*/
PRINT_WARN("Data streaming not operational. "
"Switching to interlock-mode.\n");
return tape_34xx_erp_retry(device, request);
case 0x22:
/*
* Path equipment check. Might be drive adapter error, buffer
* error on the lower interface, internal path not usable,
* or error during cartridge load.
*/
PRINT_WARN("A path equipment check occurred. One of the "
"following conditions occurred:\n");
PRINT_WARN("drive adapter error, buffer error on the lower "
"interface, internal path not useable, error "
"during cartridge load.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x24:
/*
* Load display check. A load display command was issued, but
* the drive is displaying a drive check message. Can be
* treated as "device end".
*/
return tape_34xx_erp_succeded(device, request);
case 0x27:
/*
* Command reject. May indicate illegal channel program or
* buffer over/underrun. Since all channel programs are
* issued by this driver and ought to be correct, we assume
* an over/underrun situation and retry the channel program.
*/
return tape_34xx_erp_retry(device, request);
case 0x29:
/*
* Function incompatible. Either the tape is IDRC compressed
* but the hardware isn't capable of IDRC, or a perform
* subsystem function is issued and the CU is not online.
*/
PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x2a:
/*
* Unsolicited environmental data. An internal counter
* overflowed; we can ignore this and reissue the cmd.
*/
return tape_34xx_erp_retry(device, request);
case 0x2b:
/*
* Environmental data present. Indicates either unload
* completed ok or read buffered log command completed ok.
*/
if (request->op == TO_RUN) {
/* Rewind unload completed ok. */
tape_med_state_set(device, MS_UNLOADED);
return tape_34xx_erp_succeded(device, request);
}
/* tape_34xx doesn't use read buffered log commands. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x2c:
/*
* Permanent equipment check. CU has tried recovery, but
* did not succeed.
*/
return tape_34xx_erp_failed(device, request, -EIO);
case 0x2d:
/* Data security erase failure. */
if (request->op == TO_DSE)
return tape_34xx_erp_failed(device, request, -EIO);
/* Data security erase failure, but no such command issued. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x2e:
/*
* Not capable. This indicates either that the drive failed
* to read the format id mark or that the format specified
* is not supported by the drive.
*/
PRINT_WARN("Drive not capable processing the tape format!");
return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
case 0x30:
/* The medium is write protected. */
PRINT_WARN("Medium is write protected!\n");
return tape_34xx_erp_failed(device, request, -EACCES);
case 0x32:
// Tension loss. We cannot recover this, it's an I/O error.
PRINT_WARN("The drive lost tape tension.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x33:
/*
* Load Failure. The cartridge was not inserted correctly or
* the tape is not threaded correctly.
*/
PRINT_WARN("Cartridge load failure. Reload the cartridge "
"and try again.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x34:
/*
* Unload failure. The drive cannot maintain tape tension
* and control tape movement during an unload operation.
*/
PRINT_WARN("Failure during cartridge unload. "
"Please try manually.\n");
if (request->op == TO_RUN)
return tape_34xx_erp_failed(device, request, -EIO);
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x35:
/*
* Drive equipment check. One of the following:
* - cu cannot recover from a drive detected error
* - a check code message is shown on drive display
* - the cartridge loader does not respond correctly
* - a failure occurs during an index, load, or unload cycle
*/
PRINT_WARN("Equipment check! Please check the drive and "
"the cartridge loader.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x36:
if (device->cdev->id.driver_info == tape_3490)
/* End of data. */
return tape_34xx_erp_failed(device, request, -EIO);
/* This erpa is reserved for 3480 */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x37:
/*
* Tape length error. The tape is shorter than reported in
* the beginning-of-tape data.
*/
PRINT_WARN("Tape length error.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x38:
/*
* Physical end of tape. A read/write operation reached
* the physical end of tape.
*/
if (request->op==TO_WRI ||
request->op==TO_DSE ||
request->op==TO_WTM)
return tape_34xx_erp_failed(device, request, -ENOSPC);
return tape_34xx_erp_failed(device, request, -EIO);
case 0x39:
/* Backward at Beginning of tape. */
return tape_34xx_erp_failed(device, request, -EIO);
case 0x3a:
/* Drive switched to not ready. */
PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
"to ready position and try again.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x3b:
/* Manual rewind or unload. This causes an I/O error. */
PRINT_WARN("Medium was rewound or unloaded manually.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x42:
/*
* Degraded mode. A condition that can cause degraded
* performance was detected.
*/
PRINT_WARN("Subsystem is running in degraded mode.\n");
return tape_34xx_erp_retry(device, request);
case 0x43:
/* Drive not ready. */
tape_med_state_set(device, MS_UNLOADED);
/* Some commands are successful even in this case */
if(sense[1] & SENSE_DRIVE_ONLINE) {
switch(request->op) {
case TO_ASSIGN:
case TO_UNASSIGN:
case TO_DIS:
return tape_34xx_done(device, request);
break;
default:
break;
}
}
PRINT_WARN("The drive is not ready.\n");
return tape_34xx_erp_failed(device, request, -ENOMEDIUM);
case 0x44:
/* Locate Block unsuccessful. */
if (request->op != TO_BLOCK && request->op != TO_LBL)
/* No locate block was issued. */
return tape_34xx_erp_bug(device, request,
irb, sense[3]);
return tape_34xx_erp_failed(device, request, -EIO);
case 0x45:
/* The drive is assigned to a different channel path. */
PRINT_WARN("The drive is assigned elsewhere.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x46:
/*
* Drive not online. Drive may be switched offline,
* the power supply may be switched off or
* the drive address may not be set correctly.
*/
PRINT_WARN("The drive is not online.");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x47:
/* Volume fenced. CU reports volume integrity is lost. */
PRINT_WARN("Volume fenced. The volume integrity is lost.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x48:
/* Log sense data and retry request. */
return tape_34xx_erp_retry(device, request);
case 0x49:
/* Bus out check. A parity check error on the bus was found. */
PRINT_WARN("Bus out check. A data transfer over the bus "
"has been corrupted.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x4a:
/* Control unit erp failed. */
PRINT_WARN("The control unit I/O error recovery failed.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x4b:
/*
* CU and drive incompatible. The drive requests micro-program
* patches, which are not available on the CU.
*/
PRINT_WARN("The drive needs microprogram patches from the "
"control unit, which are not available.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x4c:
/*
* Recovered Check-One failure. The CU detected a hardware
* error but was able to recover.
*/
return tape_34xx_erp_retry(device, request);
case 0x4d:
if (device->cdev->id.driver_info == tape_3490)
/*
* Resetting event received. Since the driver does
* not support resetting event recovery (which has to
* be handled by the I/O Layer), retry our command.
*/
return tape_34xx_erp_retry(device, request);
/* This erpa is reserved for 3480. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x4e:
if (device->cdev->id.driver_info == tape_3490) {
/*
* Maximum block size exceeded. This indicates that
* the block to be written is larger than allowed for
* buffered mode.
*/
PRINT_WARN("Maximum block size for buffered "
"mode exceeded.\n");
return tape_34xx_erp_failed(device, request, -ENOBUFS);
}
/* This erpa is reserved for 3480. */
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x50:
/*
* Read buffered log (Overflow). CU is running in extended
* buffered log mode, and a counter overflows. This should
* never happen, since we're never running in extended
* buffered log mode.
*/
return tape_34xx_erp_retry(device, request);
case 0x51:
/*
* Read buffered log (EOV). EOF processing occurs while the
* CU is in extended buffered log mode. This should never
* happen, since we're never running in extended buffered
* log mode.
*/
return tape_34xx_erp_retry(device, request);
case 0x52:
/* End of Volume complete. Rewind unload completed ok. */
if (request->op == TO_RUN) {
tape_med_state_set(device, MS_UNLOADED);
return tape_34xx_erp_succeded(device, request);
}
return tape_34xx_erp_bug(device, request, irb, sense[3]);
case 0x53:
/* Global command intercept. */
return tape_34xx_erp_retry(device, request);
case 0x54:
/* Channel interface recovery (temporary). */
return tape_34xx_erp_retry(device, request);
case 0x55:
/* Channel interface recovery (permanent). */
PRINT_WARN("A permanent channel interface error occurred.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x56:
/* Channel protocol error. */
PRINT_WARN("A channel protocol error occurred.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x57:
if (device->cdev->id.driver_info == tape_3480) {
/* Attention intercept. */
PRINT_WARN("An attention intercept occurred, "
"which will be recovered.\n");
return tape_34xx_erp_retry(device, request);
} else {
/* Global status intercept. */
PRINT_WARN("An global status intercept was recieved, "
"which will be recovered.\n");
return tape_34xx_erp_retry(device, request);
}
case 0x5a:
/*
* Tape length incompatible. The tape inserted is too long,
* which could cause damage to the tape or the drive.
*/
PRINT_WARN("Tape length incompatible [should be IBM Cartridge "
"System Tape]. May cause damage to drive or tape.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x5b:
/* Format 3480 XF incompatible */
if (sense[1] & SENSE_BEGINNING_OF_TAPE)
/* The tape will get overwritten. */
return tape_34xx_erp_retry(device, request);
PRINT_WARN("Tape format is incompatible to the drive, "
"which writes 3480-2 XF.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x5c:
/* Format 3480-2 XF incompatible */
PRINT_WARN("Tape format is incompatible to the drive. "
"The drive cannot access 3480-2 XF volumes.\n");
return tape_34xx_erp_failed(device, request, -EIO);
case 0x5d:
/* Tape length violation. */
PRINT_WARN("Tape length violation [should be IBM Enhanced "
"Capacity Cartridge System Tape]. May cause "
"damage to drive or tape.\n");
return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
case 0x5e:
/* Compaction algorithm incompatible. */
PRINT_WARN("The volume is recorded using an incompatible "
"compaction algorith, which is not supported by "
"the control unit.\n");
return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
/* The following erpas should have been covered earlier. */
case 0x23: /* Read data check. */
case 0x25: /* Write data check. */
case 0x26: /* Data check (read opposite). */
case 0x28: /* Write id mark check. */
case 0x31: /* Tape void. */
case 0x40: /* Overrun error. */
case 0x41: /* Record sequence error. */
/* All other erpas are reserved for future use. */
default:
return tape_34xx_erp_bug(device, request, irb, sense[3]);
}
}
/*
* 3480/3490 interrupt handler
*/
static int
tape_34xx_irq(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
if (request == NULL)
return tape_34xx_unsolicited_irq(device, irb);
if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
PRINT_INFO("End of volume\n"); /* XXX */
return tape_34xx_erp_failed(device, request, -ENOSPC);
}
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
return tape_34xx_unit_check(device, request, irb);
if (irb->scsw.dstat & DEV_STAT_DEV_END)
return tape_34xx_done(device, request);
DBF_EVENT(6, "xunknownirq\n");
PRINT_ERR("Unexpected interrupt.\n");
PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
tape_dump_sense(device, request, irb);
return TAPE_IO_STOP;
}
/*
* ioctl_overload
*/
static int
tape_34xx_ioctl(struct tape_device *device,
unsigned int cmd, unsigned long arg)
{
if (cmd == TAPE390_DISPLAY)
return tape_std_display(device, cmd, arg);
else
return -EINVAL;
}
static int
tape_34xx_setup_device(struct tape_device * device)
{
DBF_EVENT(6, "34xx minor1: %x\n", device->first_minor);
tape_34xx_medium_sense(device);
return 0;
}
static void
tape_34xx_cleanup_device(struct tape_device * device)
{
if (device->discdata) {
kfree(device->discdata);
device->discdata = NULL;
}
}
/*
* MTTELL: Tell block. Return the number of the block relative to the current file.
*/
static int
tape_34xx_mttell(struct tape_device *device, int mt_count)
{
__u64 block_id;
int rc;
rc = tape_std_read_block_id(device, &block_id);
if (rc)
return rc;
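/*
 * The block number is kept in the upper word of the 64-bit block id;
 * only its low 22 bits are significant.
 */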
return (block_id >> 32) & 0x3fffff;
}
/*
* MTSEEK: seek to the specified block.
*/
static int
tape_34xx_mtseek(struct tape_device *device, int mt_count)
{
struct tape_request *request;
if (mt_count > 0x400000) {
DBF_EXCEPTION(6, "xsee parm\n");
return -EINVAL;
}
request = tape_alloc_request(3, 4);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_LBL;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
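/*
 * The LOCATE argument is a 4-byte word: the 22-bit block number in
 * the low bits plus mode flags in the high byte derived from the
 * modeset byte.
 */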
*(__u32 *) request->cpdata = mt_count |
((*device->modeset_byte & 0x08) ? 0x01800000 : 0x01000000);
tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
#ifdef CONFIG_S390_TAPE_BLOCK
/*
* Tape block read for 34xx.
*/
static struct tape_request *
tape_34xx_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
int count = 0,start_block,i;
unsigned off;
char *dst;
struct bio_vec *bv;
struct bio *bio;
DBF_EVENT(6, "xBREDid:");
start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
/* Count the number of blocks for the request. */
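/* bv_len is in bytes; shifting by TAPEBLOCK_HSEC_S2B + 9 converts bytes to 2048-byte tape blocks. */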
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
}
}
/* Allocate the ccw request. */
request = tape_alloc_request(2+count+1, 4);
if (IS_ERR(request))
return request;
/* Setup ccws. */
request->op = TO_BLOCK;
*(__u32 *) request->cpdata = (start_block & 0x3fffff) |
((*device->modeset_byte & 0x08) ? 0x81000000 : 0x01000000);
ccw = request->cpaddr;
ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
/*
* We always setup a nop after the mode set ccw. This slot is
* used in tape_std_check_locate to insert a locate ccw if the
* current tape position doesn't match the start block to be read.
*/
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
dst = kmap(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len;
off += TAPEBLOCK_HSEC_SIZE) {
ccw->flags = CCW_FLAG_CC;
ccw->cmd_code = READ_FORWARD;
ccw->count = TAPEBLOCK_HSEC_SIZE;
set_normalized_cda(ccw, (void*) __pa(dst));
ccw++;
dst += TAPEBLOCK_HSEC_SIZE;
}
}
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
return request;
}
static void
tape_34xx_free_bread (struct tape_request *request)
{
struct ccw1* ccw;
/* Last ccw is a nop and doesn't need clear_normalized_cda */
for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
if (ccw->cmd_code == READ_FORWARD)
clear_normalized_cda(ccw);
tape_free_request(request);
}
/*
* check_locate is called just before the tape request is passed to
* the common io layer for execution. It has to check the current
* tape position and insert a locate ccw if it doesn't match the
* start block for the request.
*/
static void
tape_34xx_check_locate (struct tape_device *device,
struct tape_request *request)
{
int start_block;
start_block = *(__u32 *) request->cpdata & 0x3fffff;
if (start_block != device->blk_data.block_position)
tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
}
#endif
/*
* List of 3480/3490 magnetic tape commands.
*/
static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
[MTRESET] = tape_std_mtreset,
[MTFSF] = tape_std_mtfsf,
[MTBSF] = tape_std_mtbsf,
[MTFSR] = tape_std_mtfsr,
[MTBSR] = tape_std_mtbsr,
[MTWEOF] = tape_std_mtweof,
[MTREW] = tape_std_mtrew,
[MTOFFL] = tape_std_mtoffl,
[MTNOP] = tape_std_mtnop,
[MTRETEN] = tape_std_mtreten,
[MTBSFM] = tape_std_mtbsfm,
[MTFSFM] = tape_std_mtfsfm,
[MTEOM] = tape_std_mteom,
[MTERASE] = tape_std_mterase,
[MTRAS1] = NULL,
[MTRAS2] = NULL,
[MTRAS3] = NULL,
[MTSETBLK] = tape_std_mtsetblk,
[MTSETDENSITY] = NULL,
[MTSEEK] = tape_34xx_mtseek,
[MTTELL] = tape_34xx_mttell,
[MTSETDRVBUFFER] = NULL,
[MTFSS] = NULL,
[MTBSS] = NULL,
[MTWSM] = NULL,
[MTLOCK] = NULL,
[MTUNLOCK] = NULL,
[MTLOAD] = tape_std_mtload,
[MTUNLOAD] = tape_std_mtunload,
[MTCOMPRESSION] = tape_std_mtcompression,
[MTSETPART] = NULL,
[MTMKPART] = NULL
};
/*
* Tape discipline structure for 3480 and 3490.
*/
static struct tape_discipline tape_discipline_34xx = {
.owner = THIS_MODULE,
.setup_device = tape_34xx_setup_device,
.cleanup_device = tape_34xx_cleanup_device,
.process_eov = tape_std_process_eov,
.irq = tape_34xx_irq,
.read_block = tape_std_read_block,
.write_block = tape_std_write_block,
.assign = tape_std_assign,
.unassign = tape_std_unassign,
#ifdef CONFIG_S390_TAPE_BLOCK
.bread = tape_34xx_bread,
.free_bread = tape_34xx_free_bread,
.check_locate = tape_34xx_check_locate,
#endif
.ioctl_fn = tape_34xx_ioctl,
.mtop_array = tape_34xx_mtop
};
static struct ccw_device_id tape_34xx_ids[] = {
{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), driver_info: tape_3480},
{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), driver_info: tape_3490},
{ /* end of list */ }
};
static int
tape_34xx_enable(struct ccw_device *cdev)
{
return tape_enable_device(cdev->dev.driver_data,
&tape_discipline_34xx);
}
static int
tape_34xx_disable(struct ccw_device *cdev)
{
tape_disable_device(cdev->dev.driver_data);
return 0;
}
static struct ccw_driver tape_34xx_driver = {
.name = "tape_34xx",
.owner = THIS_MODULE,
.ids = tape_34xx_ids,
.probe = tape_generic_probe,
.remove = tape_generic_remove,
.set_online = tape_34xx_enable,
.set_offline = tape_34xx_disable,
};
static int
tape_34xx_init (void)
{
int rc;
DBF_EVENT(3, "34xx init: $Revision: 1.6 $\n");
/* Register driver for 3480/3490 tapes. */
rc = ccw_driver_register(&tape_34xx_driver);
if (rc)
DBF_EVENT(3, "34xx init failed\n");
else
DBF_EVENT(3, "34xx registered\n");
return rc;
}
static void
tape_34xx_exit(void)
{
ccw_driver_unregister(&tape_34xx_driver);
}
MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape "
"device driver ($Revision: 1.6 $)");
MODULE_LICENSE("GPL");
module_init(tape_34xx_init);
module_exit(tape_34xx_exit);
/*
* drivers/s390/char/tape_block.c
* block device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/buffer_head.h>
#include <asm/debug.h>
#include "tape.h"
#define PRINTK_HEADER "TBLOCK:"
#define TAPEBLOCK_MAX_SEC 100
#define TAPEBLOCK_MIN_REQUEUE 3
/*
* file operation structure for tape block frontend
*/
static int tapeblock_open(struct inode *, struct file *);
static int tapeblock_release(struct inode *, struct file *);
static struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
};
static int tapeblock_major = 0;
/*
* Post finished request.
*/
static inline void
tapeblock_end_request(struct request *req, int uptodate)
{
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
BUG();
end_that_request_last(req);
}
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
struct tape_device *device;
struct request *req;
device = ccw_req->device;
req = (struct request *) data;
tapeblock_end_request(req, ccw_req->rc == 0);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =
(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
else
/* We lost the position information due to an error. */
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
!blk_queue_empty(&device->blk_data.request_queue))
tasklet_schedule(&device->blk_data.tasklet);
}
/*
* Fetch requests from block device queue.
*/
static inline void
__tape_process_blk_queue(struct tape_device *device, struct list_head *new_req)
{
request_queue_t *queue;
struct list_head *l;
struct request *req;
struct tape_request *ccw_req;
int nr_queued;
/* FIXME: we have to make sure that the tapeblock frontend
owns the device. tape_state != TS_IN_USE is NOT enough. */
if (device->tape_state != TS_IN_USE)
return;
queue = &device->blk_data.request_queue;
nr_queued = 0;
/* Count number of requests on ccw queue. */
list_for_each(l, &device->req_queue)
nr_queued++;
while (!blk_queue_plugged(queue) &&
!blk_queue_empty(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE) {
req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
blkdev_dequeue_request(req);
tapeblock_end_request(req, 0);
continue;
}
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
if (PTR_ERR(ccw_req) == -ENOMEM)
break; /* don't try again */
DBF_EVENT(1, "TBLOCK: bread failed\n");
blkdev_dequeue_request(req);
tapeblock_end_request(req, 0);
continue;
}
ccw_req->callback = __tapeblock_end_request;
ccw_req->callback_data = (void *) req;
ccw_req->retries = TAPEBLOCK_RETRIES;
blkdev_dequeue_request(req);
list_add_tail(new_req, &ccw_req->list);
nr_queued++;
}
}
/*
* Feed requests to the tape device.
*/
static inline int
tape_queue_requests(struct tape_device *device, struct list_head *new_req)
{
struct list_head *l, *n;
struct tape_request *ccw_req;
struct request *req;
int rc, fail;
fail = 0;
list_for_each_safe(l, n, new_req) {
ccw_req = list_entry(l, struct tape_request, list);
list_del(&ccw_req->list);
rc = tape_do_io_async(device, ccw_req);
if (rc) {
/*
* Start/enqueueing failed. No retries in
* this case.
*/
req = (struct request *) ccw_req->callback_data;
tapeblock_end_request(req, 0);
device->discipline->free_bread(ccw_req);
fail = 1;
}
}
return fail;
}
/*
* Tape request queue function. Called from ll_rw_blk.c
*/
static void
tapeblock_request_fn(request_queue_t *queue)
{
struct list_head new_req;
struct tape_device *device;
device = (struct tape_device *) queue->queuedata;
while (!blk_queue_empty(queue)) {
INIT_LIST_HEAD(&new_req);
spin_lock(get_ccwdev_lock(device->cdev));
__tape_process_blk_queue(device, &new_req);
spin_unlock(get_ccwdev_lock(device->cdev));
/*
* Now queue the new request to the tape. This needs to be
* done without the device lock held.
*/
if (tape_queue_requests(device, &new_req) == 0)
/* All requests queued. That's enough for now. */
break;
}
}
/*
* Acquire the device lock and process queues for the device.
*/
static void
tapeblock_tasklet(unsigned long data)
{
struct list_head new_req;
struct tape_device *device;
device = (struct tape_device *) data;
while (!blk_queue_empty(&device->blk_data.request_queue)) {
INIT_LIST_HEAD(&new_req);
spin_lock_irq(get_ccwdev_lock(device->cdev));
__tape_process_blk_queue(device, &new_req);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/*
* Now queue the new request to the tape. This needs to be
* done without the device lock held.
*/
if (tape_queue_requests(device, &new_req) == 0)
/* All requests queued. That's enough for now. */
break;
}
}
/*
* This function is called for every new tape device
*/
int
tapeblock_setup_device(struct tape_device * device)
{
request_queue_t *blk_queue;
int rc;
/* Setup request queue and initialize gendisk for this device. */
tasklet_init(&device->blk_data.tasklet, tapeblock_tasklet,
(unsigned long) device);
spin_lock_init(&device->blk_data.request_queue_lock);
blk_queue = &device->blk_data.request_queue;
rc = blk_init_queue(blk_queue, tapeblock_request_fn,
&device->blk_data.request_queue_lock);
elevator_exit(blk_queue);
rc = elevator_init(blk_queue, &elevator_noop);
if (rc) {
blk_cleanup_queue(blk_queue);
return rc;
}
/* FIXME: We should be able to sense the sector size */
blk_queue_hardsect_size(blk_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blk_queue, TAPEBLOCK_MAX_SEC);
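/* A limit of -1 effectively removes the segment restrictions. */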
blk_queue_max_phys_segments(blk_queue, -1L);
blk_queue_max_hw_segments(blk_queue, -1L);
blk_queue_max_segment_size(blk_queue, -1L);
blk_queue_segment_boundary(blk_queue, -1L);
return 0;
}
void
tapeblock_cleanup_device(struct tape_device *device)
{
blk_cleanup_queue(&device->blk_data.request_queue);
tasklet_kill(&device->blk_data.tasklet);
}
/*
* Detect number of blocks of the tape.
* FIXME: can we extend this to detect the block size as well?
*/
static int tapeblock_mediumdetect(struct tape_device *device)
{
unsigned int nr_of_blks;
int rc;
PRINT_INFO("Detecting media size...\n");
rc = tape_mtop(device, MTREW, 1);
if (rc)
return rc;
rc = tape_mtop(device, MTFSF, 1);
if (rc)
return rc;
rc = tape_mtop(device, MTTELL, 1);
if (rc)
return rc;
nr_of_blks = rc - 1; /* don't count FM */
rc = tape_mtop(device, MTREW, 1);
if (rc)
return rc;
PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
return 0;
}
/*
* Block frontend tape device open function.
*/
int
tapeblock_open(struct inode *inode, struct file *filp) {
struct tape_device *device;
int minor, rc;
MOD_INC_USE_COUNT;
if (major(filp->f_dentry->d_inode->i_rdev) != tapeblock_major) {
MOD_DEC_USE_COUNT;
return -ENODEV;
}
minor = minor(filp->f_dentry->d_inode->i_rdev);
device = tape_get_device(minor >> TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
MOD_DEC_USE_COUNT;
return PTR_ERR(device);
}
DBF_EVENT(6, "TBLOCK:open: %x\n", device->first_minor);
rc = tape_open(device);
if (rc == 0) {
rc = tape_assign(device);
if (rc == 0) {
device->blk_data.block_position = -1;
rc = tapeblock_mediumdetect(device);
if (rc == 0) {
filp->private_data = device;
return 0;
}
tape_unassign(device);
}
tape_release(device);
}
tape_put_device(device);
MOD_DEC_USE_COUNT;
return rc;
}
/*
* Block frontend tape device release function.
*/
int
tapeblock_release(struct inode *inode, struct file *filp) {
struct tape_device *device;
/* Remove all buffers at device close. */
/* FIXME: can we do that at tape unload? */
invalidate_buffers(inode->i_rdev);
device = (struct tape_device *) filp->private_data;
tape_release(device);
tape_unassign(device);
tape_put_device(device);
MOD_DEC_USE_COUNT;
return 0;
}
/*
* Initialize block device frontend.
*/
int
tapeblock_init(void)
{
int rc;
/* Register the tape major number to the kernel */
rc = register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops);
if (rc < 0) {
PRINT_ERR("can't get major %d for block device\n",
tapeblock_major);
return rc;
}
if (tapeblock_major == 0)
tapeblock_major = rc;
PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
return 0;
}
/*
* Deregister major for block device frontend
*/
void
tapeblock_exit(void)
{
unregister_blkdev(tapeblock_major, "tBLK");
}
/*
* drivers/s390/char/tape_char.c
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <asm/uaccess.h>
#include "tape.h"
#define PRINTK_HEADER "TCHAR:"
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
* file operation structure for tape character frontend
*/
static ssize_t tapechar_read(struct file *, char *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
static struct file_operations tape_fops =
{
.read = tapechar_read,
.write = tapechar_write,
.ioctl = tapechar_ioctl,
.open = tapechar_open,
.release = tapechar_release,
};
static int tapechar_major = TAPECHAR_MAJOR;
/*
* This function is called for every new tape device
*/
int
tapechar_setup_device(struct tape_device * device)
{
return 0;
}
void
tapechar_cleanup_device(struct tape_device *device)
{
}
/*
* Terminate write command (we write two TMs and skip backward over last)
* This ensures that the tape is always correctly terminated.
* When the user afterwards writes a new file, it will overwrite the
* second TM and therefore one TM will remain to separate the
* two files on the tape...
*/
static inline void
tapechar_terminate_write(struct tape_device *device)
{
if (tape_mtop(device, MTWEOF, 1) == 0 &&
tape_mtop(device, MTWEOF, 1) == 0)
tape_mtop(device, MTBSR, 1);
}
static inline int
tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
{
struct idal_buffer *new;
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size >= block_size)
return 0;
/* The current idal buffer is not big enough. Allocate a new one. */
new = idal_buffer_alloc(block_size, 0);
if (new == NULL)
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
return 0;
}
/*
* Tape device read function
*/
ssize_t
tapechar_read (struct file *filp, char *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
int rc;
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
/* Check position. */
if (ppos != &filp->f_pos) {
/*
* "A request was outside the capabilities of the device."
* This check uses internal knowledge about how pread and
* read work...
*/
DBF_EVENT(6, "TCHAR:ppos wrong\n");
return -EOVERFLOW;
}
/* Find out block size to use */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:read smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
} else {
block_size = count;
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
}
DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
/* Let the discipline build the ccw chain. */
request = device->discipline->read_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
/* Execute it. */
rc = tape_do_io(device, request);
if (rc == 0) {
rc = block_size - request->rescnt;
DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
filp->f_pos += rc;
/* Copy data from idal buffer to user space. */
if (idal_buffer_to_user(device->char_data.idal_buf,
data, rc) != 0)
rc = -EFAULT;
}
tape_free_request(request);
return rc;
}
/*
* Tape device write function
*/
ssize_t
tapechar_write(struct file *filp, const char *data, size_t count, loff_t *ppos)
{
struct tape_device *device;
struct tape_request *request;
size_t block_size;
size_t written;
int nblocks;
int i, rc;
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
/* Check position */
if (ppos != &filp->f_pos) {
/* "A request was outside the capabilities of the device." */
DBF_EVENT(6, "TCHAR:ppos wrong\n");
return -EOVERFLOW;
}
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
DBF_EVENT(3, "TCHAR:write smaller than block "
"size was requested\n");
return -EINVAL;
}
block_size = device->char_data.block_size;
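/* Integer division: a trailing partial block is not written; the caller sees the short count. */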
nblocks = count / block_size;
} else {
block_size = count;
rc = tapechar_check_idalbuffer(device, block_size);
if (rc)
return rc;
nblocks = 1;
}
DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
/* Let the discipline build the ccw chain. */
request = device->discipline->write_block(device, block_size);
if (IS_ERR(request))
return PTR_ERR(request);
rc = 0;
written = 0;
for (i = 0; i < nblocks; i++) {
/* Copy data from user space to idal buffer. */
if (idal_buffer_from_user(device->char_data.idal_buf,
data, block_size)) {
rc = -EFAULT;
break;
}
rc = tape_do_io(device, request);
if (rc)
break;
DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
block_size - request->rescnt);
filp->f_pos += block_size - request->rescnt;
written += block_size - request->rescnt;
if (request->rescnt != 0)
break;
data += block_size;
}
tape_free_request(request);
if (rc == -ENOSPC) {
/*
* Ok, the device has no more space. It has NOT written
* the block.
*/
if (device->discipline->process_eov)
device->discipline->process_eov(device);
if (written > 0)
rc = 0;
}
return rc ? rc : written;
}
/*
* Character frontend tape device open function.
*/
int
tapechar_open (struct inode *inode, struct file *filp)
{
struct tape_device *device;
int minor, rc;
MOD_INC_USE_COUNT;
if (major(filp->f_dentry->d_inode->i_rdev) != tapechar_major) {
MOD_DEC_USE_COUNT;
return -ENODEV;
}
minor = minor(filp->f_dentry->d_inode->i_rdev);
device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
if (IS_ERR(device)) {
MOD_DEC_USE_COUNT;
return PTR_ERR(device);
}
DBF_EVENT(6, "TCHAR:open: %x\n", minor(inode->i_rdev));
rc = tape_open(device);
if (rc == 0) {
rc = tape_assign(device);
if (rc == 0) {
filp->private_data = device;
return 0;
}
tape_release(device);
}
tape_put_device(device);
MOD_DEC_USE_COUNT;
return rc;
}
/*
* Character frontend tape device release function.
*/
int
tapechar_release(struct inode *inode, struct file *filp)
{
struct tape_device *device;
device = (struct tape_device *) filp->private_data;
DBF_EVENT(6, "TCHAR:release: %x\n", minor(inode->i_rdev));
#if 0
// FIXME: this is broken. Either MTWEOF/MTWEOF/MTBSR is done
// EVERYTIME the user switches from write to something different
// or it is not done at all. The second is IMHO better because
// we should NEVER do something the user didn't request.
if (device->last_op == TO_WRI)
tapechar_terminate_write(device);
#endif
/*
* If this is the rewinding tape minor then rewind.
*/
if ((minor(inode->i_rdev) & 1) != 0)
tape_mtop(device, MTREW, 1);
if (device->char_data.idal_buf != NULL) {
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = NULL;
}
device->char_data.block_size = 0;
tape_release(device);
tape_unassign(device);
tape_put_device(device);
MOD_DEC_USE_COUNT;
return 0;
}
/*
* Tape device io controls.
*/
static int
tapechar_ioctl(struct inode *inp, struct file *filp,
unsigned int no, unsigned long data)
{
struct tape_device *device;
int rc;
DBF_EVENT(6, "TCHAR:ioct\n");
device = (struct tape_device *) filp->private_data;
if (no == MTIOCTOP) {
struct mtop op;
if (copy_from_user(&op, (char *) data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
return tape_mtop(device, op.mt_op, op.mt_count);
}
if (no == MTIOCPOS) {
/* MTIOCPOS: query the tape position. */
struct mtpos pos;
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
pos.mt_blkno = rc;
if (copy_to_user((char *) data, &pos, sizeof(pos)) != 0)
return -EFAULT;
return 0;
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
struct mtget get;
memset(&get, 0, sizeof(get));
rc = tape_mtop(device, MTTELL, 1);
if (rc < 0)
return rc;
get.mt_type = MT_ISUNKNOWN;
get.mt_dsreg = device->tape_state;
/* FIXME: mt_gstat, mt_erreg, mt_fileno */
get.mt_resid = 0 /* device->devstat.rescnt */;
get.mt_gstat = 0;
get.mt_erreg = 0;
get.mt_fileno = 0;
get.mt_blkno = rc;
if (copy_to_user((char *) data, &get, sizeof(get)) != 0)
return -EFAULT;
return 0;
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
return device->discipline->ioctl_fn(device, no, data);
}
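/*
 * Illustrative user-space sketch of the ioctl interface implemented
 * above. This is not part of the driver; the device node name is an
 * assumption and depends on how the nodes are created.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mtio.h>
 *
 *	int fd = open("/dev/tape0", O_RDONLY);
 *	struct mtop op = { MTREW, 1 };
 *	ioctl(fd, MTIOCTOP, &op);	(rewind once via tape_mtop)
 *	struct mtget status;
 *	ioctl(fd, MTIOCGET, &status);	(status.mt_blkno holds the position)
 */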
/*
* Initialize character device frontend.
*/
int
tapechar_init (void)
{
int rc;
/* Register the tape major number to the kernel */
rc = register_chrdev(tapechar_major, "tape", &tape_fops);
if (rc < 0) {
PRINT_ERR("can't get major %d\n", tapechar_major);
DBF_EVENT(3, "TCHAR:initfail\n");
return rc;
}
	if (tapechar_major == 0)
		tapechar_major = rc;	/* accept dynamic major number */
	PRINT_INFO("Tape gets major %d for char device\n", tapechar_major);
	DBF_EVENT(3, "Tape gets major %d for char device\n", tapechar_major);
DBF_EVENT(3, "TCHAR:init ok\n");
return 0;
}
/*
* cleanup
*/
void
tapechar_exit(void)
{
unregister_chrdev (tapechar_major, "tape");
}
/*
* drivers/s390/char/tape_core.c
* basic function of the tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
#include <linux/spinlock.h> // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <asm/types.h> // for variable types
#include "tape.h"
#include "tape_std.h"
#define PRINTK_HEADER "T390:"
static void tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
/*
* One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock tape_device_lock.
*/
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static rwlock_t tape_device_lock = RW_LOCK_UNLOCKED;
/*
 * Wait queue for the final put of a device structure (see tape_generic_remove).
*/
static DECLARE_WAIT_QUEUE_HEAD(tape_delete_wq);
/*
* Pointer to debug area.
*/
debug_info_t *tape_dbf_area = NULL;
/*
* Printable strings for tape enumerations.
*/
const char *tape_state_verbose[TS_SIZE] =
{
[TS_UNUSED] = "UNUSED", [TS_IN_USE] = "IN_USE",
[TS_INIT] = "INIT ", [TS_NOT_OPER] = "NOT_OP"
};
const char *tape_op_verbose[TO_SIZE] =
{
[TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
[TO_BSF] = "BSF", [TO_DSE] = "DSE",
[TO_FSB] = "FSB", [TO_FSF] = "FSF",
[TO_LBL] = "LBL", [TO_NOP] = "NOP",
[TO_RBA] = "RBA", [TO_RBI] = "RBI",
[TO_RFO] = "RFO", [TO_REW] = "REW",
[TO_RUN] = "RUN", [TO_WRI] = "WRI",
[TO_WTM] = "WTM", [TO_MSEN] = "MSN",
[TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
[TO_READ_ATTMSG] = "RAT",
[TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
[TO_UNASSIGN] = "UAS"
};
/*
* Tape state functions
*/
static void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
const char *str;
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(3, "ts_set err: not oper\n");
return;
}
DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "old ts: %s\n", str);
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "new ts: %s\n", str);
device->tape_state = newstate;
wake_up(&device->state_change_wq);
}
void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
if (device->medium_state == newstate)
return;
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
PRINT_INFO("(%s): Tape is unloaded\n",
device->cdev->dev.bus_id);
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
PRINT_INFO("(%s): Tape has been mounted\n",
device->cdev->dev.bus_id);
break;
default:
// print nothing
break;
}
device->medium_state = newstate;
wake_up(&device->state_change_wq);
}
/*
* Stop running ccw. Has to be called with the device lock held.
*/
static inline int
__tape_halt_io(struct tape_device *device, struct tape_request *request)
{
int retries;
int rc;
/* Check if interrupt has already been processed */
if (request->callback == NULL)
return 0;
rc = 0;
for (retries = 0; retries < 5; retries++) {
if (retries < 2)
rc = ccw_device_halt(device->cdev, (long) request);
else
rc = ccw_device_clear(device->cdev, (long) request);
if (rc == 0)
break; /* termination successful */
if (rc == -ENODEV)
DBF_EXCEPTION(2, "device gone, retry\n");
else if (rc == -EIO)
DBF_EXCEPTION(2, "I/O error, retry\n");
else if (rc == -EBUSY)
DBF_EXCEPTION(2, "device busy, retry late\n");
else
BUG();
}
if (rc == 0)
request->status = TAPE_REQUEST_DONE;
return rc;
}
/*
* Add device into the sorted list, giving it the first
* available minor number.
*/
static int
tape_assign_minor(struct tape_device *device)
{
struct tape_device *tmp;
int minor;
minor = 0;
write_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
if (minor < tmp->first_minor)
break;
minor += TAPE_MINORS_PER_DEV;
}
if (minor >= (1 << KDEV_MINOR_BITS)) {
write_unlock(&tape_device_lock);
return -ENODEV;
}
device->first_minor = minor;
list_add_tail(&device->node, &tmp->node);
write_unlock(&tape_device_lock);
return 0;
}
/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
write_lock(&tape_device_lock);
list_del_init(&device->node);
device->first_minor = -1;
write_unlock(&tape_device_lock);
}
/*
* Enable tape device
*/
int
tape_enable_device(struct tape_device *device,
struct tape_discipline *discipline)
{
int rc;
if (device->tape_state != TS_INIT)
return -EINVAL;
/* Let the discipline have a go at the device. */
device->discipline = discipline;
rc = discipline->setup_device(device);
if (rc)
goto out;
rc = tape_assign_minor(device);
if (rc)
goto out_discipline;
rc = tapechar_setup_device(device);
if (rc)
goto out_minor;
rc = tapeblock_setup_device(device);
if (rc)
goto out_char;
tape_state_set(device, TS_UNUSED);
return 0;
out_char:
tapechar_cleanup_device(device);
out_discipline:
device->discipline->cleanup_device(device);
device->discipline = NULL;
out_minor:
tape_remove_minor(device);
out:
return rc;
}
/*
* Disable tape device. Check if there is a running request and
* terminate it. Post all queued requests with -EIO.
*/
void
tape_disable_device(struct tape_device *device)
{
struct list_head *l, *n;
struct tape_request *request;
spin_lock_irq(get_ccwdev_lock(device->cdev));
tape_state_set(device, TS_NOT_OPER);
/* Post remaining requests with -EIO */
list_for_each_safe(l, n, &device->req_queue) {
request = list_entry(l, struct tape_request, list);
if (request->status == TAPE_REQUEST_IN_IO)
__tape_halt_io(device, request);
list_del(&request->list);
/* Decrease ref_count for removed request. */
tape_put_device(device);
request->rc = -EIO;
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tapeblock_cleanup_device(device);
tapechar_cleanup_device(device);
device->discipline->cleanup_device(device);
tape_remove_minor(device);
}
/*
* Allocate memory for a new device structure.
*/
static struct tape_device *
tape_alloc_device(void)
{
struct tape_device *device;
device = (struct tape_device *)
kmalloc(sizeof(struct tape_device), GFP_KERNEL);
if (device == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
PRINT_INFO ("can't allocate memory for "
"tape info structure\n");
return ERR_PTR(-ENOMEM);
}
memset(device, 0, sizeof(struct tape_device));
device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
if (device->modeset_byte == NULL) {
DBF_EXCEPTION(2, "ti:no mem\n");
PRINT_INFO("can't allocate memory for modeset byte\n");
kfree(device);
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq);
device->tape_state = TS_INIT;
device->medium_state = MS_UNKNOWN;
*device->modeset_byte = 0;
return device;
}
/*
* Free memory of a device structure.
*/
static void
tape_free_device(struct tape_device *device)
{
kfree(device->modeset_byte);
kfree(device);
}
/*
* Find tape device by a device index.
*/
struct tape_device *
tape_get_device(int devindex)
{
struct tape_device *device, *tmp;
device = ERR_PTR(-ENODEV);
read_lock(&tape_device_lock);
list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
device = tmp;
atomic_inc(&device->ref_count);
break;
}
}
read_unlock(&tape_device_lock);
return device;
}
/*
 * Decrease the reference counter of a device structure. When the
 * counter reaches zero, wake up the sleeper (tape_generic_remove)
 * that will free the device structure.
*/
void
tape_put_device(struct tape_device *device)
{
if (atomic_dec_return(&device->ref_count) > 0)
return;
/*
* Reference counter dropped to zero. This means
* that the device is deleted and the last user
* of the device structure is gone. That is what
	 * tape_generic_remove is waiting for. Do a wake up.
*/
wake_up(&tape_delete_wq);
}
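/*
 * A minimal sketch of the get/put pairing expected by the two functions
 * above (hypothetical caller code):
 *
 *	struct tape_device *device;
 *
 *	device = tape_get_device(devindex);
 *	if (!IS_ERR(device)) {
 *		... use the device ...
 *		tape_put_device(device);
 *	}
 */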
/*
* Driverfs tape probe function.
*/
int
tape_generic_probe(struct ccw_device *cdev)
{
struct tape_device *device;
char *bus_id = cdev->dev.bus_id;
device = tape_alloc_device();
if (IS_ERR(device))
return -ENODEV;
PRINT_INFO("tape device %s found\n", bus_id);
atomic_inc(&device->ref_count);
cdev->dev.driver_data = device;
device->cdev = cdev;
cdev->handler = tape_do_irq;
return 0;
}
/*
* Driverfs tape remove function.
*/
int
tape_generic_remove(struct ccw_device *cdev)
{
struct tape_device *device;
device = cdev->dev.driver_data;
cdev->dev.driver_data = NULL;
if (device != NULL) {
tape_put_device(device);
wait_event(tape_delete_wq, atomic_read(&device->ref_count) == 0);
tape_free_device(device);
}
return 0;
}
/*
* Allocate a new tape ccw request
*/
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
struct tape_request *request;
if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
BUG();
request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
GFP_KERNEL);
if (request == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
return ERR_PTR(-ENOMEM);
}
memset(request, 0, sizeof(struct tape_request));
/* allocate channel program */
if (cplength > 0) {
request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
GFP_ATOMIC | GFP_DMA);
if (request->cpaddr == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
kfree(request);
return ERR_PTR(-ENOMEM);
}
memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
}
/* alloc small kernel buffer */
if (datasize > 0) {
request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
if (request->cpdata == NULL) {
DBF_EXCEPTION(1, "cqra nomem\n");
if (request->cpaddr != NULL)
kfree(request->cpaddr);
kfree(request);
return ERR_PTR(-ENOMEM);
}
memset(request->cpdata, 0, datasize);
}
return request;
}
/*
* Free tape ccw request
*/
void
tape_free_request (struct tape_request * request)
{
if (request->device != NULL) {
tape_put_device(request->device);
request->device = NULL;
}
if (request->cpdata != NULL)
kfree(request->cpdata);
if (request->cpaddr != NULL)
kfree(request->cpaddr);
kfree(request);
}
/*
 * Write sense data to console
*/
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
struct irb *irb)
{
unsigned int *sptr;
PRINT_INFO("-------------------------------------------------\n");
PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
if (request != NULL)
PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
sptr = (unsigned int *) irb->ecw;
PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
sptr[0], sptr[1], sptr[2], sptr[3]);
PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
sptr[4], sptr[5], sptr[6], sptr[7]);
PRINT_INFO("--------------------------------------------------\n");
}
/*
* Write sense data to dbf
*/
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
struct irb *irb)
{
unsigned int *sptr;
const char* op;
if (request != NULL)
op = tape_op_verbose[request->op];
else
op = "---";
DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
irb->scsw.dstat,irb->scsw.cstat);
DBF_EVENT(3, "DEVICE: %s OP\t: %s\n", device->cdev->dev.bus_id,op);
sptr = (unsigned int *) irb->ecw;
DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}
/*
* I/O helper function. Adds the request to the request queue
* and starts it if the tape is idle. Has to be called with
* the device lock held.
*/
static inline int
__tape_do_io(struct tape_device *device, struct tape_request *request)
{
int rc;
if (device->tape_state != TS_IN_USE)
return -ENODEV;
/* Increase use count of device for the added request. */
atomic_inc(&device->ref_count);
request->device = device;
if (list_empty(&device->req_queue)) {
/* No other requests are on the queue. Start this one. */
#ifdef CONFIG_S390_TAPE_BLOCK
if (request->op == TO_BLOCK)
device->discipline->check_locate(device, request);
#endif
rc = ccw_device_start(device->cdev, request->cpaddr,
(unsigned long) request, 0x00,
request->options);
if (rc) {
DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
return rc;
}
list_add(&request->list, &device->req_queue);
request->status = TAPE_REQUEST_IN_IO;
} else {
list_add_tail(&request->list, &device->req_queue);
request->status = TAPE_REQUEST_QUEUED;
}
return 0;
}
/*
* Add the request to the request queue, try to start it if the
* tape is idle. Return without waiting for end of i/o.
*/
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
int rc;
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Add request to request queue and try to start it. */
rc = __tape_do_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* tape_do_io/__tape_wake_up
* Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
*/
static void
__tape_wake_up(struct tape_request *request, void *data)
{
request->callback = NULL;
wake_up((wait_queue_head_t *) data);
}
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
wait_queue_head_t wq;
int rc;
init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up;
request->callback_data = &wq;
/* Add request to request queue and try to start it. */
rc = __tape_do_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
wait_event(wq, (request->callback == NULL));
/* Get rc from request */
return request->rc;
}
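/*
 * Sketch of the synchronous request pattern built on top of tape_do_io.
 * The channel program below is only an example (a single NOP) and assumes
 * the device has already been opened (TS_IN_USE); real disciplines build
 * their chains as in tape_std.c further down.
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	tape_ccw_end(request->cpaddr, NOP, 0, NULL);
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 *	return rc;
 */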
/*
* tape_do_io_interruptible/__tape_wake_up_interruptible
* Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
*/
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
request->callback = NULL;
wake_up_interruptible((wait_queue_head_t *) data);
}
int
tape_do_io_interruptible(struct tape_device *device,
struct tape_request *request)
{
wait_queue_head_t wq;
int rc;
init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */
request->callback = __tape_wake_up_interruptible;
request->callback_data = &wq;
rc = __tape_do_io(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc)
return rc;
/* Request added to the queue. Wait for its completion. */
rc = wait_event_interruptible(wq, (request->callback == NULL));
if (rc != -ERESTARTSYS)
/* Request finished normally. */
return request->rc;
/* Interrupted by a signal. We have to stop the current request. */
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = __tape_halt_io(device, request);
if (rc == 0) {
DBF_EVENT(3, "IO stopped on %s\n", device->cdev->dev.bus_id);
rc = -ERESTARTSYS;
}
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
static inline void
__tape_do_io_list(struct tape_device *device)
{
struct list_head *l, *n;
struct tape_request *request;
int rc;
if (device->tape_state != TS_IN_USE)
return;
/*
* Try to start each request on request queue until one is
	 * started successfully.
*/
list_for_each_safe(l, n, &device->req_queue) {
request = list_entry(l, struct tape_request, list);
#ifdef CONFIG_S390_TAPE_BLOCK
if (request->op == TO_BLOCK)
device->discipline->check_locate(device, request);
#endif
rc = ccw_device_start(device->cdev, request->cpaddr,
(unsigned long) request, 0x00,
request->options);
if (rc == 0) {
request->status = TAPE_REQUEST_IN_IO;
break;
}
/* Start failed. Remove request and indicate failure. */
DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
list_del(&request->list);
/* Set ending status and do callback. */
request->rc = rc;
request->status = TAPE_REQUEST_DONE;
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
}
/*
* Tape interrupt routine, called from the ccw_device layer
*/
static void
tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct tape_device *device;
struct tape_request *request;
int final;
int rc;
device = (struct tape_device *) cdev->dev.driver_data;
if (device == NULL) {
PRINT_ERR("could not get device structure for bus_id %s "
"in interrupt\n", cdev->dev.bus_id);
return;
}
request = (struct tape_request *) intparm;
/* May be an unsolicited irq */
if(request != NULL)
request->rescnt = irb->scsw.count;
if (irb->scsw.dstat != 0x0c){
/* Set the 'ONLINE' flag depending on sense byte 1 */
if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
device->tape_generic_status |= GMT_ONLINE(~0);
else
device->tape_generic_status &= ~GMT_ONLINE(~0);
/*
* Any request that does not come back with channel end
* and device end is unusual. Log the sense data.
*/
DBF_EVENT(3,"-- Tape Interrupthandler --\n");
tape_dump_sense_dbf(device, request, irb);
} else {
/* Upon normal completion the device _is_ online */
device->tape_generic_status |= GMT_ONLINE(~0);
}
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "tape:device is not operational\n");
return;
}
rc = device->discipline->irq(device, request, irb);
/*
* rc < 0 : request finished unsuccessfully.
* rc == TAPE_IO_SUCCESS: request finished successfully.
* rc == TAPE_IO_PENDING: request is still running. Ignore rc.
* rc == TAPE_IO_RETRY: request finished but needs another go.
* rc == TAPE_IO_STOP: request needs to get terminated.
*/
final = 0;
switch (rc) {
case TAPE_IO_SUCCESS:
final = 1;
break;
case TAPE_IO_PENDING:
break;
case TAPE_IO_RETRY:
#ifdef CONFIG_S390_TAPE_BLOCK
if (request->op == TO_BLOCK)
device->discipline->check_locate(device, request);
#endif
rc = ccw_device_start(cdev, request->cpaddr,
(unsigned long) request, 0x00,
request->options);
if (rc) {
DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
final = 1;
}
break;
case TAPE_IO_STOP:
__tape_halt_io(device, request);
rc = -EIO;
final = 1;
break;
default:
if (rc > 0) {
DBF_EVENT(6, "xunknownrc\n");
PRINT_ERR("Invalid return code from discipline "
"interrupt function.\n");
rc = -EIO;
}
final = 1;
break;
}
if (final) {
/* May be an unsolicited irq */
if(request != NULL) {
/* Set ending status. */
request->rc = rc;
request->status = TAPE_REQUEST_DONE;
/* Remove from request queue. */
list_del(&request->list);
/* Do callback. */
if (request->callback != NULL)
request->callback(request, request->callback_data);
}
/* Start next request. */
__tape_do_io_list(device);
}
}
/*
* Lock a shared tape for our exclusive use.
*/
int
tape_assign(struct tape_device *device)
{
int rc;
rc = device->discipline->assign(device);
if (rc) {
PRINT_WARN("(%s): assign failed - device might be busy\n",
device->cdev->dev.bus_id);
DBF_EVENT(3, "(%s): assign failed - device might be busy\n",
device->cdev->dev.bus_id);
return rc;
}
DBF_EVENT(3, "(%s): assign lpum = %02x\n",
device->cdev->dev.bus_id,
0 /* FIXME: device->devstat.lpum */ );
return 0;
}
/*
* Unlock a shared tape.
*/
int
tape_unassign(struct tape_device *device)
{
int rc;
rc = device->discipline->unassign(device);
if (rc) {
PRINT_WARN("(%s): unassign failed\n",
device->cdev->dev.bus_id);
DBF_EVENT(3, "(%s): unassign failed\n",
device->cdev->dev.bus_id);
return rc;
}
DBF_EVENT(3, "(%s): unassign lpum = %02x\n",
device->cdev->dev.bus_id,
0 /* FIXME: device->devstat.lpum */ );
return 0;
}
/*
* Tape device open function used by tape_char & tape_block frontends.
*/
int
tape_open(struct tape_device *device)
{
int rc;
spin_lock(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_NOT_OPER) {
DBF_EVENT(6, "TAPE:nodev\n");
rc = -ENODEV;
} else if (device->tape_state == TS_IN_USE) {
DBF_EVENT(6, "TAPE:dbusy\n");
rc = -EBUSY;
} else if (device->discipline != NULL &&
!try_inc_mod_count(device->discipline->owner)) {
DBF_EVENT(6, "TAPE:nodisc\n");
rc = -ENODEV;
} else {
tape_state_set(device, TS_IN_USE);
rc = 0;
}
spin_unlock(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* Tape device release function used by tape_char & tape_block frontends.
*/
int
tape_release(struct tape_device *device)
{
spin_lock(get_ccwdev_lock(device->cdev));
if (device->tape_state == TS_IN_USE)
tape_state_set(device, TS_UNUSED);
if (device->discipline->owner)
__MOD_DEC_USE_COUNT(device->discipline->owner);
spin_unlock(get_ccwdev_lock(device->cdev));
return 0;
}
/*
* Execute a magnetic tape command a number of times.
*/
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
tape_mtop_fn fn;
int rc;
DBF_EVENT(6, "TAPE:mtio\n");
DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
return -EINVAL;
fn = device->discipline->mtop_array[mt_op];
if (fn == NULL)
return -EINVAL;
/* We assume that the backends can handle count up to 500. */
if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
	    mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
rc = 0;
for (; mt_count > 500; mt_count -= 500)
if ((rc = fn(device, 500)) != 0)
break;
if (rc == 0)
rc = fn(device, mt_count);
} else
rc = fn(device, mt_count);
return rc;
}
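/*
 * Example of the splitting above: an MTFSR request with mt_count == 1200
 * is executed as fn(device, 500), fn(device, 500) and finally
 * fn(device, 200), stopping early if any of the calls fails.
 */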
/*
* Tape init function.
*/
static int
tape_init (void)
{
tape_dbf_area = debug_register ( "tape", 1, 2, 3*sizeof(long));
debug_register_view(tape_dbf_area, &debug_sprintf_view);
DBF_EVENT(3, "tape init: ($Revision: 1.21 $)\n");
tape_proc_init();
tapechar_init ();
tapeblock_init ();
return 0;
}
/*
* Tape exit function.
*/
static void
tape_exit(void)
{
DBF_EVENT(6, "tape exit\n");
/* Get rid of the frontends */
tapechar_exit();
tapeblock_exit();
tape_proc_cleanup();
debug_unregister (tape_dbf_area);
}
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
"Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached "
"tape device driver ($Revision: 1.21 $)");
module_init(tape_init);
module_exit(tape_exit);
EXPORT_SYMBOL(tape_dbf_area);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);
/***************************************************************************
*
* drivers/s390/char/tape_idalbuf.h
* functions for idal buffer handling
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*
****************************************************************************
*/
#include <linux/kernel.h>
#include <asm/hardirq.h> // in_interrupt
/*
* Macros
*/
#ifdef CONFIG_ARCH_S390X
// on ESAME each idal entry points to a 4K buffer
#define IDALBUF_BLK_SIZE 4096
#else
// on ESA each idal entry points to a 2K buffer
#define IDALBUF_BLK_SIZE 2048
#endif
#define IDALBUF_MAX_ENTRIES 33 // an ida list can have up to 33 entries
#define IDALBUF_PAGE_ORDER 1 // each chunk has 2exp(1) pages
#define __IDALBUF_CHUNK_SIZE ((1<<IDALBUF_PAGE_ORDER) * PAGE_SIZE)
#define __IDALBUF_ENTRIES_PER_CHUNK (__IDALBUF_CHUNK_SIZE/IDALBUF_BLK_SIZE)
// Macro to find out whether we need idal addressing
#ifdef CONFIG_ARCH_S390X
#define __IDALBUF_DIRECT_ADDR(idal) \
( (idal->size <= __IDALBUF_CHUNK_SIZE) \
&& ( ( ((unsigned long)idal->data[0]) >> 31) == 0) )
#else
#define __IDALBUF_DIRECT_ADDR(idal) \
(idal->size <= __IDALBUF_CHUNK_SIZE)
#endif
#ifndef MIN
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
/*
* The idalbuf data structure
*/
typedef struct _idalbuf_t{
void* data[IDALBUF_MAX_ENTRIES];
int size;
} idalbuf_t;
static inline unsigned int
__round_up_multiple (unsigned int no, unsigned int mult)
{
int rem = no % mult;
return (rem ? no - rem + mult : no);
}
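/*
 * Example: __round_up_multiple(6000, IDALBUF_BLK_SIZE) with a 2K block
 * size yields 6144, i.e. three idal entries are needed for 6000 bytes.
 */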
/*
 * Set up a ccw so that its data buffer refers to an idalbuf_t
*/
static inline void
idalbuf_set_normalized_cda(ccw1_t *ccw, idalbuf_t* idal)
{
if(__IDALBUF_DIRECT_ADDR(idal)){
// we do not need idals - use direct addressing
ccw->cda = (unsigned long) idal->data[0];
} else {
// setup idals
ccw->flags |= CCW_FLAG_IDA;
ccw->cda = (unsigned long) idal->data;
}
ccw->count = idal->size;
}
/*
 * Allocate an idal buffer holding 'size' bytes
*/
static inline idalbuf_t*
idalbuf_alloc(size_t size)
{
int i = 0;
int count = __round_up_multiple(size,IDALBUF_BLK_SIZE) / IDALBUF_BLK_SIZE;
idalbuf_t* rc;
char* addr = NULL;
int kmalloc_flags;
if(in_interrupt())
kmalloc_flags = GFP_ATOMIC;
else
kmalloc_flags = GFP_KERNEL;
	if(count > IDALBUF_MAX_ENTRIES)
BUG();
// the ida list must be below 2GB --> GFP_DMA
rc = kmalloc(sizeof(idalbuf_t),kmalloc_flags | GFP_DMA);
if(!rc)
goto error;
for(i=0; i< count;i++){
if((i % __IDALBUF_ENTRIES_PER_CHUNK) == 0){
// data does not need to be below 2GB
rc->data[i] = (void*)__get_free_pages(kmalloc_flags ,IDALBUF_PAGE_ORDER);
if(!rc->data[i])
goto error;
addr = (char*)(rc->data[i]);
} else {
addr+=IDALBUF_BLK_SIZE;
rc->data[i] = addr;
}
}
rc->size=size;
return rc;
error:
if(rc){
int end = i;
		for(i=0;i<end;i+=__IDALBUF_ENTRIES_PER_CHUNK)
			free_pages((unsigned long)rc->data[i],IDALBUF_PAGE_ORDER);
kfree(rc);
}
return NULL;
}
/*
* Free an idal buffer
*/
static inline void
idalbuf_free(idalbuf_t* idal)
{
int count = __round_up_multiple(idal->size,__IDALBUF_CHUNK_SIZE)/__IDALBUF_CHUNK_SIZE;
int i;
for(i = 0; i < count; i++){
free_pages((unsigned long)idal->data[i*__IDALBUF_ENTRIES_PER_CHUNK],IDALBUF_PAGE_ORDER);
}
kfree(idal);
}
/*
* Copy count bytes from an idal buffer to contiguous user memory
*/
static inline int
idalbuf_copy_to_user(void* to, const idalbuf_t* from, size_t count)
{
int i;
int rc = 0;
if(count > from->size)
BUG();
for(i = 0; i < count; i+=__IDALBUF_CHUNK_SIZE){
rc = copy_to_user(((char*)to) + i,from->data[i/IDALBUF_BLK_SIZE],MIN(__IDALBUF_CHUNK_SIZE,(count-i)));
if(rc)
goto out;
}
out:
return rc;
}
/*
* Copy count bytes from contiguous user memory to an idal buffer
*/
static inline int
idalbuf_copy_from_user(idalbuf_t* to, const void* from, size_t count)
{
int i;
int rc = 0;
if(count > to->size)
BUG();
for(i = 0; i < count; i+=__IDALBUF_CHUNK_SIZE){
rc = copy_from_user(to->data[i/IDALBUF_BLK_SIZE],((char*)from)+i,MIN(__IDALBUF_CHUNK_SIZE,(count-i)));
if(rc)
goto out;
}
out:
return rc;
}
/*
* Copy count bytes from an idal buffer to a contiguous kernel buffer
*/
static inline void
idalbuf_copy_from_idal(void* to, const idalbuf_t* from, size_t count)
{
int i;
if(count > from->size)
BUG();
for(i = 0; i < count; i+=__IDALBUF_CHUNK_SIZE){
memcpy((char*)to + i,(from->data[i/IDALBUF_BLK_SIZE]),MIN(__IDALBUF_CHUNK_SIZE,(count-i)) );
}
}
/*
* Copy count bytes from a contiguous kernel buffer to an idal buffer
*/
static inline void
idalbuf_copy_to_idal(idalbuf_t* to, const void* from, size_t count)
{
int i;
if(count > to->size)
BUG();
for(i = 0; i < count; i+=__IDALBUF_CHUNK_SIZE){
memcpy(to->data[i/IDALBUF_BLK_SIZE],(char*)from+i,MIN(__IDALBUF_CHUNK_SIZE,(count-i)) );
}
}
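/*
 * Sketch of how the helpers in this header fit together (hypothetical
 * caller, assuming a ccw1_t "ccw" that is part of a channel program and
 * a kernel buffer "kernel_data"):
 *
 *	idalbuf_t *buf;
 *
 *	buf = idalbuf_alloc(4096);
 *	if (buf != NULL) {
 *		idalbuf_copy_to_idal(buf, kernel_data, 4096);
 *		idalbuf_set_normalized_cda(&ccw, buf);
 *		... start the channel program and wait for it ...
 *		idalbuf_free(buf);
 *	}
 */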
/*
* drivers/s390/char/tape.c
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
* PROCFS Functions
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include "tape.h"
#define PRINTK_HEADER "T390:"
static const char *tape_med_st_verbose[MS_SIZE] =
{
[MS_UNKNOWN] = "UNKNOWN ",
[MS_LOADED] = "LOADED ",
[MS_UNLOADED] = "UNLOADED"
};
/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
* Show function for /proc/tapedevices
*/
static int tape_proc_show(struct seq_file *m, void *v)
{
struct tape_device *device;
struct tape_request *request;
const char *str;
unsigned long n;
n = (unsigned long) v - 1;
if (!n) {
seq_printf(m, "TapeNo\tDevNo\tCuType\tCuModel\tDevType\t"
"DevMod\tBlkSize\tState\tOp\tMedState\n");
}
device = tape_get_device(n);
if (IS_ERR(device))
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
seq_printf(m, "%d\t", (int) n);
seq_printf(m, "%s\t", device->cdev->dev.bus_id);
seq_printf(m, "%04X\t", device->cdev->id.cu_type);
seq_printf(m, "%02X\t", device->cdev->id.cu_model);
seq_printf(m, "%04X\t", device->cdev->id.dev_type);
seq_printf(m, "%02X\t", device->cdev->id.dev_model);
if (device->char_data.block_size == 0)
seq_printf(m, "auto\t");
else
seq_printf(m, "%i\t", device->char_data.block_size);
if (device->tape_state >= 0 &&
device->tape_state < TS_SIZE)
str = tape_state_verbose[device->tape_state];
else
str = "UNKNOWN";
seq_printf(m, "%s\t", str);
if (!list_empty(&device->req_queue)) {
request = list_entry(device->req_queue.next,
struct tape_request, list);
str = tape_op_verbose[request->op];
} else
str = "---";
seq_printf(m, "%s\t", str);
seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
tape_put_device(device);
return 0;
}
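/*
 * The resulting /proc/tapedevices layout, one line per device after the
 * header (the values shown are illustrative only):
 *
 * TapeNo	DevNo	CuType	CuModel	DevType	DevMod	BlkSize	State	Op	MedState
 * 0	0.0.0181	3490	50	3490	40	auto	UNUSED	---	LOADED
 */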
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= (1 << KDEV_MINOR_BITS) / TAPE_MINORS_PER_DEV)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return tape_proc_start(m, pos);
}
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
static struct seq_operations tape_proc_seq = {
.start = tape_proc_start,
.next = tape_proc_next,
.stop = tape_proc_stop,
.show = tape_proc_show,
};
static int tape_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tape_proc_seq);
}
static struct file_operations tape_proc_ops =
{
.open = tape_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* Initialize procfs stuff on startup
*/
void
tape_proc_init(void)
{
tape_proc_devices =
create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
&proc_root);
if (tape_proc_devices == NULL) {
PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
return;
}
tape_proc_devices->proc_fops = &tape_proc_ops;
tape_proc_devices->owner = THIS_MODULE;
}
/*
* Cleanup all stuff registered to the procfs
*/
void
tape_proc_cleanup(void)
{
if (tape_proc_devices != NULL)
remove_proc_entry ("tapedevices", &proc_root);
}
/*
* drivers/s390/char/tape_std.c
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/version.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <asm/types.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#include "tape.h"
#include "tape_std.h"
#define PRINTK_HEADER "T3xxx:"
/*
* tape_std_assign
*/
int
tape_std_assign(struct tape_device *device)
{
struct tape_request *request;
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_ASSIGN;
tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
return tape_do_io_free(device, request);
}
/*
* tape_std_unassign
*/
int
tape_std_unassign (struct tape_device *device)
{
struct tape_request *request;
request = tape_alloc_request(2, 11);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_UNASSIGN;
tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
return tape_do_io_free(device, request);
}
/*
* TAPE390_DISPLAY: Show a string on the tape display.
*/
int
tape_std_display(struct tape_device *device, int cmd, unsigned long arg)
{
struct display_struct d_struct;
struct tape_request *request;
int rc;
if (copy_from_user(&d_struct, (char *) arg, sizeof(d_struct)) != 0)
return -EFAULT;
request = tape_alloc_request(2, 17);
if (IS_ERR(request)) {
DBF_EVENT(3, "TAPE: load display failed\n");
return PTR_ERR(request);
}
request->op = TO_DIS;
*(unsigned char *) request->cpdata = d_struct.cntrl;
memcpy(((unsigned char *) request->cpdata) + 1, d_struct.message1, 8);
memcpy(((unsigned char *) request->cpdata) + 9, d_struct.message2, 8);
ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return rc;
}
/*
* Read block id.
*/
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(3, 8);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RBI;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0)
/* Get result from read buffer. */
*id = *(__u64 *) request->cpdata;
tape_free_request(request);
return rc;
}
/*
* MTLOAD: Loads the tape.
 * The default implementation just waits until the tape medium state changes
* to MS_LOADED.
*/
int
tape_std_mtload(struct tape_device *device, int count)
{
return wait_event_interruptible(device->state_change_wq,
(device->medium_state == MS_LOADED));
}
/*
* MTSETBLK: Set block size.
*/
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
struct idal_buffer *new;
if (count <= 0) {
/*
* Just set block_size to 0. tapechar_read/tapechar_write
* will realloc the idal buffer if a bigger one than the
* current is needed.
*/
device->char_data.block_size = 0;
return 0;
}
if (device->char_data.idal_buf != NULL &&
device->char_data.idal_buf->size == count)
		/* We already have an idal buffer of that size. */
return 0;
/* Allocate a new idal buffer. */
new = idal_buffer_alloc(count, 0);
if (new == NULL)
return -ENOMEM;
if (device->char_data.idal_buf != NULL)
idal_buffer_free(device->char_data.idal_buf);
device->char_data.idal_buf = new;
device->char_data.block_size = count;
return 0;
}
/*
* MTRESET: Set block size to 0.
*/
int
tape_std_mtreset(struct tape_device *device, int count)
{
DBF_EVENT(6, "TCHAR:devreset:\n");
device->char_data.block_size = 0;
return 0;
}
/*
* MTFSF: Forward space over 'count' file marks. The tape is positioned
* at the EOT (End of Tape) side of the file mark.
*/
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTFSR: Forward space over 'count' tape blocks (blocksize is set
 * via MTSETBLK).
*/
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSR: Backward space over 'count' tape blocks.
 * (blocksize is set via MTSETBLK).
*/
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSB;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTWEOF: Write 'count' file marks at the current position.
*/
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_WTM;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSFM: Backward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side of the
* last skipped file mark.
*/
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTBSF: Backward space over 'count' file marks. The tape is positioned at
* the EOT (End of Tape) side of the last skipped file mark.
*/
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_BSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0) {
request->op = TO_FSF;
/* need to skip forward over the filemark. */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
}
tape_free_request(request);
return rc;
}
/*
* MTFSFM: Forward space over 'count' file marks.
* The tape is positioned at the BOT (Begin Of Tape) side
* of the last skipped file mark.
*/
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
struct tape_request *request;
struct ccw1 *ccw;
int rc;
request = tape_alloc_request(mt_count + 2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
if (rc == 0) {
request->op = TO_BSF;
		/* need to skip back over the filemark. */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, BACKSPACEFILE, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
rc = tape_do_io(device, request);
}
tape_free_request(request);
return rc;
}
/*
* MTREW: Rewind the tape.
*/
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_REW;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTOFFL: Rewind the tape and put the drive off-line.
* Implement 'rewind unload'
*/
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RUN;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTNOP: 'No operation'.
*/
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTEOM: positions at the end of the portion of the tape already used
 * for recording data. MTEOM positions after the last file mark, ready for
* appending another file.
*/
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(4, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
/* execute it */
tape_do_io_interruptible(device, request);
tape_free_request(request);
/* MTEOM/MTRETEN errors get ignored. */
return 0;
}
/*
* MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
*/
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
struct tape_request *request;
int rc;
request = tape_alloc_request(4, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_FSF;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
/* execute it, MTRETEN rc gets ignored */
rc = tape_do_io_interruptible(device, request);
tape_free_request(request);
return tape_std_mtrew(device, 1);
}
/*
* MTERASE: erases the tape.
*/
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(5, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_DSE;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
tape_ccw_end(request->cpaddr + 4, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* MTUNLOAD: Rewind the tape and unload it.
*/
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
struct tape_request *request;
request = tape_alloc_request(3, 32);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_RUN;
/* setup ccws */
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
tape_ccw_end(request->cpaddr + 2, SENSE, 32, request->cpdata);
/* execute it */
return tape_do_io_free(device, request);
}
/*
 * MTCOMPRESSION: used to enable or disable compression.
 * Sets IDRC (improved data recording capability) on/off.
*/
int
tape_std_mtcompression(struct tape_device *device, int mt_count)
{
struct tape_request *request;
if (mt_count < 0 || mt_count > 1) {
DBF_EXCEPTION(6, "xcom parm\n");
if (*device->modeset_byte & 0x08)
PRINT_INFO("(%s) Compression is currently on\n",
device->cdev->dev.bus_id);
else
PRINT_INFO("(%s) Compression is currently off\n",
device->cdev->dev.bus_id);
PRINT_INFO("Use 1 to switch compression on, 0 to "
"switch it off\n");
return -EINVAL;
}
request = tape_alloc_request(2, 0);
if (IS_ERR(request))
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
*device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
return tape_do_io_free(device, request);
}
/*
* Read Block
*/
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
/*
	 * We have to alloc 4 ccws in order to be able to transform the request
	 * into a read backward request in the error case.
*/
request = tape_alloc_request(4, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xrbl fail");
return request;
}
request->op = TO_RFO;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
device->char_data.idal_buf);
DBF_EVENT(6, "xrbl ccwg\n");
return request;
}
/*
* Read Block backward transformation function.
*/
void
tape_std_read_backward(struct tape_device *device, struct tape_request *request)
{
/*
	 * We have allocated 4 ccws in tape_std_read_block, so we can now
* transform the request to a read backward, followed by a
* forward space block.
*/
request->op = TO_RBA;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
device->char_data.idal_buf);
tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
DBF_EVENT(6, "xrop ccwg");}
/*
* Write Block
*/
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
struct tape_request *request;
request = tape_alloc_request(2, 0);
if (IS_ERR(request)) {
DBF_EXCEPTION(6, "xwbl fail\n");
return request;
}
request->op = TO_WRI;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
device->char_data.idal_buf);
DBF_EVENT(6, "xwbl ccwg\n");
return request;
}
/*
 * This routine is called by the frontend after an ENOSPC on write.
*/
void
tape_std_process_eov(struct tape_device *device)
{
/*
* End of volume: We have to backspace the last written record, then
* we TRY to write a tapemark and then backspace over the written TM
*/
if (tape_mtop(device, MTBSR, 1) == 0 &&
tape_mtop(device, MTWEOF, 1) == 0) {
tape_mtop(device, MTBSR, 1);
}
}
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);
/*
 * drivers/s390/char/tape_std.h
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_STD_H
#define _TAPE_STD_H
/*
* The CCW commands for the Tape type of command.
*/
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD        0x0C       /* Read backward */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01
/* discipline functions */
struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
struct tape_request *tape_std_bread(struct tape_device *, struct request *);
void tape_std_free_bread(struct tape_request *);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
struct tape_request *tape_std_bwrite(struct request *,
struct tape_device *, int);
/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
int tape_std_unassign(struct tape_device *);
int tape_std_read_block_id(struct tape_device *device, __u64 *id);
int tape_std_display(struct tape_device *, int, unsigned long);
/* Standard magnetic tape commands. */
int tape_std_mtbsf(struct tape_device *, int);
int tape_std_mtbsfm(struct tape_device *, int);
int tape_std_mtbsr(struct tape_device *, int);
int tape_std_mtcompression(struct tape_device *, int);
int tape_std_mteom(struct tape_device *, int);
int tape_std_mterase(struct tape_device *, int);
int tape_std_mtfsf(struct tape_device *, int);
int tape_std_mtfsfm(struct tape_device *, int);
int tape_std_mtfsr(struct tape_device *, int);
int tape_std_mtload(struct tape_device *, int);
int tape_std_mtnop(struct tape_device *, int);
int tape_std_mtoffl(struct tape_device *, int);
int tape_std_mtreset(struct tape_device *, int);
int tape_std_mtreten(struct tape_device *, int);
int tape_std_mtrew(struct tape_device *, int);
int tape_std_mtsetblk(struct tape_device *, int);
int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);
/* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);
// the error recovery stuff:
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
#endif // _TAPE_STD_H
/***************************************************************************
*
* drivers/s390/char/tapeblock.c
* block device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
*
****************************************************************************
*/
#include "tapedefs.h"
#include <linux/config.h>
#include <linux/blkdev.h>
#include <linux/blk.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <asm/debug.h>
#include <asm/s390dyn.h>
#include <linux/compatmac.h>
#ifdef MODULE
#define __NO_VERSION__
#include <linux/module.h>
#endif
#include "tape.h"
#include "tapeblock.h"
#define PRINTK_HEADER "TBLOCK:"
/*
* file operation structure for tape devices
*/
static struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
};
int tapeblock_major = 0;
static void tape_request_fn (request_queue_t * queue);
static request_queue_t* tapeblock_getqueue (kdev_t kdev);
#ifdef CONFIG_DEVFS_FS
devfs_handle_t
tapeblock_mkdevfstree (tape_dev_t* td) {
devfs_handle_t rc=NULL;
char name[32];
sprintf (name, "tape/%04x/block", td->devinfo.devno);
rc = devfs_mk_dir (NULL, name, NULL);
if (rc==NULL) goto out_undo;
sprintf (name, "tape/%04x/block/disc", td->devinfo.devno);
rc = devfs_register(NULL, name, DEVFS_FL_DEFAULT,
tapeblock_major, td->first_minor,
TAPEBLOCK_DEVFSMODE, &tapeblock_fops, td);
if (rc==NULL) goto out_undo;
goto out;
out_undo:
tapeblock_rmdevfstree(td);
out:
return rc;
}
void
tapeblock_rmdevfstree (tape_dev_t* td) {
devfs_remove("tape/%04x/block/disc", td->devinfo.devno);
devfs_remove("tape/%04x/block", td->devinfo.devno);
}
#endif
void
tapeblock_setup(tape_dev_t* td) {
	blk_queue_hardsect_size(&td->blk_data.request_queue, 2048);
blk_init_queue (&td->blk_data.request_queue, tape_request_fn);
#ifdef CONFIG_DEVFS_FS
tapeblock_mkdevfstree(td);
#endif
set_device_ro (MKDEV(tapeblock_major, td->first_minor), 1);
}
int
tapeblock_init(void) {
int result;
tape_frontend_t* blkfront,*temp;
tape_dev_t* td;
tape_init();
/* Register the tape major number to the kernel */
result = register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops);
if (result < 0) {
PRINT_WARN(KERN_ERR "tape: can't get major %d for block device\n", tapeblock_major);
result=-ENODEV;
goto out;
}
if (tapeblock_major == 0) tapeblock_major = result; /* accept dynamic major number*/
INIT_BLK_DEV(tapeblock_major,tape_request_fn,tapeblock_getqueue,NULL);
PRINT_WARN(KERN_ERR " tape gets major %d for block device\n", tapeblock_major);
max_sectors[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_KERNEL);
if (max_sectors[tapeblock_major]==NULL) goto out_undo_hardsect_size;
memset(max_sectors[tapeblock_major],0,256*sizeof(int));
blkfront = kmalloc(sizeof(tape_frontend_t),GFP_KERNEL);
if (blkfront==NULL) goto out_undo_max_sectors;
blkfront->device_setup=(tape_setup_device_t)tapeblock_setup;
#ifdef CONFIG_DEVFS_FS
blkfront->mkdevfstree = tapeblock_mkdevfstree;
blkfront->rmdevfstree = tapeblock_rmdevfstree;
#endif
blkfront->next=NULL;
if (tape_first_front==NULL) {
tape_first_front=blkfront;
} else {
temp=tape_first_front;
while (temp->next!=NULL)
temp=temp->next;
temp->next=blkfront;
}
td=tape_first_dev;
while (td!=NULL) {
tapeblock_setup(td);
td=td->next;
}
result=0;
goto out;
out_undo_max_sectors:
kfree(max_sectors[tapeblock_major]);
out_undo_hardsect_size:
unregister_blkdev(tapeblock_major, "tBLK");
result=-ENOMEM;
max_sectors[tapeblock_major]=NULL;
tapeblock_major=-1;
out:
return result;
}
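/*
 * Block device frontend cleanup.
 */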
void
tapeblock_uninit(void) {
if (tapeblock_major==-1)
goto out; /* init failed so there is nothing to clean up */
if (max_sectors[tapeblock_major]!=NULL) {
kfree (max_sectors[tapeblock_major]);
max_sectors[tapeblock_major]=NULL;
}
unregister_blkdev(tapeblock_major, "tBLK");
out:
return;
}
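/*
 * Block device open function.
 */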
int
tapeblock_open(struct inode *inode, struct file *filp) {
tape_dev_t *td = NULL;
int rc = 0;
long lockflags;
tape_sprintf_event (tape_dbf_area,6,"b:open: %x\n",td->first_minor);
inode = filp->f_dentry->d_inode;
td = tape_get_device_by_minor(MINOR (inode->i_rdev));
if (td == NULL){
rc = -ENODEV;
goto error;
}
s390irq_spin_lock_irqsave (td->devinfo.irq, lockflags);
if (tape_state_get(td) == TS_NOT_OPER) {
tape_sprintf_event (tape_dbf_area,6,"c:nodev\n");
rc = -ENODEV;
goto out_rel_lock;
}
if (tape_state_get (td) != TS_UNUSED) {
tape_sprintf_event (tape_dbf_area,6,"b:dbusy\n");
rc = -EBUSY;
goto out_rel_lock;
}
tape_state_set (td, TS_IN_USE);
td->blk_data.position=-1;
s390irq_spin_unlock_irqrestore (td->devinfo.irq, lockflags);
rc=tapeblock_mediumdetect(td);
if (rc) {
s390irq_spin_lock_irqsave (td->devinfo.irq, lockflags);
tape_state_set (td, TS_UNUSED);
goto out_rel_lock; // the medium size could not be determined
}
if ( td->discipline->owner )
__MOD_INC_USE_COUNT(td->discipline->owner);
s390irq_spin_lock_irqsave (td->devinfo.irq, lockflags);
td->filp = filp;
filp->private_data = td;/* save the dev.info for later reference */
out_rel_lock:
s390irq_spin_unlock_irqrestore (td->devinfo.irq, lockflags);
error:
if(rc != 0){
if (td != NULL)
tape_put_device(td);
}
return rc;
}
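/*
 * Block device release function.
 */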
int
tapeblock_release(struct inode *inode, struct file *filp) {
long lockflags;
tape_dev_t *td = NULL;
int rc = 0;
if((!inode) || !(inode->i_rdev)) {
rc = -EINVAL;
goto out;
}
td = tape_get_device_by_minor(MINOR (inode->i_rdev));
if (td==NULL) {
rc = -ENODEV;
goto out;
}
s390irq_spin_lock_irqsave (td->devinfo.irq, lockflags);
tape_sprintf_event (tape_dbf_area,6,"b:release: %x\n",td->first_minor);
if(tape_state_get(td) == TS_IN_USE)
tape_state_set (td, TS_UNUSED);
else if (tape_state_get(td) != TS_NOT_OPER)
BUG();
s390irq_spin_unlock_irqrestore (td->devinfo.irq, lockflags);
if ( td->discipline->owner )
__MOD_DEC_USE_COUNT(td->discipline->owner);
tape_put_device(td);
tape_put_device(td); /* twice: once for the get in this function, once for the get in open */
out:
return rc;
}
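/*
 * Complete the current block request: notify the buffer heads, finish
 * the request and free the channel program.
 */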
static void
tapeblock_end_request(tape_dev_t* td) {
struct buffer_head *bh;
int uptodate;
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
if(treq == NULL){
uptodate = 0;
}
else
uptodate=(treq->rc == 0); // is the buffer up to date?
if (uptodate) {
tape_sprintf_event (tape_dbf_area,6,"b:done: %x\n",(unsigned long)treq);
} else {
tape_sprintf_event (tape_dbf_area,3,"b:failed: %x\n",(unsigned long)treq);
}
// now inform ll_rw_block about the request status
while ((bh = td->blk_data.current_request->bh) != NULL) {
td->blk_data.current_request->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
bh->b_end_io (bh, uptodate);
}
if (!end_that_request_first (td->blk_data.current_request, uptodate, "tBLK")) {
#ifndef DEVICE_NO_RANDOM
add_blkdev_randomness (MAJOR (td->blk_data.current_request->rq_dev));
#endif
end_that_request_last (td->blk_data.current_request);
}
if (treq!=NULL) {
tape_remove_ccw_req(td,treq);
td->discipline->free_bread(treq);
}
td->blk_data.current_request=NULL;
return;
}
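/*
 * Retry or complete the active request and start the next read request
 * from the queue. Called with the device lock held.
 */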
static void
tapeblock_exec_IO (tape_dev_t* td) {
int rc;
struct request* req;
tape_ccw_req_t *treq = tape_get_active_ccw_req(td);
if (treq) { // process done/failed request
while (treq->rc != 0 && td->blk_data.blk_retries>0) {
td->blk_data.blk_retries--;
td->blk_data.position=-1;
td->discipline->bread_enable_locate(treq);
tape_sprintf_event (tape_dbf_area,3,"b:retryreq: %x\n",(unsigned long)treq);
rc = tape_do_io_irq(td,treq,TAPE_SCHED_BLOCK);
if (rc != 0) {
tape_sprintf_event (tape_dbf_area,3,"b:doIOfail: %x\n",(unsigned long)treq);
continue; // one retry lost because tape_do_io_irq failed
}
return;
}
tapeblock_end_request (td); // check state, inform user, free memory, device becomes idle
}
if(TAPE_BUSY(td)) BUG(); // tape should be idle now, request should be freed!
if (tape_state_get (td) == TS_NOT_OPER) {
return;
}
if (list_empty (&td->blk_data.request_queue.queue_head)) {
// nothing more to do or the device has disappeared
tape_sprintf_event (tape_dbf_area,6,"b:Qempty\n");
return;
}
// queue is not empty, fetch a request and start IO!
req=td->blk_data.current_request=tape_next_request(&td->blk_data.request_queue);
if (req==NULL) {
BUG(); // the queue was not reported empty, but no request was found
}
if (req->cmd!=READ) { // we only support reading
tapeblock_end_request (td); // check state, inform user, free memory, device becomes idle
tapeblock_schedule_exec_io(td);
return;
}
treq=td->discipline->bread(req,td,tapeblock_major); //build channel program from request
if (!treq) {
// ccw generation failed. we try again later.
tape_sprintf_event (tape_dbf_area,3,"b:cqrNULL\n");
tapeblock_schedule_exec_io(td);
td->blk_data.current_request=NULL;
return;
}
td->blk_data.blk_retries = TAPEBLOCK_RETRIES;
rc = tape_do_io_irq(td,treq,TAPE_SCHED_BLOCK);
if (rc != 0) {
// the I/O start (ssch) failed, try again later
tape_sprintf_event (tape_dbf_area,3,"b:doIOfail\n");
tape_remove_ccw_req(td,treq);
td->discipline->free_bread(treq);
td->blk_data.current_request=NULL;
tapeblock_schedule_exec_io(td);
return;
}
// our request is in IO. we remove it from the queue and exit
tape_dequeue_request (&td->blk_data.request_queue,req);
}
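/*
 * Run the I/O scheduler if the device is in use.
 */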
static void
do_tape_request (tape_dev_t * td) {
long lockflags;
if (td==NULL) BUG();
s390irq_spin_lock_irqsave (td->devinfo.irq, lockflags);
if (tape_state_get(td)!=TS_IN_USE) {
s390irq_spin_unlock_irqrestore(td->devinfo.irq,lockflags);
return;
}
tapeblock_exec_IO(td);
s390irq_spin_unlock_irqrestore(td->devinfo.irq,lockflags);
}
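/*
 * Bottom half: rerun tapeblock_exec_IO with the queue and device locks
 * held.
 */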
static void
run_tapeblock_exec_IO (tape_dev_t* td) {
long flags_390irq,flags_ior;
request_queue_t *q = &td->blk_data.request_queue;
spin_lock_irqsave (&q->queue_lock, flags_ior);
s390irq_spin_lock_irqsave(td->devinfo.irq,flags_390irq);
atomic_set(&td->blk_data.bh_scheduled,0);
tapeblock_exec_IO(td);
s390irq_spin_unlock_irqrestore(td->devinfo.irq,flags_390irq);
spin_unlock_irqrestore (&q->queue_lock, flags_ior);
}
void
tapeblock_schedule_exec_io (tape_dev_t *td)
{
/* Protect against rescheduling, when already running */
if (atomic_compare_and_swap(0,1,&td->blk_data.bh_scheduled)) {
return;
}
INIT_LIST_HEAD(&td->blk_data.bh_tq.list);
td->blk_data.bh_tq.sync = 0;
td->blk_data.bh_tq.routine = (void *) (void *) run_tapeblock_exec_IO;
td->blk_data.bh_tq.data = td;
queue_task (&td->blk_data.bh_tq, &tq_immediate);
mark_bh (IMMEDIATE_BH);
return;
}
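/*
 * Request function called by the block layer.
 */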
static void tape_request_fn (request_queue_t* queue) {
tape_dev_t* td=tape_get_device_by_queue(queue);
if (td!=NULL) {
do_tape_request(td);
tape_put_device(td);
}
}
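/*
 * Return the request queue of the device that owns the given kdev.
 */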
static request_queue_t* tapeblock_getqueue (kdev_t kdev) {
tape_dev_t* td=tape_get_device_by_minor(MINOR(kdev));
if (td!=NULL) return &td->blk_data.request_queue;
else return NULL;
}
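/*
 * Determine the size of the medium: rewind, space forward to the first
 * filemark, query the block number with MTTELL and rewind again.
 */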
int tapeblock_mediumdetect(tape_dev_t* td) {
tape_ccw_req_t *treq;
unsigned int nr_of_blks;
int rc;
PRINT_INFO("Detecting media size...\n");
/* Rewind */
treq = td->discipline->ioctl (td, MTREW, 1, &rc);
if (treq == NULL)
return rc;
rc = tape_do_io_and_wait (td,treq,TAPE_WAIT_INTERRUPTIBLE);
TAPE_MERGE_RC(treq,rc);
tape_free_ccw_req (treq);
if (rc)
return rc;
/* FSF */
treq=td->discipline->ioctl (td, MTFSF,1,&rc);
if (treq == NULL)
return rc;
rc = tape_do_io_and_wait (td,treq,TAPE_WAIT_INTERRUPTIBLE);
TAPE_MERGE_RC(treq,rc);
tape_free_ccw_req (treq);
if (rc)
return rc;
/* TELL */
treq = td->discipline->ioctl (td, MTTELL, 1, &rc);
if (treq == NULL)
return rc;
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT_INTERRUPTIBLE);
TAPE_MERGE_RC(treq,rc);
nr_of_blks = *((int*)(treq->kernbuf)) - 1; /* don't count FM */
tape_free_ccw_req (treq);
if(rc)
return rc;
/* Rewind */
treq = td->discipline->ioctl (td, MTREW, 1, &rc);
if (treq == NULL)
return rc;
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT_INTERRUPTIBLE);
TAPE_MERGE_RC(treq,rc);
tape_free_ccw_req (treq);
if(rc)
return rc;
return 0;
}
/***************************************************************************
*
* drivers/s390/char/tapeblock.h
* block device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
*
****************************************************************************
*/
#ifndef TAPEBLOCK_H
#define TAPEBLOCK_H
#include <linux/config.h>
#define TAPEBLOCK_READAHEAD 30
#define TAPEBLOCK_MAJOR 0
#define TAPEBLOCK_DEVFSMODE 0060644 // block device, rw for owner, read-only for group and others
int tapeblock_open(struct inode *, struct file *);
int tapeblock_release(struct inode *, struct file *);
void tapeblock_setup(tape_dev_t* td);
void tapeblock_schedule_exec_io (tape_dev_t *td);
int tapeblock_mediumdetect(tape_dev_t* td);
#ifdef CONFIG_DEVFS_FS
devfs_handle_t tapeblock_mkdevfstree (tape_dev_t* td);
void tapeblock_rmdevfstree (tape_dev_t* td);
#endif
int tapeblock_init (void);
void tapeblock_uninit (void);
#endif
/***************************************************************************
*
* drivers/s390/char/tapechar.c
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
*
****************************************************************************
*/
#include "tapedefs.h"
#include <linux/config.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <asm/s390dyn.h>
#include <linux/mtio.h>
#include <asm/uaccess.h>
#include <linux/compatmac.h>
#ifdef MODULE
#define __NO_VERSION__
#include <linux/module.h>
#endif
#include "tape.h"
#include "tapechar.h"
#define PRINTK_HEADER "TCHAR:"
/*******************************************************************
* GLOBALS
*******************************************************************/
/*
* file operation structure for tape devices
*/
static struct file_operations tape_fops =
{
.owner = THIS_MODULE,
.read = tapechar_read,
.write = tapechar_write,
.ioctl = tapechar_ioctl,
.open = tapechar_open,
.release = tapechar_release,
};
int tapechar_major = TAPECHAR_MAJOR;
/*******************************************************************
* DEVFS Functions
*******************************************************************/
#ifdef CONFIG_DEVFS_FS
/*
* Create Char directory with (non)rewinding entries
*/
devfs_handle_t
tapechar_mkdevfstree (tape_dev_t* td) {
devfs_handle_t rc=NULL;
char name[32];
sprintf (name, "tape/%04x/char", td->devinfo.devno);
rc = devfs_mk_dir (NULL, name, NULL);
if (rc==NULL) goto out_undo;
sprintf (name, "tape/%04x/char/nonrewinding", td->devinfo.devno);
rc = devfs_register(NULL, name, DEVFS_FL_DEFAULT, tapechar_major,
TAPECHAR_NOREW_MINOR(td->first_minor),
TAPECHAR_DEVFSMODE, &tape_fops, td);
if (rc==NULL) goto out_undo;
sprintf (name, "tape/%04x/char/rewinding", td->devinfo.devno);
rc = devfs_register(NULL, name, DEVFS_FL_DEFAULT, tapechar_major,
TAPECHAR_REW_MINOR(td->first_minor),
TAPECHAR_DEVFSMODE, &tape_fops, td);
if (rc==NULL) goto out_undo;
goto out;
out_undo:
tapechar_rmdevfstree (td);
out:
return rc;
}
/*
* Remove DEVFS entries
*/
void
tapechar_rmdevfstree (tape_dev_t* td) {
devfs_remove("tape/%04x/char/nonrewinding", td->devinfo.devno);
devfs_remove("tape/%04x/char/rewinding", td->devinfo.devno);
devfs_remove("tape/%04x/char", td->devinfo.devno);
}
#endif
/*******************************************************************
* TAPECHAR Setup Functions
*******************************************************************/
/*
* This function is called for every new tape device
*/
void
tapechar_setup (tape_dev_t * td)
{
#ifdef CONFIG_DEVFS_FS
tapechar_mkdevfstree(td);
#endif
}
/*
* Tapechar init Function
*/
void
tapechar_init (void)
{
int result;
tape_frontend_t *charfront,*temp;
tape_dev_t* td;
tape_init();
/* Register the tape major number to the kernel */
result = register_chrdev (tapechar_major, "tape", &tape_fops);
if (result < 0) {
PRINT_WARN (KERN_ERR "tape: can't get major %d\n", tapechar_major);
tape_sprintf_event (tape_dbf_area,3,"c:initfail\n");
goto out;
}
if (tapechar_major == 0)
tapechar_major = result; /* accept dynamic major number */
PRINT_WARN (KERN_ERR " tape gets major %d for character device\n", result);
charfront = kmalloc (sizeof (tape_frontend_t), GFP_KERNEL);
if (charfront == NULL) {
PRINT_WARN (KERN_ERR "tapechar: cannot alloc memory for the frontend_t structure\n");
tape_sprintf_event (tape_dbf_area,3,"c:initfail no mem\n");
goto out;
}
charfront->device_setup = (tape_setup_device_t)tapechar_setup;
#ifdef CONFIG_DEVFS_FS
charfront->mkdevfstree = tapechar_mkdevfstree;
charfront->rmdevfstree = tapechar_rmdevfstree;
#endif
tape_sprintf_event (tape_dbf_area,3,"c:init ok\n");
charfront->next=NULL;
if (tape_first_front==NULL) {
tape_first_front=charfront;
} else {
temp=tape_first_front;
while (temp->next!=NULL)
temp=temp->next;
temp->next=charfront;
}
td=tape_first_dev;
while (td!=NULL) {
tapechar_setup(td);
td=td->next;
}
out:
return;
}
/*
* cleanup
*/
void
tapechar_uninit (void)
{
unregister_chrdev (tapechar_major, "tape");
}
/*******************************************************************
* TAPECHAR Util functions
*******************************************************************/
/*
* Terminate a write sequence: write two tapemarks (TM) and skip backward
* over the last one. This ensures that the tape is always correctly
* terminated. If the user then writes another file, the second TM is
* overwritten, so exactly one TM remains to separate the two files on
* the tape.
*/
static void
tapechar_terminate_write(tape_dev_t* td)
{
tape_ccw_req_t *treq;
int rc;
treq = td->discipline->ioctl(td, MTWEOF,1,&rc);
if (!treq)
goto out;
tape_do_io_and_wait(td,treq,TAPE_WAIT);
tape_free_ccw_req(treq);
treq = td->discipline->ioctl(td, MTWEOF,1,&rc);
if (!treq)
goto out;
tape_do_io_and_wait(td,treq,TAPE_WAIT);
tape_free_ccw_req(treq);
treq = td->discipline->ioctl(td, MTBSR,1,&rc);
if (!treq)
goto out;
tape_do_io_and_wait(td,treq,TAPE_WAIT);
tape_free_ccw_req(treq);
out:
return;
}
/*******************************************************************
* TAPECHAR Functions:
* - read
* - write
* - open
* - close
* - ioctl
*******************************************************************/
/*
* Tape device read function
*/
ssize_t
tapechar_read (struct file *filp, char *data, size_t count, loff_t * ppos)
{
tape_dev_t *td;
size_t block_size;
tape_ccw_req_t *treq;
int rc = 0;
size_t cpysize;
tape_sprintf_event (tape_dbf_area,6,"c:read\n");
td = (tape_dev_t*)filp->private_data;
if (ppos != &filp->f_pos) {
/* "A request was outside the capabilities of the device." */
/* This check uses internal knowledge about how pread and */
/* read work... */
tape_sprintf_event (tape_dbf_area,6,"c:ppos wrong\n");
rc = -EOVERFLOW; /* errno=75 Value too large for def. data type */
goto out;
}
if (td->char_data.block_size == 0) {
block_size = count;
} else {
if (count < td->char_data.block_size) {
rc = -EINVAL; // invalid argument
tape_sprintf_event (tape_dbf_area,3,"tapechar: read smaller than block size requested\n");
goto out;
}
block_size = td->char_data.block_size;
}
tape_sprintf_event (tape_dbf_area,6,"c:nbytes: %x\n",block_size);
treq = td->discipline->read_block (data, block_size, td);
if (!treq) {
rc = -ENOBUFS;
goto out;
}
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT);
TAPE_MERGE_RC(treq,rc);
if(rc != 0)
goto out_free;
rc = cpysize = block_size - td->devstat.rescnt;
if(idalbuf_copy_to_user(treq->userbuf, treq->idal_buf, cpysize)) {
tape_sprintf_exception (tape_dbf_area,6,"xfrb segf.\n");
rc = -EFAULT;
}
tape_sprintf_event (tape_dbf_area,6,"c:rbytes: %x\n", cpysize);
filp->f_pos += cpysize;
out_free:
tape_free_ccw_req(treq);
out:
return rc;
}
/*
* Tape device write function
*/
ssize_t
tapechar_write (struct file *filp, const char *data, size_t count, loff_t * ppos)
{
tape_dev_t *td;
size_t block_size;
tape_ccw_req_t *treq;
int nblocks, i = 0,rc = 0;
size_t written = 0;
tape_sprintf_event (tape_dbf_area,6,"c:write\n");
td = (tape_dev_t*)filp->private_data;
block_size = count;
if (ppos != &filp->f_pos) {
/* "A request was outside the capabilities of the device." */
tape_sprintf_event (tape_dbf_area,6,"c:ppos wrong\n");
rc = -EOVERFLOW; /* errno=75 Value too large for def. data type */
goto out;
}
if ((td->char_data.block_size != 0) && (count < td->char_data.block_size)){
rc = -EIO;
goto out;
}
if (td->char_data.block_size == 0) {
block_size = count;
nblocks = 1;
} else {
block_size = td->char_data.block_size;
nblocks = count / block_size;
}
tape_sprintf_event (tape_dbf_area,6,"c:nbytes: %x\n",block_size);
tape_sprintf_event (tape_dbf_area,6,"c:nblocks: %x\n",nblocks);
for (i = 0; i < nblocks; i++) {
treq = td->discipline->write_block (data + i * block_size, block_size, td);
if (!treq) {
rc = -ENOBUFS;
goto out;
}
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT);
TAPE_MERGE_RC(treq,rc);
tape_free_ccw_req(treq);
if(rc < 0)
goto out;
tape_sprintf_event (tape_dbf_area,6,"c:wbytes: %x\n",block_size - td->devstat.rescnt);
filp->f_pos += block_size - td->devstat.rescnt;
written += block_size - td->devstat.rescnt;
rc = written;
if (td->devstat.rescnt > 0)
goto out;
}
tape_sprintf_event (tape_dbf_area,6,"c:wtotal: %x\n",written);
out:
if (rc==-ENOSPC){
if(td->discipline->process_eov)
td->discipline->process_eov(td);
if(i > 0){
rc = i*block_size;
printk("write rc = %i\n",rc); /* XXX */
}
}
return rc;
}
/*
* MT IOCTLS
*/
static int
tapechar_mtioctop (struct file *filp, short mt_op, int mt_count)
{
tape_dev_t *td;
tape_ccw_req_t *treq = NULL;
int rc = 0;
td = (tape_dev_t*)filp->private_data;
tape_sprintf_event (tape_dbf_area,6,"c:mtio\n");
tape_sprintf_event (tape_dbf_area,6,"c:ioop: %x\n",mt_op);
tape_sprintf_event (tape_dbf_area,6,"c:arg: %x\n",mt_count);
switch (mt_op) {
case MTRETEN: // retension the tape
treq = td->discipline->ioctl (td, MTEOM,1,&rc);
break;
case MTLOAD:
treq = td->discipline->ioctl (td, MTLOAD,1,&rc);
if (rc != -EINVAL){
// the backend driver has a load function
break;
}
// if no medium is loaded, wait until one is inserted
if (td->medium_state != MS_LOADED) {
// create dummy request
treq = tape_alloc_ccw_req(1,0,0,TO_LOAD);
rc = tape_do_wait_req(td,treq,TAPE_WAIT_INTERRUPTIBLE_NOHALTIO);
} else {
rc = 0; // already loaded
}
goto out;
case MTSETBLK:
td->char_data.block_size = mt_count;
tape_sprintf_event (tape_dbf_area,6,"c:setblk:\n");
goto out;
case MTRESET:
td->char_data.block_size = 0;
tape_sprintf_event (tape_dbf_area,6,"c:devreset:\n");
goto out;
default:
treq = td->discipline->ioctl (td, mt_op,mt_count,&rc);
}
if (treq == NULL) {
tape_sprintf_event (tape_dbf_area,6,"c:ccwg fail\n");
goto out;
}
if(TAPE_INTERRUPTIBLE_OP(mt_op)){
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT_INTERRUPTIBLE);
} else {
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT);
}
TAPE_MERGE_RC(treq,rc);
tape_free_ccw_req(treq);
if ((mt_op == MTEOM) || (mt_op == MTRETEN)){
rc = 0; // EOM and RETEN report an error, this is fine...
}
if(rc != 0)
goto out; /* IO Failed */
// if medium was unloaded, update the corresponding variable.
switch (mt_op) {
case MTOFFL:
case MTUNLOAD:
td->medium_state = MS_UNLOADED;
break;
case MTRETEN: //need to rewind the tape after moving to eom
return tapechar_mtioctop (filp, MTREW, 1);
case MTFSFM: //need to skip back over the filemark
return tapechar_mtioctop (filp, MTBSFM, 1);
case MTBSF: //need to skip forward over the filemark
return tapechar_mtioctop (filp, MTFSF, 1);
}
tape_sprintf_event (tape_dbf_area,6,"c:mtio done\n");
out:
return rc;
}
/*
* Tape device io controls.
*/
int
tapechar_ioctl (struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
tape_dev_t *td;
tape_ccw_req_t *treq = NULL;
struct mtop op; /* structure for MTIOCTOP */
struct mtpos pos; /* structure for MTIOCPOS */
struct mtget get;
int rc;
tape_sprintf_event (tape_dbf_area,6,"c:ioct\n");
td = (tape_dev_t*)filp->private_data;
// check for discipline ioctl overloading
if ((rc = td->discipline->discipline_ioctl_overload (td, cmd, arg)) != -EINVAL) {
tape_sprintf_event (tape_dbf_area,6,"c:ioverloa\n");
goto out;
}
rc = 0;
switch (cmd) {
case MTIOCTOP: /* tape op command */
if (copy_from_user (&op, (char *) arg, sizeof (struct mtop))) {
rc = -EFAULT;
goto out;
}
if(op.mt_count < 0){
rc = -EINVAL;
goto out;
}
if(op.mt_op == MTBSR ||
op.mt_op == MTFSR ||
op.mt_op == MTFSF ||
op.mt_op == MTBSF ||
op.mt_op == MTFSFM ||
op.mt_op == MTBSFM)
{
int i;
/* We assume that the backends can handle count up */
/* to 500. */
for(i = 0; i < op.mt_count; i+=500){
rc = tapechar_mtioctop (filp, op.mt_op, MIN(500,op.mt_count-i));
if(rc)
goto out;
}
} else {
/* Single operations */
rc = tapechar_mtioctop (filp, op.mt_op, op.mt_count);
}
goto out;
case MTIOCPOS: /* query tape position */
memset (&pos,0,sizeof (struct mtpos));
treq = td->discipline->ioctl (td, MTTELL,1,&rc);
if(!treq)
goto out;
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT);
TAPE_MERGE_RC(treq,rc);
if(rc == 0){
pos.mt_blkno = *((int*)(treq->kernbuf));
if (copy_to_user ((char *) arg, &pos, sizeof (struct mtpos)))
rc = -EFAULT;
}
tape_free_ccw_req(treq);
goto out;
case MTIOCGET:
memset (&get,0,sizeof (struct mtget));
treq = td->discipline->ioctl (td, MTTELL,1,&rc);
if(!treq)
goto out;
rc = tape_do_io_and_wait(td,treq,TAPE_WAIT);
TAPE_MERGE_RC(treq,rc);
if(rc == 0){
get.mt_erreg = treq->rc;
get.mt_blkno = *((int*)(treq->kernbuf));
get.mt_type = MT_ISUNKNOWN;
get.mt_resid = td->devstat.rescnt;
get.mt_dsreg = td->tape_state;
if (copy_to_user ((char *) arg, &get, sizeof (struct mtget)))
rc = -EFAULT;
}
tape_free_ccw_req(treq);
goto out;
default:
tape_sprintf_event (tape_dbf_area,3,"c:ioct inv\n");
rc = -EINVAL;
goto out;
}
out:
return rc;
}
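/*
 * Illustrative user-space sketch (not part of the patch): driving the
 * MTIOCTOP/MTIOCGET interface implemented above. The device node name
 * and the block size are assumptions for illustration only.
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int tape_ioctl_example(void)
{
	struct mtop op;
	struct mtget get;
	int fd;

	fd = open("/dev/ntibm0", O_RDONLY);	/* assumed device node */
	if (fd < 0)
		return -1;
	op.mt_op = MTSETBLK;			/* switch to fixed block mode ... */
	op.mt_count = 4096;			/* ... with 4 KB blocks */
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTIOCTOP");
	if (ioctl(fd, MTIOCGET, &get) == 0)	/* query status and block number */
		printf("block %ld\n", (long) get.mt_blkno);
	close(fd);
	return 0;
}
#endif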
/*
* Tape device open function.
*/
int
tapechar_open (struct inode *inode, struct file *filp)
{
tape_dev_t *td = NULL;
int rc = 0;
long lockflags;
tape_sprintf_event (tape_dbf_area,6,"c:open: %x\n",td->first_minor);
inode = filp->f_dentry->d_inode;
td = tape_get_device_by_minor(minor (inode->i_rdev));
if (td == NULL){
rc = -ENODEV;
goto error;
}
s390irq_spin_lock_irqsave(td->devinfo.irq,lockflags);
if (tape_state_get(td) == TS_NOT_OPER) {
tape_sprintf_event (tape_dbf_area,6,"c:nodev\n");
rc = -ENODEV;
goto out;
}
if (tape_state_get (td) != TS_UNUSED) {
tape_sprintf_event (tape_dbf_area,6,"c:dbusy\n");
rc = -EBUSY;
goto out;
}
if ( td->discipline->owner )
__MOD_INC_USE_COUNT(td->discipline->owner);
tape_state_set (td, TS_IN_USE);
td->filp = filp; /* save for later reference */
filp->private_data = td; /* save the dev.info for later reference */
out:
s390irq_spin_unlock_irqrestore(td->devinfo.irq,lockflags);
error:
if(rc != 0){
if (td!=NULL)
tape_put_device(td);
}
return rc;
}
/*
* Tape device release function.
*/
int
tapechar_release (struct inode *inode, struct file *filp)
{
tape_dev_t *td = NULL;
tape_ccw_req_t *treq = NULL;
int rc = 0;
long lockflags;
tape_sprintf_event (tape_dbf_area,6,"c:release: %x\n",td->first_minor);
td = (tape_dev_t*)filp->private_data;
if(td->last_op == TO_WRI)
tapechar_terminate_write(td);
if (minor (inode->i_rdev) == TAPECHAR_REW_MINOR(td->first_minor)) {
treq = td->discipline->ioctl (td, MTREW,1,&rc);
if (treq != NULL) {
tape_sprintf_event (tape_dbf_area,6,"c:rewrelea\n");
rc = tape_do_io_and_wait(td, treq, TAPE_WAIT);
tape_free_ccw_req (treq);
}
}
s390irq_spin_lock_irqsave(td->devinfo.irq,lockflags);
if(tape_state_get(td) == TS_IN_USE)
tape_state_set (td, TS_UNUSED);
else if (tape_state_get(td) != TS_NOT_OPER)
BUG();
s390irq_spin_unlock_irqrestore(td->devinfo.irq,lockflags);
if ( td->discipline->owner )
__MOD_DEC_USE_COUNT(td->discipline->owner);
tape_put_device(td);
return rc;
}
/***************************************************************************
*
* drivers/s390/char/tapechar.h
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
*
****************************************************************************
*/
#ifndef TAPECHAR_H
#define TAPECHAR_H
#include <linux/config.h>
#define TAPECHAR_DEVFSMODE 0020644 // character device, rw for owner, read-only for group and others
#define TAPECHAR_MAJOR 0 /* get a dynamic major, since no major is officially defined for tape */
#define TAPECHAR_NOREW_MINOR(x) x /* Minor for nonrewinding device */
#define TAPECHAR_REW_MINOR(x) (x+1) /* Minor for rewinding device */
/*
* Prototypes
*/
ssize_t tapechar_read(struct file *, char *, size_t, loff_t *);
ssize_t tapechar_write(struct file *, const char *, size_t, loff_t *);
int tapechar_ioctl(struct inode *,struct file *,unsigned int,unsigned long);
int tapechar_open (struct inode *,struct file *);
int tapechar_release (struct inode *,struct file *);
#ifdef CONFIG_DEVFS_FS
devfs_handle_t tapechar_mkdevfstree (tape_dev_t* td);
void tapechar_rmdevfstree (tape_dev_t* td);
#endif
void tapechar_init (void);
void tapechar_uninit (void);
#endif /* TAPECHAR_H */
/***********************************************************************
* drivers/s390/char/tapedefs.h
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
*
***********************************************************************
*/
/* Kernel Version Compatibility section */
#include <linux/version.h>
#include <linux/blkdev.h>
#include <linux/blk.h>
#include <asm/irq.h>
#include <linux/compatmac.h>
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17))
#define TAPE_DEBUG // use s390 debug feature
#else
#undef TAPE_DEBUG // debug feature not supported by our 2.2.16 code
static inline void set_normalized_cda ( ccw1_t * cp, unsigned long address ) {
cp -> cda = address;
}
static inline void clear_normalized_cda ( ccw1_t * ccw ) {
ccw -> cda = 0;
}
#define BUG() PRINT_FATAL("tape390: CRITICAL INTERNAL ERROR OCCURRED. REPORT THIS BACK TO LINUX390@DE.IBM.COM\n")
#endif
#define CONFIG_S390_TAPE_DYNAMIC // allow devices to be attached or detached on the fly
#define TAPEBLOCK_RETRIES 20 // number of retries, when a block-dev request fails.
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \
do { \
blk_dev[d_major].queue = d_queue_fn; \
} while(0)
static inline struct request *
tape_next_request( request_queue_t *queue )
{
return elv_next_request(queue);
}
static inline void
tape_dequeue_request( request_queue_t * q, struct request *req )
{
blkdev_dequeue_request (req);
}
#else
#define s390_dev_info_t dev_info_t
typedef struct request *request_queue_t;
#ifndef init_waitqueue_head
#define init_waitqueue_head(x) do { *x = NULL; } while(0)
#endif
#define blk_init_queue(x,y) do {} while(0)
#define blk_queue_headactive(x,y) do {} while(0)
#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \
do { \
blk_dev[d_major].request_fn = d_request_fn; \
blk_dev[d_major].queue = d_queue_fn; \
blk_dev[d_major].current_request = d_current; \
} while(0)
static inline struct request *
tape_next_request( request_queue_t *queue )
{
return *queue;
}
static inline void
tape_dequeue_request( request_queue_t * q, struct request *req )
{
*q = req->next;
req->next = NULL;
}
#endif
......@@ -122,4 +122,138 @@ clear_normalized_cda(struct ccw1 * ccw)
ccw->cda = 0;
}
/*
* Idal buffer extension
*/
struct idal_buffer {
size_t size;
size_t page_order;
void *data[0];
};
/*
* Allocate an idal buffer
*/
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
struct idal_buffer *ib;
int nr_chunks, nr_ptrs, i;
nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
GFP_DMA | GFP_KERNEL);
if (ib == NULL)
return ERR_PTR(-ENOMEM);
ib->size = size;
ib->page_order = page_order;
for (i = 0; i < nr_ptrs; i++) {
if ((i & (nr_chunks - 1)) != 0) {
ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
continue;
}
ib->data[i] = (void *)
__get_free_pages(GFP_KERNEL, page_order);
if (ib->data[i] != NULL)
continue;
// Not enough memory
while (i >= nr_chunks) {
i -= nr_chunks;
free_pages((unsigned long) ib->data[i],
ib->page_order);
}
kfree(ib);
return ERR_PTR(-ENOMEM);
}
return ib;
}
/*
* Free an idal buffer.
*/
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
int nr_chunks, nr_ptrs, i;
nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
for (i = 0; i < nr_ptrs; i += nr_chunks)
free_pages((unsigned long) ib->data[i], ib->page_order);
kfree(ib);
}
/*
* Test if an idal list is really needed.
*/
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef CONFIG_ARCH_S390X
return ib->size > (4096 << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
return ib->size > (4096 << ib->page_order);
#endif
}
/*
* Set channel data address to idal buffer.
*/
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
if (__idal_buffer_is_needed(ib)) {
// setup idals;
ccw->cda = (u32)(addr_t) ib->data;
ccw->flags |= CCW_FLAG_IDA;
} else
// we do not need idals - use direct addressing
ccw->cda = (u32)(addr_t) ib->data[0];
ccw->count = ib->size;
}
/*
* Copy count bytes from an idal buffer to user memory
*/
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count)
{
size_t left;
int i;
if (count > ib->size)
BUG();
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
(addr_t) to += IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_to_user(to, ib->data[i], count);
}
/*
* Copy count bytes from user memory to an idal buffer
*/
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count)
{
size_t left;
int i;
if (count > ib->size)
BUG();
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
(addr_t) from += IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_from_user(ib->data[i], from, count);
}
#endif
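/*
 * Illustrative sketch (not part of the patch): how a caller might use the
 * idal_buffer helpers above to read one tape block and hand it to user
 * space. The buffer size, the CCW command code and the surrounding I/O
 * handling are assumptions for illustration only; the idal_buffer_* and
 * IS_ERR/PTR_ERR calls are the ones defined above or generally available.
 */
#if 0	/* example only, not compiled */
static int idal_example_read_block(struct ccw1 *ccw, char *user_buf)
{
	struct idal_buffer *ib;
	size_t not_copied;

	/* 32 KB buffer built from order-0 (4 KB) pages */
	ib = idal_buffer_alloc(32768, 0);
	if (IS_ERR(ib))
		return PTR_ERR(ib);

	/* let the helper decide between direct addressing and idals */
	ccw->cmd_code = 0x02;		/* assumed READ command code */
	ccw->flags = CCW_FLAG_SLI;
	idal_buffer_set_cda(ib, ccw);

	/* ... start the channel program and wait for completion (omitted) ... */

	/* copy the data to user space; returns the number of bytes NOT copied */
	not_copied = idal_buffer_to_user(ib, user_buf, 32768);
	idal_buffer_free(ib);
	return not_copied ? -EFAULT : 0;
}
#endif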
......@@ -122,4 +122,138 @@ clear_normalized_cda(struct ccw1 * ccw)
ccw->cda = 0;
}
/*
* Idal buffer extension
*/
struct idal_buffer {
size_t size;
size_t page_order;
void *data[0];
};
/*
* Allocate an idal buffer
*/
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
struct idal_buffer *ib;
int nr_chunks, nr_ptrs, i;
nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
GFP_DMA | GFP_KERNEL);
if (ib == NULL)
return ERR_PTR(-ENOMEM);
ib->size = size;
ib->page_order = page_order;
for (i = 0; i < nr_ptrs; i++) {
if ((i & (nr_chunks - 1)) != 0) {
ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
continue;
}
ib->data[i] = (void *)
__get_free_pages(GFP_KERNEL, page_order);
if (ib->data[i] != NULL)
continue;
// Not enough memory
while (i >= nr_chunks) {
i -= nr_chunks;
free_pages((unsigned long) ib->data[i],
ib->page_order);
}
kfree(ib);
return ERR_PTR(-ENOMEM);
}
return ib;
}
/*
* Free an idal buffer.
*/
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
int nr_chunks, nr_ptrs, i;
nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
for (i = 0; i < nr_ptrs; i += nr_chunks)
free_pages((unsigned long) ib->data[i], ib->page_order);
kfree(ib);
}
/*
* Test if an idal list is really needed.
*/
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef CONFIG_ARCH_S390X
return ib->size > (4096 << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
return ib->size > (4096 << ib->page_order);
#endif
}
/*
* Set channel data address to idal buffer.
*/
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
if (__idal_buffer_is_needed(ib)) {
// setup idals;
ccw->cda = (u32)(addr_t) ib->data;
ccw->flags |= CCW_FLAG_IDA;
} else
// we do not need idals - use direct addressing
ccw->cda = (u32)(addr_t) ib->data[0];
ccw->count = ib->size;
}
/*
* Copy count bytes from an idal buffer to user memory
*/
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void *to, size_t count)
{
size_t left;
int i;
if (count > ib->size)
BUG();
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
(addr_t) to += IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_to_user(to, ib->data[i], count);
}
/*
* Copy count bytes from user memory to an idal buffer
*/
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void *from, size_t count)
{
size_t left;
int i;
if (count > ib->size)
BUG();
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
(addr_t) from += IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
return copy_from_user(ib->data[i], from, count);
}
#endif