Commit e48f3a58 authored by Linus Torvalds

Automerge

parents a60ca6f1 476d1e91
......@@ -247,6 +247,15 @@ ChangeLog
Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
2.0.13:
- Internal changes towards using iget5_locked() in preparation for
fake inodes and small cleanups to ntfs_volume structure.
2.0.12:
- Internal cleanups in address space operations made possible by the
changes introduced in the previous release.
2.0.11:
- Internal updates and cleanups introducing the first step towards
fake inode based attribute i/o.
2.0.10:
- Microsoft says that the maximum number of inodes is 2^32 - 1. Update
the driver accordingly to only use 32-bits to store inode numbers on
......
......@@ -84,6 +84,10 @@ typedef unsigned int drm_magic_t;
/* Warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well */
/* KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
typedef struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
......@@ -99,15 +103,6 @@ typedef struct drm_tex_region {
unsigned int age;
} drm_tex_region_t;
/* Separate include files for the i810/mga/r128 specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#include "sis_drm.h"
#include "i830_drm.h"
#include "gamma_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
......@@ -428,95 +423,8 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
/* MGA specific ioctls */
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42)
#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t)
/* i810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44)
#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
/* Gamma specific ioctls */
#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t)
#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t)
#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
/* I830 specific ioctls */
#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
/* Device specific ioctls should only be in their respective headers.
 * The device specific ioctl range is 0x40 to 0x79. */
#define DRM_COMMAND_BASE 0x40
#endif
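With the shared table gone from drm.h, each driver now keeps its command numbers in its own <driver>_drm.h, all inside the 0x40-0x79 window that begins at DRM_COMMAND_BASE. A minimal sketch of the convention, using a made-up "foo" driver whose names and structures are illustrative only and not part of this commit:

/* foo_drm.h -- hypothetical example of the per-driver layout. */
#define DRM_IOCTL_FOO_INIT	DRM_IOW( 0x40, drm_foo_init_t)	/* DRM_COMMAND_BASE + 0 */
#define DRM_IOCTL_FOO_FLUSH	DRM_IO(  0x41)			/* DRM_COMMAND_BASE + 1 */
#define DRM_IOCTL_FOO_GETBUF	DRM_IOWR(0x45, drm_foo_dma_t)	/* DRM_COMMAND_BASE + 5 */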
......@@ -555,7 +555,7 @@ static int DRM(alloc_queue)(drm_device_t *dev)
/* Allocate a new queue */
down(&dev->struct_sem);
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
......
......@@ -32,6 +32,8 @@
#define __NO_VERSION__
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#include <linux/interrupt.h> /* For task queue support */
......
......@@ -48,6 +48,16 @@ typedef struct _drm_gamma_sarea {
int vertex_prim;
} drm_gamma_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmGamma.h)
*/
/* Gamma specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
typedef struct drm_gamma_copy {
unsigned int DMAOutputAddress;
unsigned int DMAOutputCount;
......
......@@ -32,6 +32,8 @@
#include <linux/config.h>
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#define DRIVER_AUTHOR "VA Linux Systems Inc."
......
......@@ -33,6 +33,8 @@
#define __NO_VERSION__
#include "i810.h"
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
......
......@@ -168,14 +168,34 @@ typedef struct _drm_i810_sarea {
} drm_i810_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
/* i810 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44)
#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
typedef struct _drm_i810_clear {
int clear_color;
int clear_depth;
int flags;
} drm_i810_clear_t;
/* These may be placeholders if we have more cliprects than
* I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
* false, indicating that the buffer will be dispatched again with a
......
......@@ -33,6 +33,8 @@
#include <linux/config.h>
#include "i810.h"
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
#include "i810_drv.h"
#define DRIVER_AUTHOR "VA Linux Systems Inc."
......
......@@ -34,10 +34,11 @@
#define __NO_VERSION__
#include "i830.h"
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>
/* in case we don't have a 2.3.99-pre6 kernel or later: */
#ifndef VM_DONTCOPY
......@@ -58,7 +59,6 @@
do { \
int _head; \
int _tail; \
int _i; \
do { \
_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \
_tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \
......@@ -369,9 +369,7 @@ static int i830_wait_ring(drm_device_t *dev, int n)
unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
end = jiffies + (HZ*3);
while (ring->space < n) {
int i;
while (ring->space < n) {
ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->space = ring->head - (ring->tail+8);
if (ring->space < 0) ring->space += ring->Size;
......@@ -387,7 +385,6 @@ static int i830_wait_ring(drm_device_t *dev, int n)
DRM_ERROR("lockup\n");
goto out_wait_ring;
}
udelay(1);
}
......
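The i830_dma.c hunks above drop a duplicated inner while loop, an unused counter and the udelay(1) from the ring wait path. A rough consolidation of what the loop now looks like, pieced together from the context lines shown; the timeout-reset and error handling are assumptions, not a copy of the driver:

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head  = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->head != last_head)	/* assumed: progress resets the timeout */
			end = jiffies + (HZ * 3);
		last_head = ring->head;
		/* No more inner loop or udelay(1): just spin until space
		 * frees up or the ~3 second timeout trips. */
		if (time_after(jiffies, end)) {
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
	}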
......@@ -201,6 +201,19 @@ typedef struct _drm_i830_sarea {
int vertex_prim;
} drm_i830_sarea_t;
/* I830 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
typedef struct _drm_i830_clear {
int clear_color;
int clear_depth;
......
......@@ -34,6 +34,8 @@
#include <linux/config.h>
#include "i830.h"
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#define DRIVER_AUTHOR "VA Linux Systems Inc."
......
......@@ -36,6 +36,8 @@
#define __NO_VERSION__
#include "mga.h"
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
#include "mga_drv.h"
#include <linux/interrupt.h> /* For task queue support */
......
......@@ -38,6 +38,7 @@
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
#ifndef __MGA_SAREA_DEFINES__
#define __MGA_SAREA_DEFINES__
......@@ -225,6 +226,20 @@ typedef struct _drm_mga_sarea {
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
/* MGA specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42)
#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t)
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
......
......@@ -32,6 +32,8 @@
#include <linux/config.h>
#include "mga.h"
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
#include "mga_drv.h"
#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
......
......@@ -35,6 +35,8 @@
#define __NO_VERSION__
#include "mga.h"
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
#include "mga_drv.h"
#include "drm.h"
......
......@@ -30,6 +30,8 @@
#define __NO_VERSION__
#include "mga.h"
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
#include "mga_drv.h"
#include "mga_ucode.h"
......
......@@ -31,6 +31,8 @@
#define __NO_VERSION__
#include "r128.h"
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
#include <linux/interrupt.h> /* For task queue support */
......
......@@ -170,6 +170,27 @@ typedef struct drm_r128_sarea {
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
/* Rage 128 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t)
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
......
......@@ -32,6 +32,8 @@
#include <linux/config.h>
#include "r128.h"
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
#include "ati_pcigart.h"
......
......@@ -34,8 +34,8 @@
#ifndef __R128_DRV_H__
#define __R128_DRV_H__
#define GET_RING_HEAD( ring ) le32_to_cpu( *(ring)->head )
#define SET_RING_HEAD( ring, val ) *(ring)->head = cpu_to_le32( val )
#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head )
#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head )
typedef struct drm_r128_freelist {
unsigned int age;
......@@ -384,44 +384,11 @@ extern int r128_cce_indirect( struct inode *inode, struct file *filp,
#define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
#define R128_ADDR(reg) (R128_BASE( reg ) + reg)
#define R128_DEREF(reg) *(volatile u32 *)R128_ADDR( reg )
#ifdef __alpha__
#define R128_READ(reg) (_R128_READ((u32 *)R128_ADDR(reg)))
static inline u32 _R128_READ(u32 *addr)
{
mb();
return *(volatile u32 *)addr;
}
#define R128_WRITE(reg,val) \
do { \
wmb(); \
R128_DEREF(reg) = val; \
} while (0)
#else
#define R128_READ(reg) le32_to_cpu( R128_DEREF( reg ) )
#define R128_WRITE(reg,val) \
do { \
R128_DEREF( reg ) = cpu_to_le32( val ); \
} while (0)
#endif
#define R128_READ(reg) readl( (volatile u32 *) R128_ADDR(reg) )
#define R128_WRITE(reg,val) writel( (val), (volatile u32 *) R128_ADDR(reg) )
#define R128_DEREF8(reg) *(volatile u8 *)R128_ADDR( reg )
#ifdef __alpha__
#define R128_READ8(reg) _R128_READ8((u8 *)R128_ADDR(reg))
static inline u8 _R128_READ8(u8 *addr)
{
mb();
return *(volatile u8 *)addr;
}
#define R128_WRITE8(reg,val) \
do { \
wmb(); \
R128_DEREF8(reg) = val; \
} while (0)
#else
#define R128_READ8(reg) R128_DEREF8( reg )
#define R128_WRITE8(reg,val) do { R128_DEREF8( reg ) = val; } while (0)
#endif
#define R128_READ8(reg) readb( (volatile u8 *) R128_ADDR(reg) )
#define R128_WRITE8(reg,val) writeb( (val), (volatile u8 *) R128_ADDR(reg) )
#define R128_WRITE_PLL(addr,val) \
do { \
......@@ -493,7 +460,11 @@ do { \
* Ring control
*/
#if defined(__powerpc__)
#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define r128_flush_write_combine() mb()
#endif
#define R128_VERBOSE 0
......
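The accessor rewrite above replaces per-architecture pointer dereferences, explicit mb()/wmb() barriers and le32_to_cpu()/cpu_to_le32() conversions with the kernel's readl()/writel()/readb()/writeb() helpers, which already handle byte order and I/O ordering on every platform. A hedged sketch of the same idea outside the driver macros; the helper names and register offset are illustrative:

#include <asm/io.h>

/* mmio_base corresponds to (unsigned long)dev_priv->mmio->handle in the
 * macros above; readl()/writel() return and accept CPU-order values and
 * include the ordering the old alpha-specific variants provided by hand. */
static u32 example_read_reg(unsigned long mmio_base, unsigned long reg)
{
	return readl((volatile u32 *)(mmio_base + reg));
}

static void example_write_reg(unsigned long mmio_base, unsigned long reg, u32 val)
{
	writel(val, (volatile u32 *)(mmio_base + reg));
}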
......@@ -30,6 +30,8 @@
#define __NO_VERSION__
#include "r128.h"
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
#include "drm.h"
#include <linux/delay.h>
......
......@@ -31,6 +31,8 @@
#define __NO_VERSION__
#include "radeon.h"
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include <linux/interrupt.h> /* For task queue support */
......@@ -38,7 +40,7 @@
#define RADEON_FIFO_DEBUG 0
#if defined(__alpha__)
#if defined(__alpha__) || defined(__powerpc__)
# define PCIGART_ENABLED
#else
# undef PCIGART_ENABLED
......@@ -631,7 +633,11 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
}
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT );
#else
RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw );
#endif
radeon_do_wait_for_idle( dev_priv );
......
......@@ -301,6 +301,29 @@ typedef struct {
*
* KW: actually it's illegal to change any of this (backwards compatibility).
*/
/* Radeon specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
......
......@@ -30,6 +30,8 @@
#include <linux/config.h>
#include "radeon.h"
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "ati_pcigart.h"
......@@ -37,11 +39,11 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20020521"
#define DRIVER_DATE "20020611"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 0
#define DRIVER_PATCHLEVEL 1
/* Interface history:
*
......
......@@ -31,6 +31,9 @@
#ifndef __RADEON_DRV_H__
#define __RADEON_DRV_H__
#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head )
#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head )
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
......@@ -141,7 +144,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
static inline void
radeon_update_ring_snapshot( drm_radeon_ring_buffer_t *ring )
{
ring->space = (*(volatile int *)ring->head - ring->tail) * sizeof(u32);
ring->space = (GET_RING_HEAD(ring) - ring->tail) * sizeof(u32);
if ( ring->space <= 0 )
ring->space += ring->size;
}
......@@ -249,6 +252,12 @@ extern int radeon_cp_flip( struct inode *inode, struct file *filp,
# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
#define RADEON_RBBM_GUICNTL 0x172c
# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_FB_LOCATION 0x0148
#define RADEON_MCLK_CNTL 0x0012
......@@ -424,6 +433,7 @@ extern int radeon_cp_flip( struct inode *inode, struct file *filp,
#define RADEON_CP_RB_BASE 0x0700
#define RADEON_CP_RB_CNTL 0x0704
# define RADEON_BUF_SWAP_32BIT (2 << 16)
#define RADEON_CP_RB_RPTR_ADDR 0x070c
#define RADEON_CP_RB_RPTR 0x0710
#define RADEON_CP_RB_WPTR 0x0714
......@@ -533,41 +543,11 @@ extern int radeon_cp_flip( struct inode *inode, struct file *filp,
#define RADEON_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
#define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg)
#define RADEON_DEREF(reg) *(volatile u32 *)RADEON_ADDR( reg )
#ifdef __alpha__
#define RADEON_READ(reg) (_RADEON_READ((u32 *)RADEON_ADDR( reg )))
static inline u32 _RADEON_READ(u32 *addr)
{
mb();
return *(volatile u32 *)addr;
}
#define RADEON_WRITE(reg,val) \
do { \
wmb(); \
RADEON_DEREF(reg) = val; \
} while (0)
#else
#define RADEON_READ(reg) RADEON_DEREF( reg )
#define RADEON_WRITE(reg, val) do { RADEON_DEREF( reg ) = val; } while (0)
#endif
#define RADEON_READ(reg) readl( (volatile u32 *) RADEON_ADDR(reg) )
#define RADEON_WRITE(reg,val) writel( (val), (volatile u32 *) RADEON_ADDR(reg) )
#define RADEON_DEREF8(reg) *(volatile u8 *)RADEON_ADDR( reg )
#ifdef __alpha__
#define RADEON_READ8(reg) _RADEON_READ8((u8 *)RADEON_ADDR( reg ))
static inline u8 _RADEON_READ8(u8 *addr)
{
mb();
return *(volatile u8 *)addr;
}
#define RADEON_WRITE8(reg,val) \
do { \
wmb(); \
RADEON_DEREF8( reg ) = val; \
} while (0)
#else
#define RADEON_READ8(reg) RADEON_DEREF8( reg )
#define RADEON_WRITE8(reg, val) do { RADEON_DEREF8( reg ) = val; } while (0)
#endif
#define RADEON_READ8(reg) readb( (volatile u8 *) RADEON_ADDR(reg) )
#define RADEON_WRITE8(reg,val) writeb( (val), (volatile u8 *) RADEON_ADDR(reg) )
#define RADEON_WRITE_PLL( addr, val ) \
do { \
......@@ -664,6 +644,15 @@ do { \
goto __ring_space_done; \
udelay( 1 ); \
} \
DRM_ERROR( "ring space check from memory failed, reading register...\n" ); \
/* If ring space check fails from RAM, try reading the \
register directly */ \
ring->space = 4 * ( RADEON_READ( RADEON_CP_RB_RPTR ) - ring->tail ); \
if ( ring->space <= 0 ) \
ring->space += ring->size; \
if ( ring->space >= ring->high_mark ) \
goto __ring_space_done; \
\
DRM_ERROR( "ring space check failed!\n" ); \
return -EBUSY; \
} \
......@@ -701,7 +690,11 @@ do { \
* Ring control
*/
#if defined(__powerpc__)
#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define radeon_flush_write_combine() mb()
#endif
#define RADEON_VERBOSE 0
......@@ -737,8 +730,9 @@ do { \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
#define COMMIT_RING() do { \
radeon_flush_write_combine(); \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
} while (0)
#define OUT_RING( x ) do { \
......
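The reworked COMMIT_RING() above flushes write-combined ring writes (via a read-back of the ring head on PowerPC, a plain mb() elsewhere) before the new write pointer is handed to the CP, so the chip never fetches ring entries still parked in the CPU's write-combine buffers. Roughly how the macros fit together when emitting a register write; this is an illustrative fragment, not code from the commit, and assumes it sits inside a function that declared RING_LOCALS:

	RING_LOCALS;

	BEGIN_RING( 2 );
	/* One register write: CP_PACKET0 header plus the value. */
	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_NONE );
	ADVANCE_RING();

	/* Before returning to user space: flush write combining, then
	 * bump RADEON_CP_RB_WPTR so the CP sees the new commands. */
	COMMIT_RING();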
......@@ -30,8 +30,9 @@
#define __NO_VERSION__
#include "radeon.h"
#include "drmP.h"
#include "radeon_drv.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include <linux/delay.h>
......@@ -669,7 +670,6 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
int i = 0;
RING_LOCALS;
DRM_DEBUG("%s: hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
__FUNCTION__,
prim->prim,
......@@ -684,7 +684,6 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
return;
}
do {
/* Emit the next cliprect */
if ( i < nbox ) {
......@@ -906,6 +905,16 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
/* Make a copy of the parameters in case we have to update them
* for a multi-pass texture blit.
*/
......@@ -1081,6 +1090,7 @@ static int radeon_do_init_pageflip( drm_device_t *dev )
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
return 0;
}
......@@ -1095,6 +1105,7 @@ int radeon_do_cleanup_pageflip( drm_device_t *dev )
dev_priv->page_flipping = 0;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
return 0;
}
......@@ -1585,15 +1596,15 @@ static int radeon_emit_packets(
drm_radeon_cmd_header_t header,
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = packet[(int)header.packet.packet_id].len;
int reg = packet[(int)header.packet.packet_id].start;
int id = (int)header.packet.packet_id;
int sz = packet[id].len;
int reg = packet[id].start;
int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (sz * sizeof(int) > cmdbuf->bufsz)
return -EINVAL;
BEGIN_RING(sz+1);
OUT_RING( CP_PACKET0( reg, (sz-1) ) );
OUT_RING_USER_TABLE( data, sz );
......
......@@ -2,6 +2,16 @@
#ifndef _sis_drm_public_h_
#define _sis_drm_public_h_
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t)
#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t)
#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
typedef struct {
int context;
unsigned int offset;
......
......@@ -141,6 +141,19 @@ static struct fb_var_screeninfo vesafb_defined = {
/* --------------------------------------------------------------------- */
static inline void my_install_cmap(WPMINFO2)
{
/* Do not touch this code if you do not understand what it does! */
/* Never try to use do_install_cmap() instead. It is crap. */
struct fb_cmap* cmap = &ACCESS_FBINFO(currcon_display)->cmap;
if (cmap->len)
fb_set_cmap(cmap, 1, &ACCESS_FBINFO(fbcon));
else
fb_set_cmap(fb_default_cmap(ACCESS_FBINFO(curr.cmap_len)),
1, &ACCESS_FBINFO(fbcon));
}
static void matrox_pan_var(WPMINFO struct fb_var_screeninfo *var) {
unsigned int pos;
......@@ -869,7 +882,7 @@ static int matroxfb_set_var(struct fb_var_screeninfo *var, int con,
up_read(&ACCESS_FBINFO(altout.lock));
}
matrox_cfbX_init(PMINFO display);
do_install_cmap(ACCESS_FBINFO(fbcon.currcon),&ACCESS_FBINFO(fbcon));
my_install_cmap(PMINFO2);
#if defined(CONFIG_FB_COMPAT_XPMAC)
if (console_fb_info == &ACCESS_FBINFO(fbcon)) {
int vmode, cmode;
......
......@@ -29,7 +29,7 @@ MODULE_PARM_DESC(mem, "Memory size reserved for dualhead (default=8MB)");
static int matroxfb_dh_getcolreg(unsigned regno, unsigned *red, unsigned *green,
unsigned *blue, unsigned *transp, struct fb_info* info) {
#define m2info ((struct matroxfb_dh_fb_info*)info)
if (regno > 16)
if (regno >= 16)
return 1;
*red = m2info->palette[regno].red;
*blue = m2info->palette[regno].blue;
......@@ -44,7 +44,7 @@ static int matroxfb_dh_setcolreg(unsigned regno, unsigned red, unsigned green,
#define m2info ((struct matroxfb_dh_fb_info*)info)
struct display* p;
if (regno > 16)
if (regno >= 16)
return 1;
m2info->palette[regno].red = red;
m2info->palette[regno].blue = blue;
......@@ -84,6 +84,19 @@ static int matroxfb_dh_setcolreg(unsigned regno, unsigned red, unsigned green,
#undef m2info
}
static inline void my_install_cmap(struct matroxfb_dh_fb_info* m2info)
{
/* Do not touch this code if you do not understand what it does! */
/* Never try to use do_install_cmap() instead. It is crap. */
struct fb_cmap* cmap = &m2info->currcon_display->cmap;
if (cmap->len)
fb_set_cmap(cmap, 1, &m2info->fbcon);
else
fb_set_cmap(fb_default_cmap(16), 1, &m2info->fbcon);
}
static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
struct my_timming* mt,
struct display* p,
......@@ -439,7 +452,7 @@ static int matroxfb_dh_set_var(struct fb_var_screeninfo* var, int con,
up_read(&ACCESS_FBINFO(altout.lock));
}
matroxfb_dh_cfbX_init(m2info, p);
do_install_cmap(ACCESS_FBINFO(fbcon.currcon), &ACCESS_FBINFO(fbcon));
my_install_cmap(m2info);
}
return 0;
#undef m2info
......
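The two regno checks changed above are off-by-one fixes: with a 16-entry palette (valid indices 0 through 15, as the checks imply), "regno > 16" still lets regno == 16 through and touches one element past the end of the array. In miniature:

	/* 16 palette slots => valid regno values are 0..15. */
	if (regno >= 16)	/* ">" alone would wrongly accept 16 */
		return 1;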
......@@ -6,7 +6,7 @@ ToDo:
user open()s a file with i_size > s_maxbytes? Should read_inode()
truncate the visible i_size? Will the user just get -E2BIG (or
whatever) on open()? Or will (s)he be able to open() but lseek() and
read() will fail when s_maxbytes is reached? -> Investigate this!
read() will fail when s_maxbytes is reached? -> Investigate this.
- Implement/allow non-resident index bitmaps in dir.c::ntfs_readdir()
and then also consider initialized_size w.r.t. the bitmaps, etc.
- vcn_to_lcn() should somehow return the correct pointer within the
......@@ -17,11 +17,67 @@ ToDo:
- Consider if ntfs_file_read_compressed_block() shouldn't be coping
with initialized_size < data_size. I don't think it can happen but
it requires more careful consideration.
- CLEANUP: Modularise and reuse code in aops.c. At the moment we have
several copies of almost identical functions and the functions are
quite big. Modularising them a bit, e.g. a-la get_block(), will make
them cleaner and make code reuse easier.
- Want to use dummy inodes for address space i/o.
- CLEANUP: At the moment we have two copies of almost identical
functions in aops.c, can merge them once fake inode address space
based attribute i/o is further developed.
- CLEANUP: Modularising the code in aops.c a bit, e.g. a-la get_block(),
  will make it cleaner and make code reuse easier.
- Modify ntfs_read_locked_inode() to return an error code and update
callers, i.e. ntfs_iget(), to pass that error code up instead of just
using -EIO.
- Enable NFS exporting of NTFS.
- Use fake inodes for address space i/o.
2.0.13 - Use iget5_locked() in preparation for fake inodes and small cleanups.
- Remove nr_mft_bits and the now superfluous union with nr_mft_records
from ntfs_volume structure.
- Remove nr_lcn_bits and the now superfluous union with nr_clusters
from ntfs_volume structure.
- Use iget5_locked() and friends instead of conventional iget(). Wrap
the call in fs/ntfs/inode.c::ntfs_iget() and update callers of iget()
to use ntfs_iget(). Leave only one iget() call at mount time so we
don't need an ntfs_iget_mount().
- Change fs/ntfs/inode.c::ntfs_new_extent_inode() to take mft_no as an
additional argument.
2.0.12 - Initial cleanup of address space operations following 2.0.11 changes.
- Merge fs/ntfs/aops.c::end_buffer_read_mst_async() and
fs/ntfs/aops.c::end_buffer_read_file_async() into one function
fs/ntfs/aops.c::end_buffer_read_attr_async() using NInoMstProtected()
to determine whether to apply mst fixups or not.
- Above change allows merging fs/ntfs/aops.c::ntfs_file_read_block()
and fs/ntfs/aops.c::ntfs_mst_readpage() into one function
fs/ntfs/aops.c::ntfs_attr_read_block(). Also, create a tiny wrapper
fs/ntfs/aops.c::ntfs_mst_readpage() to transform the parameters from
the VFS readpage function prototype to the ntfs_attr_read_block()
function prototype.
2.0.11 - Initial preparations for fake inode based attribute i/o.
- Move definition of ntfs_inode_state_bits to fs/ntfs/inode.h and
do some macro magic (adapted from include/linux/buffer_head.h) to
expand all the helper functions NInoFoo(), NInoSetFoo(), and
NInoClearFoo().
- Add new flag to ntfs_inode_state_bits: NI_Sparse.
- Add new fields to ntfs_inode structure to allow use of fake inodes
for attribute i/o: type, name, name_len. Also add new state bits:
NI_Attr, which, if set, indicates the inode is a fake inode, and
NI_MstProtected, which, if set, indicates the attribute uses multi
sector transfer protection, i.e. fixups need to be applied after
reads and before/after writes.
- Rename fs/ntfs/inode.c::ntfs_{new,clear,destroy}_inode() to
ntfs_{new,clear,destroy}_extent_inode() and update callers.
- Use ntfs_clear_extent_inode() in fs/ntfs/inode.c::__ntfs_clear_inode()
instead of ntfs_destroy_extent_inode().
- Cleanup memory deallocations in {__,}ntfs_clear_{,big_}inode().
- Make all operations on ntfs inode state bits use the NIno* functions.
- Set up the new ntfs inode fields and state bits in
fs/ntfs/inode.c::ntfs_read_inode() and add appropriate cleanup of
allocated memory to __ntfs_clear_inode().
- Cleanup ntfs_inode structure a bit for better ordering of elements
w.r.t. their size to allow better packing of the structure in memory.
2.0.10 - There can only be 2^32 - 1 inodes on an NTFS volume.
......@@ -38,7 +94,10 @@ ToDo:
- Change decompression engine to use a single buffer protected by a
spin lock instead of per-CPU buffers. (Rusty Russell)
- Switch to using the new KM_BIO_SRC_IRQ for atomic kmaps. (Andrew
- Do not update cb_pos when handling a partial final page during
decompression of a sparse compression block, as the value is later
reset without being read/used. (Rusty Russell)
- Switch to using the new KM_BIO_SRC_IRQ for atomic kmap()s. (Andrew
Morton)
- Change buffer size in ntfs_readdir()/ntfs_filldir() to use
NLS_MAX_CHARSET_SIZE which makes the buffers almost 1kiB each but
......
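The 2.0.11 entry above mentions "macro magic" adapted from include/linux/buffer_head.h to generate the NInoFoo()/NInoSetFoo()/NInoClearFoo() helpers. A minimal sketch of that pattern; the actual macro name and flag list in fs/ntfs/inode.h may differ:

/* One invocation per ntfs_inode_state_bits flag, in the style of
 * BUFFER_FNS() in include/linux/buffer_head.h. */
#define NINO_FNS(flag)						\
static inline int NIno##flag(ntfs_inode *ni)			\
{								\
	return test_bit(NI_##flag, &(ni)->state);		\
}								\
static inline void NInoSet##flag(ntfs_inode *ni)		\
{								\
	set_bit(NI_##flag, &(ni)->state);			\
}								\
static inline void NInoClear##flag(ntfs_inode *ni)		\
{								\
	clear_bit(NI_##flag, &(ni)->state);			\
}

NINO_FNS(Attr)		/* NInoAttr(), NInoSetAttr(), NInoClearAttr() */
NINO_FNS(MstProtected)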
......@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.10\"
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.13\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
......
......@@ -3,7 +3,7 @@
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001,2002 Anton Altaparmakov.
* Copyright (C) 2002 Richard Russon.
* Copyright (c) 2002 Richard Russon.
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
......@@ -30,31 +30,43 @@
#include "ntfs.h"
/**
* end_buffer_read_file_async -
* end_buffer_read_attr_async - async io completion for reading attributes
* @bh: buffer head on which io is completed
* @uptodate: whether @bh is now uptodate or not
*
* Async io completion handler for accessing files. Adapted from
* end_buffer_read_mst_async().
* Asynchronous I/O completion handler for reading pages belonging to the
* attribute address space of an inode. The inodes can either be files or
* directories or they can be fake inodes describing some attribute.
*
* If NInoMstProtected(), perform the post read mst fixups when all IO on the
* page has been completed and mark the page uptodate or set the error bit on
* the page. To determine the size of the records that need fixing up, we cheat
* a little bit by setting the index_block_size in ntfs_inode to the ntfs
* record size, and index_block_size_bits, to the log(base 2) of the ntfs
* record size.
*/
static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
static void end_buffer_read_attr_async(struct buffer_head *bh, int uptodate)
{
static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
ntfs_inode *ni;
if (uptodate)
if (likely(uptodate))
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
page = bh->b_page;
ni = NTFS_I(page->mapping->host);
if (likely(uptodate)) {
s64 file_ofs;
ntfs_inode *ni = NTFS_I(page->mapping->host);
file_ofs = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
/* Check for the current buffer head overflowing. */
if (file_ofs + bh->b_size > ni->initialized_size) {
char *addr;
int ofs = 0;
......@@ -82,10 +94,47 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
SetPageError(page);
tmp = tmp->b_this_page;
}
spin_unlock_irqrestore(&page_uptodate_lock, flags);
if (!PageError(page))
SetPageUptodate(page);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
* attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
*/
if (!NInoMstProtected(ni)) {
if (likely(!PageError(page)))
SetPageUptodate(page);
unlock_page(page);
return;
} else {
char *addr;
unsigned int i, recs, nr_err;
u32 rec_size;
rec_size = ni->_IDM(index_block_size);
recs = PAGE_CACHE_SIZE / rec_size;
addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
for (i = nr_err = 0; i < recs; i++) {
if (likely(!post_read_mst_fixup((NTFS_RECORD*)(addr +
i * rec_size), rec_size)))
continue;
nr_err++;
ntfs_error(ni->vol->sb, "post_read_mst_fixup() failed, "
"corrupt %s record 0x%Lx. Run chkdsk.",
ni->mft_no ? "index" : "mft",
(long long)(((s64)page->index <<
PAGE_CACHE_SHIFT >>
ni->_IDM(index_block_size_bits)) + i));
}
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
if (likely(!nr_err && recs))
SetPageUptodate(page);
else {
ntfs_error(ni->vol->sb, "Setting page error, index "
"0x%lx.", page->index);
SetPageError(page);
}
}
unlock_page(page);
return;
still_busy:
......@@ -94,11 +143,20 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
}
/**
* ntfs_file_read_block -
* ntfs_attr_read_block - fill a @page of an address space with data
* @page: page cache page to fill with data
*
* NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage().
* Fill the page @page of the address space belonging to the @page->host inode.
* We read each buffer asynchronously and when all buffers are read in, our io
* completion handler end_buffer_read_attr_async(), if required, automatically
* applies the mst fixups to the page before finally marking it uptodate and
* unlocking it.
*
* Return 0 on success and -errno on error.
*
* Contains an adapted version of fs/buffer.c::block_read_full_page().
*/
static int ntfs_file_read_block(struct page *page)
static int ntfs_attr_read_block(struct page *page)
{
VCN vcn;
LCN lcn;
......@@ -119,7 +177,7 @@ static int ntfs_file_read_block(struct page *page)
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
bh = head = page_buffers(page);
if (!bh)
if (unlikely(!bh))
return -ENOMEM;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
......@@ -128,11 +186,9 @@ static int ntfs_file_read_block(struct page *page)
zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->mft_no)) {
ntfs_error(vol->sb, "NTFS: Attempt to access $MFT! This is a "
"very serious bug! Denying access...");
return -EACCES;
}
if (unlikely(!ni->run_list.rl && !ni->mft_no))
panic("NTFS: $MFT/$DATA run list has been unmapped! This is a "
"very serious bug! Cannot continue...");
#endif
/* Loop through all the buffers in the page. */
......@@ -211,7 +267,7 @@ static int ntfs_file_read_block(struct page *page)
for (i = 0; i < nr; i++) {
struct buffer_head *tbh = arr[i];
lock_buffer(tbh);
tbh->b_end_io = end_buffer_read_file_async;
tbh->b_end_io = end_buffer_read_attr_async;
set_buffer_async_read(tbh);
}
/* Finally, start i/o on the buffers. */
......@@ -220,7 +276,7 @@ static int ntfs_file_read_block(struct page *page)
return 0;
}
/* No i/o was scheduled on any of the buffers. */
if (!PageError(page))
if (likely(!PageError(page)))
SetPageUptodate(page);
else /* Signal synchronous i/o error. */
nr = -EIO;
......@@ -234,17 +290,17 @@ static int ntfs_file_read_block(struct page *page)
* @page: page cache page to fill with data
*
* For non-resident attributes, ntfs_file_readpage() fills the @page of the open
* file @file by calling the generic block_read_full_page() function provided by
* the kernel which in turn invokes our ntfs_file_get_block() callback in order
* to create and read in the buffers associated with the page asynchronously.
* file @file by calling the ntfs version of the generic block_read_full_page()
* function provided by the kernel, ntfs_attr_read_block(), which in turn
* creates and reads in the buffers associated with the page asynchronously.
*
* For resident attributes, OTOH, ntfs_file_readpage() fills @page by copying
* the data from the mft record (which at this stage is most likely in memory)
* and fills the remainder with zeroes. Thus, in this case I/O is synchronous,
* and fills the remainder with zeroes. Thus, in this case, I/O is synchronous,
* as even if the mft record is not cached at this point in time, we need to
* wait for it to be read in before we can do the copy.
*
* Return zero on success or -errno on error.
* Return 0 on success or -errno on error.
*/
static int ntfs_file_readpage(struct file *file, struct page *page)
{
......@@ -256,43 +312,43 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
u32 attr_len;
int err = 0;
if (!PageLocked(page))
if (unlikely(!PageLocked(page)))
PAGE_BUG(page);
ni = NTFS_I(page->mapping->host);
/* Is the unnamed $DATA attribute resident? */
if (test_bit(NI_NonResident, &ni->state)) {
if (NInoNonResident(ni)) {
/* Attribute is not resident. */
/* If the file is encrypted, we deny access, just like NT4. */
if (test_bit(NI_Encrypted, &ni->state)) {
if (NInoEncrypted(ni)) {
err = -EACCES;
goto unl_err_out;
}
/* Compressed data stream. Handled in compress.c. */
if (test_bit(NI_Compressed, &ni->state))
if (NInoCompressed(ni))
return ntfs_file_read_compressed_block(page);
/* Normal data stream. */
return ntfs_file_read_block(page);
return ntfs_attr_read_block(page);
}
/* Attribute is resident, implying it is not compressed or encrypted. */
/* Map, pin and lock the mft record for reading. */
mrec = map_mft_record(READ, ni);
if (IS_ERR(mrec)) {
if (unlikely(IS_ERR(mrec))) {
err = PTR_ERR(mrec);
goto unl_err_out;
}
ctx = get_attr_search_ctx(ni, mrec);
if (!ctx) {
if (unlikely(!ctx)) {
err = -ENOMEM;
goto unm_unl_err_out;
}
/* Find the data attribute in the mft record. */
if (!lookup_attr(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx)) {
if (unlikely(!lookup_attr(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx))) {
err = -ENOENT;
goto put_unm_unl_err_out;
}
......@@ -330,6 +386,25 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
return err;
}
/**
* ntfs_mst_readpage - fill a @page of the mft or a directory with data
* @file: open file/directory to which the @page belongs or NULL
* @page: page cache page to fill with data
*
* Readpage method for the VFS address space operations of directory inodes
* and the $MFT/$DATA attribute.
*
* We just call ntfs_attr_read_block() here; in fact, we only need this wrapper
* because of the difference in function parameters.
*/
int ntfs_mst_readpage(struct file *file, struct page *page)
{
if (unlikely(!PageLocked(page)))
PAGE_BUG(page);
return ntfs_attr_read_block(page);
}
/**
* end_buffer_read_mftbmp_async -
*
......@@ -343,7 +418,7 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
struct buffer_head *tmp;
struct page *page;
if (uptodate)
if (likely(uptodate))
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
......@@ -386,7 +461,7 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
}
spin_unlock_irqrestore(&page_uptodate_lock, flags);
if (!PageError(page))
if (likely(!PageError(page)))
SetPageUptodate(page);
unlock_page(page);
return;
......@@ -410,7 +485,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
int nr, i;
unsigned char blocksize_bits;
if (!PageLocked(page))
if (unlikely(!PageLocked(page)))
PAGE_BUG(page);
blocksize = vol->sb->s_blocksize;
......@@ -419,7 +494,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
bh = head = page_buffers(page);
if (!bh)
if (unlikely(!bh))
return -ENOMEM;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
......@@ -503,264 +578,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
return 0;
}
/* No i/o was scheduled on any of the buffers. */
if (!PageError(page))
SetPageUptodate(page);
else /* Signal synchronous i/o error. */
nr = -EIO;
unlock_page(page);
return nr;
}
/**
* end_buffer_read_mst_async - async io completion for reading index records
* @bh: buffer head on which io is completed
* @uptodate: whether @bh is now uptodate or not
*
* Asynchronous I/O completion handler for reading pages belonging to the
* index allocation attribute address space of directory inodes.
*
* Perform the post read mst fixups when all IO on the page has been completed
* and marks the page uptodate or sets the error bit on the page.
*
* Adapted from fs/buffer.c.
*
* NOTE: We use this function as async io completion handler for reading pages
* belonging to the mft data attribute address space, too as this saves
* duplicating an almost identical function. We do this by cheating a little
* bit in setting the index_block_size in the mft ntfs_inode to the mft record
* size of the volume (vol->mft_record_size), and index_block_size_bits to
* mft_record_size_bits, respectively.
*/
static void end_buffer_read_mst_async(struct buffer_head *bh, int uptodate)
{
static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
ntfs_inode *ni;
if (uptodate)
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
page = bh->b_page;
ni = NTFS_I(page->mapping->host);
if (likely(uptodate)) {
s64 file_ofs;
file_ofs = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
/* Check for the current buffer head overflowing. */
if (file_ofs + bh->b_size > ni->initialized_size) {
char *addr;
int ofs = 0;
if (file_ofs < ni->initialized_size)
ofs = ni->initialized_size - file_ofs;
addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
}
} else
SetPageError(page);
spin_lock_irqsave(&page_uptodate_lock, flags);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
while (tmp != bh) {
if (buffer_locked(tmp)) {
if (buffer_async_read(tmp))
goto still_busy;
} else if (!buffer_uptodate(tmp))
SetPageError(page);
tmp = tmp->b_this_page;
}
spin_unlock_irqrestore(&page_uptodate_lock, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups.
*/
if (!PageError(page)) {
char *addr;
unsigned int i, recs, nr_err = 0;
u32 rec_size;
rec_size = ni->_IDM(index_block_size);
recs = PAGE_CACHE_SIZE / rec_size;
addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
for (i = 0; i < recs; i++) {
if (!post_read_mst_fixup((NTFS_RECORD*)(addr +
i * rec_size), rec_size))
continue;
nr_err++;
ntfs_error(ni->vol->sb, "post_read_mst_fixup() failed, "
"corrupt %s record 0x%Lx. Run chkdsk.",
ni->mft_no ? "index" : "mft",
(long long)((page->index <<
PAGE_CACHE_SHIFT >>
ni->_IDM(index_block_size_bits)) + i));
}
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
if (likely(!nr_err && recs))
SetPageUptodate(page);
else {
ntfs_error(ni->vol->sb, "Setting page error, index "
"0x%lx.", page->index);
SetPageError(page);
}
}
unlock_page(page);
return;
still_busy:
spin_unlock_irqrestore(&page_uptodate_lock, flags);
return;
}
/**
* ntfs_mst_readpage - fill a @page of the mft or a directory with data
* @file: open file/directory to which the page @page belongs or NULL
* @page: page cache page to fill with data
*
* Readpage method for the VFS address space operations.
*
* Fill the page @page of the $MFT or the open directory @dir. We read each
* buffer asynchronously and when all buffers are read in our io completion
* handler end_buffer_read_mst_async() automatically applies the mst fixups to
* the page before finally marking it uptodate and unlocking it.
*
* Contains an adapted version of fs/buffer.c::block_read_full_page().
*/
int ntfs_mst_readpage(struct file *dir, struct page *page)
{
VCN vcn;
LCN lcn;
ntfs_inode *ni;
ntfs_volume *vol;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
if (!PageLocked(page))
PAGE_BUG(page);
ni = NTFS_I(page->mapping->host);
vol = ni->vol;
blocksize_bits = VFS_I(ni)->i_blkbits;
blocksize = 1 << blocksize_bits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
bh = head = page_buffers(page);
if (!bh)
return -ENOMEM;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->run_list.rl && !ni->mft_no))
panic("NTFS: $MFT/$DATA run list has been unmapped! This is a "
"very serious bug! Cannot continue...");
#endif
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
if (unlikely(buffer_uptodate(bh)))
continue;
if (unlikely(buffer_mapped(bh))) {
arr[nr++] = bh;
continue;
}
bh->b_bdev = vol->sb->s_bdev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
BOOL is_retry = FALSE;
/* Convert iblock into corresponding vcn and offset. */
vcn = (VCN)iblock << blocksize_bits >>
vol->cluster_size_bits;
vcn_ofs = ((VCN)iblock << blocksize_bits) &
vol->cluster_size_mask;
retry_remap:
/* Convert the vcn to the corresponding lcn. */
down_read(&ni->run_list.lock);
lcn = vcn_to_lcn(ni->run_list.rl, vcn);
up_read(&ni->run_list.lock);
/* Successful remap. */
if (lcn >= 0) {
/* Setup buffer head to correct block. */
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
set_buffer_mapped(bh);
/* Only read initialized data blocks. */
if (iblock < zblock) {
arr[nr++] = bh;
continue;
}
/* Fully non-initialized data block, zero it. */
goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
goto handle_hole;
/* If first try and run list unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
is_retry = TRUE;
if (!map_run_list(ni, vcn))
goto retry_remap;
}
/* Hard error, zero out region. */
SetPageError(page);
ntfs_error(vol->sb, "vcn_to_lcn(vcn = 0x%Lx) failed "
"with error code 0x%Lx%s.",
(long long)vcn, (long long)-lcn,
is_retry ? " even after retrying" : "");
// FIXME: Depending on vol->on_errors, do something.
}
/*
* Either iblock was outside lblock limits or vcn_to_lcn()
* returned error. Just zero that portion of the page and set
* the buffer uptodate.
*/
handle_hole:
bh->b_blocknr = -1UL;
clear_buffer_mapped(bh);
handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head);
/* Check we have at least one buffer ready for i/o. */
if (nr) {
/* Lock the buffers. */
for (i = 0; i < nr; i++) {
struct buffer_head *tbh = arr[i];
lock_buffer(tbh);
tbh->b_end_io = end_buffer_read_mst_async;
set_buffer_async_read(tbh);
}
/* Finally, start i/o on the buffers. */
for (i = 0; i < nr; i++)
submit_bh(READ, arr[i]);
return 0;
}
/* No i/o was scheduled on any of the buffers. */
if (!PageError(page))
if (likely(!PageError(page)))
SetPageUptodate(page);
else /* Signal synchronous i/o error. */
nr = -EIO;
......
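To put the readpage split in context: regular data streams keep ntfs_file_readpage(), which special-cases resident, compressed and encrypted attributes, while directory index allocation and $MFT/$DATA go through the new ntfs_mst_readpage() wrapper so the post-read mst fixups get applied. A hypothetical wiring sketch; the real address_space_operations structure names are not shown in this diff:

/* Illustration only -- structure names are made up. */
static struct address_space_operations ntfs_file_aops_example = {
	readpage:	ntfs_file_readpage,	/* normal $DATA streams */
};

static struct address_space_operations ntfs_mst_aops_example = {
	readpage:	ntfs_mst_readpage,	/* $MFT/$DATA and directory indexes */
};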
......@@ -27,7 +27,7 @@
/**
* The little endian Unicode string $I30 as a global constant.
*/
const uchar_t I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
uchar_t I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
const_cpu_to_le16('3'), const_cpu_to_le16('0'),
const_cpu_to_le16(0) };
......
......@@ -38,7 +38,7 @@ typedef struct {
} __attribute__ ((__packed__)) ntfs_name;
/* The little endian Unicode string $I30 as a global constant. */
extern const uchar_t I30[5];
extern uchar_t I30[5];
extern MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni,
const uchar_t *uname, const int uname_len, ntfs_name **res);
......
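Dropping the const qualifier from I30 matters for the fake-inode code added below: ni->name is a plain uchar_t *, and for named index attributes it can point directly at the shared I30 array (that is what the "na->name != I30" test in ntfs_init_locked_inode() relies on) instead of a kmalloc()ed copy. A small illustration of the assignment the const removal permits; the helper shown is hypothetical:

/* Hypothetical helper, for illustration only. */
static void example_use_shared_index_name(ntfs_inode *ni)
{
	/* With "const uchar_t I30[5]" this assignment would discard a
	 * qualifier, since ni->name is a non-const uchar_t pointer. */
	ni->name = I30;
	ni->name_len = 4;
}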
......@@ -24,6 +24,178 @@
#include "ntfs.h"
#include "dir.h"
#include "inode.h"
#include "attrib.h"
/**
* ntfs_attr - ntfs in memory attribute structure
* @mft_no: mft record number of the base mft record of this attribute
* @name: Unicode name of the attribute (NULL if unnamed)
* @name_len: length of @name in Unicode characters (0 if unnamed)
* @type: attribute type (see layout.h)
*
* This structure exists only to provide a small structure for the
* ntfs_iget()/ntfs_test_inode()/ntfs_init_locked_inode() mechanism.
*
* NOTE: Elements are ordered by size to make the structure as compact as
* possible on all architectures.
*/
typedef struct {
unsigned long mft_no;
uchar_t *name;
u32 name_len;
ATTR_TYPES type;
} ntfs_attr;
/**
* ntfs_test_inode - compare two (possibly fake) inodes for equality
* @vi: vfs inode which to test
* @na: ntfs attribute which is being tested with
*
* Compare the ntfs attribute embedded in the ntfs specific part of the vfs
* inode @vi for equality with the ntfs attribute @na.
*
* If searching for the normal file/directory inode, set @na->type to AT_UNUSED.
* @na->name and @na->name_len are then ignored.
*
* Return 1 if the attributes match and 0 if not.
*
* NOTE: This function runs with the inode_lock spin lock held so it is not
* allowed to sleep.
*/
static int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
{
ntfs_inode *ni;
if (vi->i_ino != na->mft_no)
return 0;
ni = NTFS_I(vi);
/* If !NInoAttr(ni), @vi is a normal file or directory inode. */
if (likely(!NInoAttr(ni))) {
/* If not looking for a normal inode this is a mismatch. */
if (unlikely(na->type != AT_UNUSED))
return 0;
} else {
/* A fake inode describing an attribute. */
if (ni->type != na->type)
return 0;
if (ni->name_len != na->name_len)
return 0;
if (na->name_len && memcmp(ni->name, na->name,
na->name_len * sizeof(uchar_t)))
return 0;
}
/* Match! */
return 1;
}
/**
* ntfs_init_locked_inode - initialize an inode
* @vi: vfs inode to initialize
* @na: ntfs attribute which to initialize @vi to
*
* Initialize the vfs inode @vi with the values from the ntfs attribute @na in
* order to enable ntfs_test_inode() to do its work.
*
* If initializing the normal file/directory inode, set @na->type to AT_UNUSED.
* In that case, @na->name and @na->name_len should be set to NULL and 0,
* respectively, although that is not strictly necessary, as
* ntfs_read_locked_inode() will fill them in later.
*
* Return 0 on success and -errno on error.
*
* NOTE: This function runs with the inode_lock spin lock held so it is not
* allowed to sleep. (Hence the GFP_ATOMIC allocation.)
*/
static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
{
ntfs_inode *ni = NTFS_I(vi);
vi->i_ino = na->mft_no;
ni->type = na->type;
ni->name = na->name;
ni->name_len = na->name_len;
/* If initializing a normal inode, we are done. */
if (likely(na->type == AT_UNUSED))
return 0;
/* It is a fake inode. */
NInoSetAttr(ni);
/*
* We have the I30 global constant as an optimization, as it is the name
* in >99.9% of named attributes! The other <0.1% incur a GFP_ATOMIC
* allocation but that is ok. And most attributes are unnamed anyway,
* thus the fraction of named attributes with name != I30 is actually
* absolutely tiny.
*/
if (na->name && na->name_len && na->name != I30) {
unsigned int i;
i = na->name_len * sizeof(uchar_t);
ni->name = (uchar_t*)kmalloc(i + sizeof(uchar_t), GFP_ATOMIC);
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
ni->name[na->name_len] = cpu_to_le16('\0');
}
return 0;
}
typedef int (*test_t)(struct inode *, void *);
typedef int (*set_t)(struct inode *, void *);
static void ntfs_read_locked_inode(struct inode *vi);
/**
* ntfs_iget - obtain a struct inode corresponding to a specific normal inode
* @sb: super block of mounted volume
* @mft_no: mft record number / inode number to obtain
*
* Obtain the struct inode corresponding to a specific normal inode (i.e. a
* file or directory).
*
* If the inode is in the cache, it is just returned with an increased
* reference count. Otherwise, a new struct inode is allocated and initialized,
* and finally ntfs_read_locked_inode() is called to read in the inode and
* fill in the remainder of the inode structure.
*
* Return the struct inode on success. Check the return value with IS_ERR() and
* if true, the function failed and the error code is obtained from PTR_ERR().
*/
struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no)
{
struct inode *vi;
ntfs_attr na;
na.mft_no = mft_no;
na.type = AT_UNUSED;
na.name = NULL;
na.name_len = 0;
vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode,
(set_t)ntfs_init_locked_inode, &na);
if (!vi)
return ERR_PTR(-ENOMEM);
/* If this is a freshly allocated inode, need to read it now. */
if (vi->i_state & I_NEW) {
ntfs_read_locked_inode(vi);
unlock_new_inode(vi);
}
#if 0
// TODO: Enable this and do the follow up cleanup, i.e. remove all the
// bad inode checks. -- BUT: Do we actually want to do this? -- It may
// result in repeated attempts to read a bad inode which is not
// desirable. (AIA)
/*
* There is no point in keeping bad inodes around. This also simplifies
* things in that we never need to check for bad inodes elsewhere.
*/
if (is_bad_inode(vi)) {
iput(vi);
vi = ERR_PTR(-EIO);
}
#endif
return vi;
}
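/*
 * Illustrative sketch only, not part of this patch: once fake attribute
 * inodes are fully wired up, an attribute lookup helper could reuse
 * ntfs_test_inode() and ntfs_init_locked_inode() through iget5_locked()
 * as below. The name ntfs_attr_iget() and its exact signature are
 * hypothetical; reading in the attribute specific parts of the inode is
 * left out.
 */
#if 0
static struct inode *ntfs_attr_iget(struct super_block *sb,
		unsigned long mft_no, ATTR_TYPES type, uchar_t *name,
		u32 name_len)
{
	struct inode *vi;
	ntfs_attr na;

	na.mft_no = mft_no;
	na.type = type;		/* Not AT_UNUSED, so a fake inode is matched. */
	na.name = name;
	na.name_len = name_len;
	vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode,
			(set_t)ntfs_init_locked_inode, &na);
	if (!vi)
		return ERR_PTR(-ENOMEM);
	if (vi->i_state & I_NEW) {
		/* Read in the attribute specific parts of the inode here. */
		unlock_new_inode(vi);
	}
	return vi;
}
#endif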
struct inode *ntfs_alloc_big_inode(struct super_block *sb)
{
......@@ -32,12 +204,12 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb)
ntfs_debug("Entering.");
ni = (ntfs_inode *)kmem_cache_alloc(ntfs_big_inode_cache,
SLAB_NOFS);
if (!ni) {
ntfs_error(sb, "Allocation of NTFS big inode structure "
"failed.");
return NULL;
if (likely(ni != NULL)) {
ni->state = 0;
return VFS_I(ni);
}
return VFS_I(ni);
ntfs_error(sb, "Allocation of NTFS big inode structure failed.");
return NULL;
}
void ntfs_destroy_big_inode(struct inode *inode)
......@@ -49,17 +221,20 @@ void ntfs_destroy_big_inode(struct inode *inode)
kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
}
ntfs_inode *ntfs_alloc_inode(void)
static ntfs_inode *ntfs_alloc_extent_inode(void)
{
ntfs_inode *ni = (ntfs_inode *)kmem_cache_alloc(ntfs_inode_cache,
SLAB_NOFS);
ntfs_debug("Entering.");
if (unlikely(!ni))
ntfs_error(NULL, "Allocation of NTFS inode structure failed.");
return ni;
if (likely(ni != NULL)) {
ni->state = 0;
return ni;
}
ntfs_error(NULL, "Allocation of NTFS inode structure failed.");
return NULL;
}
void ntfs_destroy_inode(ntfs_inode *ni)
void ntfs_destroy_extent_inode(ntfs_inode *ni)
{
ntfs_debug("Entering.");
BUG_ON(atomic_read(&ni->mft_count) || !atomic_dec_and_test(&ni->count));
......@@ -68,27 +243,42 @@ void ntfs_destroy_inode(ntfs_inode *ni)
/**
* __ntfs_init_inode - initialize ntfs specific part of an inode
* @sb: super block of mounted volume
* @ni: freshly allocated ntfs inode which to initialize
*
* Initialize an ntfs inode to defaults.
*
* NOTE: ni->mft_no, ni->state, ni->type, ni->name, and ni->name_len are left
* untouched. Make sure to initialize them elsewhere.
*
* Return zero on success and -ENOMEM on error.
*/
static void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
{
ntfs_debug("Entering.");
memset(ni, 0, sizeof(ntfs_inode));
ni->initialized_size = ni->allocated_size = 0;
ni->seq_no = 0;
atomic_set(&ni->count, 1);
ni->vol = NULL;
ni->vol = NTFS_SB(sb);
init_run_list(&ni->run_list);
init_rwsem(&ni->mrec_lock);
atomic_set(&ni->mft_count, 0);
ni->page = NULL;
ni->page_ofs = 0;
ni->attr_list_size = 0;
ni->attr_list = NULL;
init_run_list(&ni->attr_list_rl);
ni->_IDM(index_block_size) = 0;
ni->_IDM(index_vcn_size) = 0;
ni->_IDM(bmp_size) = 0;
ni->_IDM(bmp_initialized_size) = 0;
ni->_IDM(bmp_allocated_size) = 0;
init_run_list(&ni->_IDM(bmp_rl));
ni->_IDM(index_block_size_bits) = 0;
ni->_IDM(index_vcn_size_bits) = 0;
init_MUTEX(&ni->extent_lock);
ni->nr_extents = 0;
ni->_INE(base_ntfs_ino) = NULL;
ni->vol = NTFS_SB(sb);
return;
}
......@@ -102,13 +292,18 @@ static void ntfs_init_big_inode(struct inode *vi)
return;
}
ntfs_inode *ntfs_new_inode(struct super_block *sb)
ntfs_inode *ntfs_new_extent_inode(struct super_block *sb, unsigned long mft_no)
{
ntfs_inode *ni = ntfs_alloc_inode();
ntfs_inode *ni = ntfs_alloc_extent_inode();
ntfs_debug("Entering.");
if (ni)
if (likely(ni != NULL)) {
__ntfs_init_inode(sb, ni);
ni->mft_no = mft_no;
ni->type = AT_UNUSED;
ni->name = NULL;
ni->name_len = 0;
}
return ni;
}
......@@ -189,18 +384,20 @@ static int ntfs_is_extended_system_file(attr_search_context *ctx)
}
/**
* ntfs_read_inode - read an inode from its device
* ntfs_read_locked_inode - read an inode from its device
* @vi: inode to read
*
* ntfs_read_inode() is called from the VFS iget() function to read the inode
* ntfs_read_locked_inode() is called from ntfs_iget() to read the inode
* described by @vi into memory from the device.
*
* The only fields in @vi that we need to/can look at when the function is
* called are i_sb, pointing to the mounted device's super block, and i_ino,
* the number of the inode to load.
* the number of the inode to load. If this is a fake inode, i.e. NInoAttr(),
* then the fields type, name, and name_len are also valid, and describe the
* attribute which this fake inode represents.
*
* ntfs_read_inode() maps, pins and locks the mft record number i_ino for
* reading and sets up the necessary @vi fields as well as initializing
* ntfs_read_locked_inode() maps, pins and locks the mft record number i_ino
* for reading and sets up the necessary @vi fields as well as initializing
* the ntfs inode.
*
* Q: What locks are held when the function is called?
......@@ -209,9 +406,9 @@ static int ntfs_is_extended_system_file(attr_search_context *ctx)
* i_flags is set to 0 and we have no business touching it. Only an ioctl()
* is allowed to write to them. We should of course be honouring them but
* we need to do that using the IS_* macros defined in include/linux/fs.h.
* In any case ntfs_read_inode() has nothing to do with i_flags at all.
* In any case ntfs_read_locked_inode() has nothing to do with i_flags.
*/
void ntfs_read_inode(struct inode *vi)
static void ntfs_read_locked_inode(struct inode *vi)
{
ntfs_volume *vol = NTFS_SB(vi->i_sb);
ntfs_inode *ni;
......@@ -239,7 +436,8 @@ void ntfs_read_inode(struct inode *vi)
/*
* Initialize the ntfs specific part of @vi special casing
* FILE_MFT which we need to do at mount time.
* FILE_MFT which we need to do at mount time. This also sets
* ni->mft_no to vi->i_ino.
*/
if (vi->i_ino != FILE_MFT)
ntfs_init_big_inode(vi);
......@@ -358,13 +556,14 @@ void ntfs_read_inode(struct inode *vi)
if (vi->i_ino == FILE_MFT)
goto skip_attr_list_load;
ntfs_debug("Attribute list found in inode 0x%lx.", vi->i_ino);
ni->state |= 1 << NI_AttrList;
NInoSetAttrList(ni);
if (ctx->attr->flags & ATTR_IS_ENCRYPTED ||
ctx->attr->flags & ATTR_COMPRESSION_MASK) {
ctx->attr->flags & ATTR_COMPRESSION_MASK ||
ctx->attr->flags & ATTR_IS_SPARSE) {
ntfs_error(vi->i_sb, "Attribute list attribute is "
"compressed/encrypted. Not allowed. "
"Corrupt inode. You should run "
"chkdsk.");
"compressed/encrypted/sparse. Not "
"allowed. Corrupt inode. You should "
"run chkdsk.");
goto put_unm_err_out;
}
/* Now allocate memory for the attribute list. */
......@@ -377,7 +576,7 @@ void ntfs_read_inode(struct inode *vi)
goto ec_put_unm_err_out;
}
if (ctx->attr->non_resident) {
ni->state |= 1 << NI_AttrListNonResident;
NInoSetAttrListNonResident(ni);
if (ctx->attr->_ANR(lowest_vcn)) {
ntfs_error(vi->i_sb, "Attribute list has non "
"zero lowest_vcn. Inode is "
......@@ -459,7 +658,7 @@ void ntfs_read_inode(struct inode *vi)
* encrypted.
*/
if (ctx->attr->flags & ATTR_COMPRESSION_MASK)
ni->state |= 1 << NI_Compressed;
NInoSetCompressed(ni);
if (ctx->attr->flags & ATTR_IS_ENCRYPTED) {
if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "Found encrypted and "
......@@ -467,8 +666,10 @@ void ntfs_read_inode(struct inode *vi)
"allowed.");
goto put_unm_err_out;
}
ni->state |= 1 << NI_Encrypted;
NInoSetEncrypted(ni);
}
if (ctx->attr->flags & ATTR_IS_SPARSE)
NInoSetSparse(ni);
ir = (INDEX_ROOT*)((char*)ctx->attr +
le16_to_cpu(ctx->attr->_ARA(value_offset)));
ir_end = (char*)ir + le32_to_cpu(ctx->attr->_ARA(value_length));
......@@ -530,12 +731,19 @@ void ntfs_read_inode(struct inode *vi)
ni->_IDM(index_vcn_size) = vol->sector_size;
ni->_IDM(index_vcn_size_bits) = vol->sector_size_bits;
}
/* Setup the index allocation attribute, even if not present. */
NInoSetMstProtected(ni);
ni->type = AT_INDEX_ALLOCATION;
ni->name = I30;
ni->name_len = 4;
if (!(ir->index.flags & LARGE_INDEX)) {
/* No index allocation. */
vi->i_size = ni->initialized_size = 0;
goto skip_large_dir_stuff;
} /* LARGE_INDEX: Index allocation present. Setup state. */
ni->state |= 1 << NI_NonResident;
NInoSetIndexAllocPresent(ni);
/* Find index allocation attribute. */
reinit_attr_search_ctx(ctx);
if (!lookup_attr(AT_INDEX_ALLOCATION, I30, 4, CASE_SENSITIVE,
......@@ -555,6 +763,11 @@ void ntfs_read_inode(struct inode *vi)
"is encrypted.");
goto put_unm_err_out;
}
if (ctx->attr->flags & ATTR_IS_SPARSE) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is sparse.");
goto put_unm_err_out;
}
if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is compressed.");
......@@ -581,13 +794,13 @@ void ntfs_read_inode(struct inode *vi)
goto put_unm_err_out;
}
if (ctx->attr->flags & (ATTR_COMPRESSION_MASK |
ATTR_IS_ENCRYPTED)) {
ATTR_IS_ENCRYPTED | ATTR_IS_SPARSE)) {
ntfs_error(vi->i_sb, "$BITMAP attribute is compressed "
"and/or encrypted.");
"and/or encrypted and/or sparse.");
goto put_unm_err_out;
}
if (ctx->attr->non_resident) {
ni->state |= 1 << NI_BmpNonResident;
NInoSetBmpNonResident(ni);
if (ctx->attr->_ANR(lowest_vcn)) {
ntfs_error(vi->i_sb, "First extent of $BITMAP "
"attribute has non zero "
......@@ -645,8 +858,15 @@ void ntfs_read_inode(struct inode *vi)
vi->i_fop = &ntfs_dir_ops;
vi->i_mapping->a_ops = &ntfs_dir_aops;
} else {
/* It is a file: find first extent of unnamed data attribute. */
/* It is a file. */
reinit_attr_search_ctx(ctx);
/* Setup the data attribute, even if not present. */
ni->type = AT_DATA;
ni->name = NULL;
ni->name_len = 0;
/* Find first extent of the unnamed data attribute. */
if (!lookup_attr(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx)) {
vi->i_size = ni->initialized_size =
ni->allocated_size = 0LL;
......@@ -675,9 +895,9 @@ void ntfs_read_inode(struct inode *vi)
}
/* Setup the state. */
if (ctx->attr->non_resident) {
ni->state |= 1 << NI_NonResident;
NInoSetNonResident(ni);
if (ctx->attr->flags & ATTR_COMPRESSION_MASK) {
ni->state |= 1 << NI_Compressed;
NInoSetCompressed(ni);
if (vol->cluster_size > 4096) {
ntfs_error(vi->i_sb, "Found "
"compressed data but "
......@@ -707,8 +927,9 @@ void ntfs_read_inode(struct inode *vi)
goto ec_put_unm_err_out;
}
ni->_ICF(compression_block_size) = 1U << (
ctx->attr->_ANR(compression_unit)
+ vol->cluster_size_bits);
ctx->attr->_ANR(
compression_unit) +
vol->cluster_size_bits);
ni->_ICF(compression_block_size_bits) = ffs(
ni->_ICF(compression_block_size)) - 1;
}
......@@ -718,8 +939,10 @@ void ntfs_read_inode(struct inode *vi)
"and compressed data.");
goto put_unm_err_out;
}
ni->state |= 1 << NI_Encrypted;
NInoSetEncrypted(ni);
}
if (ctx->attr->flags & ATTR_IS_SPARSE)
NInoSetSparse(ni);
if (ctx->attr->_ANR(lowest_vcn)) {
ntfs_error(vi->i_sb, "First extent of $DATA "
"attribute has non zero "
......@@ -852,15 +1075,24 @@ void ntfs_read_inode_mount(struct inode *vi)
ntfs_debug("Entering.");
/* Initialize the ntfs specific part of @vi. */
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
if (vi->i_ino != FILE_MFT) {
ntfs_error(sb, "Called for inode 0x%lx but only inode %d "
"allowed.", vi->i_ino, FILE_MFT);
goto err_out;
}
/* Initialize the ntfs specific part of @vi. */
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
/* Setup the data attribute. It is special as it is mst protected. */
NInoSetNonResident(ni);
NInoSetMstProtected(ni);
ni->type = AT_DATA;
ni->name = NULL;
ni->name_len = 0;
/*
* This sets up our little cheat allowing us to reuse the async io
* completion handler for directories.
......@@ -930,13 +1162,14 @@ void ntfs_read_inode_mount(struct inode *vi)
u8 *al_end;
ntfs_debug("Attribute list attribute found in $MFT.");
ni->state |= 1 << NI_AttrList;
NInoSetAttrList(ni);
if (ctx->attr->flags & ATTR_IS_ENCRYPTED ||
ctx->attr->flags & ATTR_COMPRESSION_MASK) {
ctx->attr->flags & ATTR_COMPRESSION_MASK ||
ctx->attr->flags & ATTR_IS_SPARSE) {
ntfs_error(sb, "Attribute list attribute is "
"compressed/encrypted. Not allowed. "
"$MFT is corrupt. You should run "
"chkdsk.");
"compressed/encrypted/sparse. Not "
"allowed. $MFT is corrupt. You should "
"run chkdsk.");
goto put_err_out;
}
/* Now allocate memory for the attribute list. */
......@@ -948,7 +1181,7 @@ void ntfs_read_inode_mount(struct inode *vi)
goto put_err_out;
}
if (ctx->attr->non_resident) {
ni->state |= 1 << NI_AttrListNonResident;
NInoSetAttrListNonResident(ni);
if (ctx->attr->_ANR(lowest_vcn)) {
ntfs_error(sb, "Attribute list has non zero "
"lowest_vcn. $MFT is corrupt. "
......@@ -1071,11 +1304,13 @@ void ntfs_read_inode_mount(struct inode *vi)
}
/* $MFT must be uncompressed and unencrypted. */
if (attr->flags & ATTR_COMPRESSION_MASK ||
attr->flags & ATTR_IS_ENCRYPTED) {
ntfs_error(sb, "$MFT must be uncompressed and "
"unencrypted but a compressed/"
"encrypted extent was found. "
"$MFT is corrupt. Run chkdsk.");
attr->flags & ATTR_IS_ENCRYPTED ||
attr->flags & ATTR_IS_SPARSE) {
ntfs_error(sb, "$MFT must be uncompressed, "
"non-sparse, and unencrypted but a "
"compressed/sparse/encrypted extent "
"was found. $MFT is corrupt. Run "
"chkdsk.");
goto put_err_out;
}
/*
......@@ -1123,7 +1358,7 @@ void ntfs_read_inode_mount(struct inode *vi)
ntfs_error(sb, "$MFT is too big! Aborting.");
goto put_err_out;
}
vol->_VMM(nr_mft_records) = ll;
vol->nr_mft_records = ll;
/*
* We have got the first extent of the run_list for
* $MFT which means it is now relatively safe to call
......@@ -1149,7 +1384,7 @@ void ntfs_read_inode_mount(struct inode *vi)
* ntfs_read_inode() on extents of $MFT/$DATA. But let's
* hope this never happens...
*/
ntfs_read_inode(vi);
ntfs_read_locked_inode(vi);
if (is_bad_inode(vi)) {
ntfs_error(sb, "ntfs_read_inode() of $MFT "
"failed. BUG or corrupt $MFT. "
......@@ -1296,29 +1531,42 @@ void __ntfs_clear_inode(ntfs_inode *ni)
// FIXME: Handle dirty case for each extent inode!
for (i = 0; i < ni->nr_extents; i++)
ntfs_destroy_inode(ni->_INE(extent_ntfs_inos)[i]);
ntfs_clear_extent_inode(ni->_INE(extent_ntfs_inos)[i]);
kfree(ni->_INE(extent_ntfs_inos));
}
/* Free all allocated memory. */
down_write(&ni->run_list.lock);
ntfs_free(ni->run_list.rl);
ni->run_list.rl = NULL;
if (ni->run_list.rl) {
ntfs_free(ni->run_list.rl);
ni->run_list.rl = NULL;
}
up_write(&ni->run_list.lock);
ntfs_free(ni->attr_list);
if (ni->attr_list) {
ntfs_free(ni->attr_list);
ni->attr_list = NULL;
}
down_write(&ni->attr_list_rl.lock);
ntfs_free(ni->attr_list_rl.rl);
ni->attr_list_rl.rl = NULL;
if (ni->attr_list_rl.rl) {
ntfs_free(ni->attr_list_rl.rl);
ni->attr_list_rl.rl = NULL;
}
up_write(&ni->attr_list_rl.lock);
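/*
 * Free the attribute name only if it was copied into its own allocation
 * (as ntfs_init_locked_inode() does for names other than I30); the I30
 * global constant is shared and must never be freed.
 */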
if (ni->name_len && ni->name != I30) {
/* Catch bugs... */
BUG_ON(!ni->name);
kfree(ni->name);
}
}
void ntfs_clear_inode(ntfs_inode *ni)
void ntfs_clear_extent_inode(ntfs_inode *ni)
{
__ntfs_clear_inode(ni);
/* Bye, bye... */
ntfs_destroy_inode(ni);
ntfs_destroy_extent_inode(ni);
}
/**
......@@ -1339,7 +1587,8 @@ void ntfs_clear_big_inode(struct inode *vi)
if (S_ISDIR(vi->i_mode)) {
down_write(&ni->_IDM(bmp_rl).lock);
ntfs_free(ni->_IDM(bmp_rl).rl);
if (ni->_IDM(bmp_rl).rl)
ntfs_free(ni->_IDM(bmp_rl).rl);
up_write(&ni->_IDM(bmp_rl).lock);
}
return;
......
......@@ -3,7 +3,7 @@
* the Linux-NTFS project.
*
* Copyright (c) 2001,2002 Anton Altaparmakov.
* Copyright (C) 2002 Richard Russon.
* Copyright (c) 2002 Richard Russon.
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
......@@ -26,6 +26,7 @@
#include <linux/seq_file.h>
#include "layout.h"
#include "volume.h"
typedef struct _ntfs_inode ntfs_inode;
......@@ -38,21 +39,39 @@ struct _ntfs_inode {
s64 initialized_size; /* Copy from $DATA/$INDEX_ALLOCATION. */
s64 allocated_size; /* Copy from $DATA/$INDEX_ALLOCATION. */
unsigned long state; /* NTFS specific flags describing this inode.
See fs/ntfs/ntfs.h:ntfs_inode_state_bits. */
See ntfs_inode_state_bits below. */
unsigned long mft_no; /* Number of the mft record / inode. */
u16 seq_no; /* Sequence number of the mft record. */
atomic_t count; /* Inode reference count for book keeping. */
ntfs_volume *vol; /* Pointer to the ntfs volume of this inode. */
/*
* If NInoAttr() is true, the below fields describe the attribute which
* this fake inode belongs to. The actual inode of this attribute is
* pointed to by base_ntfs_ino and nr_extents is always set to -1 (see
* below). For real inodes, we also set the type (AT_DATA for files and
* AT_INDEX_ALLOCATION for directories), with the name = NULL and
* name_len = 0 for files and name = I30 (global constant) and
* name_len = 4 for directories.
*/
ATTR_TYPES type; /* Attribute type of this fake inode. */
uchar_t *name; /* Attribute name of this fake inode. */
u32 name_len; /* Attribute name length of this fake inode. */
run_list run_list; /* If state has the NI_NonResident bit set,
the run list of the unnamed data attribute
(if a file) or of the index allocation
attribute (directory). If run_list.rl is
NULL, the run list has not been read in or
has been unmapped. If NI_NonResident is
clear, the unnamed data attribute is
resident (file) or there is no $I30 index
allocation attribute (directory). In that
case run_list.rl is always NULL.*/
attribute (directory) or of the attribute
described by the fake inode (if NInoAttr()).
If run_list.rl is NULL, the run list has not
been read in yet or has been unmapped. If
NI_NonResident is clear, the attribute is
resident (file and fake inode) or there is
no $I30 index allocation attribute
(small directory). In the latter case
run_list.rl is always NULL.*/
/*
* The following fields are only valid for real inodes and extent
* inodes.
*/
struct rw_semaphore mrec_lock; /* Lock for serializing access to the
mft record belonging to this inode. */
atomic_t mft_count; /* Mapping reference count for book keeping. */
......@@ -74,17 +93,18 @@ struct _ntfs_inode {
union {
struct { /* It is a directory or $MFT. */
u32 index_block_size; /* Size of an index block. */
u8 index_block_size_bits; /* Log2 of the above. */
u32 index_vcn_size; /* Size of a vcn in this
directory index. */
u8 index_vcn_size_bits; /* Log2 of the above. */
s64 bmp_size; /* Size of the $I30 bitmap. */
s64 bmp_initialized_size; /* Copy from $I30 bitmap. */
s64 bmp_allocated_size; /* Copy from $I30 bitmap. */
run_list bmp_rl; /* Run list for the $I30 bitmap
if it is non-resident. */
u8 index_block_size_bits; /* Log2 of the above. */
u8 index_vcn_size_bits; /* Log2 of the above. */
} SN(idm);
struct { /* It is a compressed file. */
struct { /* It is a compressed file or fake inode. */
s64 compressed_size; /* Copy from $DATA. */
u32 compression_block_size; /* Size of a compression
block (cb). */
u8 compression_block_size_bits; /* Log2 of the size of
......@@ -92,13 +112,13 @@ struct _ntfs_inode {
u8 compression_block_clusters; /* Number of clusters
per compression
block. */
s64 compressed_size; /* Copy from $DATA. */
} SN(icf);
} SN(idc);
struct semaphore extent_lock; /* Lock for accessing/modifying the
below fields. */
s32 nr_extents; /* For a base mft record, the number of attached extent
inodes (0 if none), for extent records this is -1. */
inodes (0 if none), for extent records and for fake
inodes describing an attribute this is -1. */
union { /* This union is only used if nr_extents != 0. */
ntfs_inode **extent_ntfs_inos; /* For nr_extents > 0, array of
the ntfs inodes of the extent
......@@ -107,7 +127,9 @@ struct _ntfs_inode {
been loaded. */
ntfs_inode *base_ntfs_ino; /* For nr_extents == -1, the
ntfs inode of the base mft
record. */
record. For fake inodes, the
real (base) inode to which
the attribute belongs. */
} SN(ine);
};
......@@ -115,6 +137,79 @@ struct _ntfs_inode {
#define _ICF(X) SC(idc.icf,X)
#define _INE(X) SC(ine,X)
/*
* Defined bits for the state field in the ntfs_inode structure.
* (f) = files only, (d) = directories only, (a) = attributes/fake inodes only
*/
typedef enum {
NI_Dirty, /* 1: Mft record needs to be written to disk. */
NI_AttrList, /* 1: Mft record contains an attribute list. */
NI_AttrListNonResident, /* 1: Attribute list is non-resident. Implies
NI_AttrList is set. */
NI_Attr, /* 1: Fake inode for attribute i/o.
0: Real inode or extent inode. */
NI_MstProtected, /* 1: Attribute is protected by MST fixups.
0: Attribute is not protected by fixups. */
NI_NonResident, /* 1: Unnamed data attr is non-resident (f).
1: Attribute is non-resident (a). */
NI_IndexAllocPresent = NI_NonResident, /* 1: $I30 index alloc attr is
present (d). */
NI_Compressed, /* 1: Unnamed data attr is compressed (f).
1: Create compressed files by default (d).
1: Attribute is compressed (a). */
NI_Encrypted, /* 1: Unnamed data attr is encrypted (f).
1: Create encrypted files by default (d).
1: Attribute is encrypted (a). */
NI_Sparse, /* 1: Unnamed data attr is sparse (f).
1: Create sparse files by default (d).
1: Attribute is sparse (a). */
NI_BmpNonResident, /* 1: $I30 bitmap attr is non resident (d). */
} ntfs_inode_state_bits;
/*
* NOTE: We should be adding dirty mft records to a list somewhere and they
* should be independent of the (ntfs/vfs) inode structure so that an inode can
* be removed but the record can be left dirty for syncing later.
*/
/*
* Macro tricks to expand the NInoFoo(), NInoSetFoo(), and NInoClearFoo()
* functions.
*/
#define NINO_FNS(flag) \
static inline int NIno##flag(ntfs_inode *ni) \
{ \
return test_bit(NI_##flag, &(ni)->state); \
} \
static inline void NInoSet##flag(ntfs_inode *ni) \
{ \
set_bit(NI_##flag, &(ni)->state); \
} \
static inline void NInoClear##flag(ntfs_inode *ni) \
{ \
clear_bit(NI_##flag, &(ni)->state); \
}
/* Emit the ntfs inode bitops functions. */
NINO_FNS(Dirty)
NINO_FNS(AttrList)
NINO_FNS(AttrListNonResident)
NINO_FNS(Attr)
NINO_FNS(MstProtected)
NINO_FNS(NonResident)
NINO_FNS(IndexAllocPresent)
NINO_FNS(Compressed)
NINO_FNS(Encrypted)
NINO_FNS(Sparse)
NINO_FNS(BmpNonResident)
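/*
 * For illustration only (this is what the preprocessor generates, not
 * additional code): NINO_FNS(Dirty) above expands to the three inline
 * helpers
 *
 *	static inline int NInoDirty(ntfs_inode *ni)
 *	{
 *		return test_bit(NI_Dirty, &(ni)->state);
 *	}
 *	static inline void NInoSetDirty(ntfs_inode *ni)
 *	{
 *		set_bit(NI_Dirty, &(ni)->state);
 *	}
 *	static inline void NInoClearDirty(ntfs_inode *ni)
 *	{
 *		clear_bit(NI_Dirty, &(ni)->state);
 *	}
 *
 * which take over from the NInoDirty()/NInoSetDirty()/NInoClearDirty()
 * macros removed from ntfs.h further down in this patch.
 */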
/*
* The full structure containing a ntfs_inode and a vfs struct inode. Used for
* all real and fake inodes but not for extent inodes which lack the vfs struct
* inode.
*/
typedef struct {
ntfs_inode ntfs_inode;
struct inode vfs_inode; /* The vfs inode structure. */
......@@ -136,14 +231,16 @@ static inline struct inode *VFS_I(ntfs_inode *ni)
return &((big_ntfs_inode*)ni)->vfs_inode;
}
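/*
 * Sketch only: the inverse helper NTFS_I(), used throughout this patch, is
 * defined elsewhere in this header and does not appear in the diff. Since
 * ntfs_inode is the first member of big_ntfs_inode and vfs_inode is
 * embedded directly after it, the conversion presumably amounts to a
 * container-of style offset subtraction, along the lines of
 *
 *	static inline ntfs_inode *NTFS_I(struct inode *inode)
 *	{
 *		return (ntfs_inode *)list_entry(inode, big_ntfs_inode,
 *				vfs_inode);
 *	}
 *
 * The exact implementation may differ; this only illustrates the layout
 * assumption behind VFS_I() above.
 */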
extern struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no);
extern struct inode *ntfs_alloc_big_inode(struct super_block *sb);
extern void ntfs_destroy_big_inode(struct inode *inode);
extern void ntfs_clear_big_inode(struct inode *vi);
extern ntfs_inode *ntfs_new_inode(struct super_block *sb);
extern void ntfs_clear_inode(ntfs_inode *ni);
extern ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
unsigned long mft_no);
extern void ntfs_clear_extent_inode(ntfs_inode *ni);
extern void ntfs_read_inode(struct inode *vi);
extern void ntfs_read_inode_mount(struct inode *vi);
extern void ntfs_dirty_inode(struct inode *vi);
......
......@@ -102,7 +102,7 @@ extern int ntfs_mst_readpage(struct file *, struct page *);
* ntfs_mft_aops - address space operations for access to $MFT
*
* Address space operations for access to $MFT. This allows us to simply use
* read_cache_page() in map_mft_record().
* ntfs_map_page() in map_mft_record_page().
*/
struct address_space_operations ntfs_mft_aops = {
writepage: NULL, /* Write dirty page to disk. */
......@@ -334,9 +334,9 @@ void unmap_mft_record(const int rw, ntfs_inode *ni)
/*
* If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
* ntfs_clear_inode() in the extent inode case, and to the caller in
* the non-extent, yet pure ntfs inode case, to do the actual tear
* down of all structures and freeing of all allocated memory.
* ntfs_clear_extent_inode() in the extent inode case, and to the
* caller in the non-extent, yet pure ntfs inode case, to do the actual
* tear down of all structures and freeing of all allocated memory.
*/
return;
}
......@@ -417,14 +417,13 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
return m;
}
/* Record wasn't there. Get a new ntfs inode and initialize it. */
ni = ntfs_new_inode(base_ni->vol->sb);
ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
if (!ni) {
up(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
return ERR_PTR(-ENOMEM);
}
ni->vol = base_ni->vol;
ni->mft_no = mft_no;
ni->seq_no = seq_no;
ni->nr_extents = -1;
ni->_INE(base_ntfs_ino) = base_ni;
......@@ -433,7 +432,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
if (IS_ERR(m)) {
up(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
ntfs_clear_inode(ni);
ntfs_clear_extent_inode(ni);
goto map_err_out;
}
/* Verify the sequence number. */
......@@ -479,7 +478,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
* release it or we will leak memory.
*/
if (destroy_ni)
ntfs_clear_inode(ni);
ntfs_clear_extent_inode(ni);
return m;
}
......@@ -38,8 +38,8 @@
* supplying the name of the inode in @dent->d_name.name. ntfs_lookup()
* converts the name to Unicode and walks the contents of the directory inode
* @dir_ino looking for the converted Unicode name. If the name is found in the
* directory, the corresponding inode is loaded by calling iget() on its inode
* number and the inode is associated with the dentry @dent via a call to
* directory, the corresponding inode is loaded by calling ntfs_iget() on its
* inode number and the inode is associated with the dentry @dent via a call to
* d_add().
*
* If the name is not found in the directory, a NULL inode is inserted into the
......@@ -111,9 +111,9 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent)
kmem_cache_free(ntfs_name_cache, uname);
if (!IS_ERR_MREF(mref)) {
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling iget.", dent_ino);
dent_inode = iget(vol->sb, dent_ino);
if (dent_inode) {
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
if (likely(!IS_ERR(dent_inode))) {
/* Consistency check. */
if (MSEQNO(mref) == NTFS_I(dent_inode)->seq_no ||
dent_ino == FILE_MFT) {
......@@ -132,16 +132,19 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent)
ntfs_error(vol->sb, "Found stale reference to inode "
"0x%lx (reference sequence number = "
"0x%x, inode sequence number = 0x%x, "
"returning -EACCES. Run chkdsk.",
"returning -EIO. Run chkdsk.",
dent_ino, MSEQNO(mref),
NTFS_I(dent_inode)->seq_no);
iput(dent_inode);
dent_inode = ERR_PTR(-EIO);
} else
ntfs_error(vol->sb, "iget(0x%lx) failed, returning "
"-EACCES.", dent_ino);
ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with "
"error code %li.", dent_ino,
PTR_ERR(dent_inode));
if (name)
kfree(name);
return ERR_PTR(-EACCES);
/* Return the error code. */
return (struct dentry *)dent_inode;
}
/* It is guaranteed that name is no longer allocated at this point. */
if (MREF_ERR(mref) == -ENOENT) {
......@@ -256,7 +259,8 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent)
BUG_ON(real_dent->d_inode != dent_inode);
/*
* Already have the inode and the dentry attached, decrement
* the reference count to balance the iget() we did earlier on.
* the reference count to balance the ntfs_iget() we did
* earlier on.
*/
iput(dent_inode);
return real_dent;
......
......@@ -53,41 +53,6 @@ typedef enum {
NTFS_MAX_NAME_LEN = 255,
} NTFS_CONSTANTS;
/*
* Defined bits for the state field in the ntfs_inode structure.
* (f) = files only, (d) = directories only
*/
typedef enum {
NI_Dirty, /* 1: Mft record needs to be written to disk. */
NI_AttrList, /* 1: Mft record contains an attribute list. */
NI_AttrListNonResident, /* 1: Attribute list is non-resident. Implies
NI_AttrList is set. */
NI_NonResident, /* 1: Unnamed data attr is non-resident (f).
1: $I30 index alloc attr is present (d). */
NI_Compressed, /* 1: Unnamed data attr is compressed (f).
1: Create compressed files by default (d). */
NI_Encrypted, /* 1: Unnamed data attr is encrypted (f).
1: Create encrypted files by default (d). */
NI_BmpNonResident, /* 1: $I30 bitmap attr is non resident (d). */
} ntfs_inode_state_bits;
/*
* NOTE: We should be adding dirty mft records to a list somewhere and they
* should be independent of the (ntfs/vfs) inode structure so that an inode can
* be removed but the record can be left dirty for syncing later.
*/
#define NInoDirty(n_ino) test_bit(NI_Dirty, &(n_ino)->state)
#define NInoSetDirty(n_ino) set_bit(NI_Dirty, &(n_ino)->state)
#define NInoClearDirty(n_ino) clear_bit(NI_Dirty, &(n_ino)->state)
#define NInoAttrList(n_ino) test_bit(NI_AttrList, &(n_ino)->state)
#define NInoNonResident(n_ino) test_bit(NI_NonResident, &(n_ino)->state)
#define NInoIndexAllocPresent(n_ino) test_bit(NI_NonResident, &(n_ino)->state)
#define NInoCompressed(n_ino) test_bit(NI_Compressed, &(n_ino)->state)
#define NInoEncrypted(n_ino) test_bit(NI_Encrypted, &(n_ino)->state)
#define NInoBmpNonResident(n_ino) test_bit(NI_BmpNonResident, &(n_ino)->state)
/* Global variables. */
/* Slab caches (from super.c). */
......
......@@ -605,17 +605,17 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
sizeof(unsigned long) * 4);
return FALSE;
}
vol->_VCL(nr_clusters) = ll;
ntfs_debug("vol->nr_clusters = 0x%Lx", (long long)vol->_VCL(nr_clusters));
vol->nr_clusters = ll;
ntfs_debug("vol->nr_clusters = 0x%Lx", (long long)vol->nr_clusters);
ll = sle64_to_cpu(b->mft_lcn);
if (ll >= vol->_VCL(nr_clusters)) {
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFT LCN is beyond end of volume. Weird.");
return FALSE;
}
vol->mft_lcn = ll;
ntfs_debug("vol->mft_lcn = 0x%Lx", (long long)vol->mft_lcn);
ll = sle64_to_cpu(b->mftmirr_lcn);
if (ll >= vol->_VCL(nr_clusters)) {
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFTMirr LCN is beyond end of volume. "
"Weird.");
return FALSE;
......@@ -629,7 +629,7 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
* Determine MFT zone size. This is not strictly the right place to do
* this, but I am too lazy to create a function especially for it...
*/
vol->mft_zone_end = vol->_VCL(nr_clusters);
vol->mft_zone_end = vol->nr_clusters;
switch (vol->mft_zone_multiplier) { /* % of volume size in clusters */
case 4:
vol->mft_zone_end = vol->mft_zone_end >> 1; /* 50% */
......@@ -678,9 +678,9 @@ static BOOL load_and_init_upcase(ntfs_volume *vol)
ntfs_debug("Entering.");
/* Read upcase table and setup vol->upcase and vol->upcase_len. */
ino = iget(sb, FILE_UpCase);
if (!ino || is_bad_inode(ino)) {
if (ino)
ino = ntfs_iget(sb, FILE_UpCase);
if (IS_ERR(ino) || is_bad_inode(ino)) {
if (!IS_ERR(ino))
iput(ino);
goto upcase_failed;
}
......@@ -848,7 +848,7 @@ static BOOL load_system_files(ntfs_volume *vol)
vol->mftbmp_allocated_size =
sle64_to_cpu(attr->_ANR(allocated_size));
/* Consistency check. */
if (vol->mftbmp_size < (vol->_VMM(nr_mft_records) + 7) >> 3) {
if (vol->mftbmp_size < (vol->nr_mft_records + 7) >> 3) {
ntfs_error(sb, "$MFT/$BITMAP is too short to "
"contain a complete mft "
"bitmap: impossible. $MFT is "
......@@ -914,9 +914,9 @@ static BOOL load_system_files(ntfs_volume *vol)
// volume read-write...
/* Get mft mirror inode. */
vol->mftmirr_ino = iget(sb, FILE_MFTMirr);
if (!vol->mftmirr_ino || is_bad_inode(vol->mftmirr_ino)) {
if (is_bad_inode(vol->mftmirr_ino))
vol->mftmirr_ino = ntfs_iget(sb, FILE_MFTMirr);
if (IS_ERR(vol->mftmirr_ino) || is_bad_inode(vol->mftmirr_ino)) {
if (!IS_ERR(vol->mftmirr_ino))
iput(vol->mftmirr_ino);
ntfs_error(sb, "Failed to load $MFTMirr.");
return FALSE;
......@@ -932,13 +932,13 @@ static BOOL load_system_files(ntfs_volume *vol)
* need for any locking at this stage as we are already running
* exclusively as the mount-in-progress task.
*/
vol->lcnbmp_ino = iget(sb, FILE_Bitmap);
if (!vol->lcnbmp_ino || is_bad_inode(vol->lcnbmp_ino)) {
if (is_bad_inode(vol->lcnbmp_ino))
vol->lcnbmp_ino = ntfs_iget(sb, FILE_Bitmap);
if (IS_ERR(vol->lcnbmp_ino) || is_bad_inode(vol->lcnbmp_ino)) {
if (!IS_ERR(vol->lcnbmp_ino))
iput(vol->lcnbmp_ino);
goto bitmap_failed;
}
if ((vol->_VCL(nr_lcn_bits) + 7) >> 3 > vol->lcnbmp_ino->i_size) {
if ((vol->nr_clusters + 7) >> 3 > vol->lcnbmp_ino->i_size) {
iput(vol->lcnbmp_ino);
bitmap_failed:
ntfs_error(sb, "Failed to load $Bitmap.");
......@@ -948,9 +948,9 @@ static BOOL load_system_files(ntfs_volume *vol)
* Get the volume inode and setup our cache of the volume flags and
* version.
*/
vol->vol_ino = iget(sb, FILE_Volume);
if (!vol->vol_ino || is_bad_inode(vol->vol_ino)) {
if (is_bad_inode(vol->vol_ino))
vol->vol_ino = ntfs_iget(sb, FILE_Volume);
if (IS_ERR(vol->vol_ino) || is_bad_inode(vol->vol_ino)) {
if (!IS_ERR(vol->vol_ino))
iput(vol->vol_ino);
volume_failed:
ntfs_error(sb, "Failed to load $Volume.");
......@@ -993,9 +993,9 @@ static BOOL load_system_files(ntfs_volume *vol)
* Get the inode for the logfile and empty it if this is a read-write
* mount.
*/
tmp_ino = iget(sb, FILE_LogFile);
if (!tmp_ino || is_bad_inode(tmp_ino)) {
if (is_bad_inode(tmp_ino))
tmp_ino = ntfs_iget(sb, FILE_LogFile);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(sb, "Failed to load $LogFile.");
// FIMXE: We only want to empty the thing so pointless bailing
......@@ -1010,9 +1010,9 @@ static BOOL load_system_files(ntfs_volume *vol)
* Get the inode for the attribute definitions file and parse the
* attribute definitions.
*/
tmp_ino = iget(sb, FILE_AttrDef);
if (!tmp_ino || is_bad_inode(tmp_ino)) {
if (is_bad_inode(tmp_ino))
tmp_ino = ntfs_iget(sb, FILE_AttrDef);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(sb, "Failed to load $AttrDef.");
goto iput_vol_bmp_mirr_err_out;
......@@ -1020,9 +1020,9 @@ static BOOL load_system_files(ntfs_volume *vol)
// FIXME: Parse the attribute definitions.
iput(tmp_ino);
/* Get the root directory inode. */
vol->root_ino = iget(sb, FILE_root);
if (!vol->root_ino || is_bad_inode(vol->root_ino)) {
if (is_bad_inode(vol->root_ino))
vol->root_ino = ntfs_iget(sb, FILE_root);
if (IS_ERR(vol->root_ino) || is_bad_inode(vol->root_ino)) {
if (!IS_ERR(vol->root_ino))
iput(vol->root_ino);
ntfs_error(sb, "Failed to load root directory.");
goto iput_vol_bmp_mirr_err_out;
......@@ -1032,18 +1032,18 @@ static BOOL load_system_files(ntfs_volume *vol)
return TRUE;
/* NTFS 3.0+ specific initialization. */
/* Get the security descriptors inode. */
vol->secure_ino = iget(sb, FILE_Secure);
if (!vol->secure_ino || is_bad_inode(vol->secure_ino)) {
if (is_bad_inode(vol->secure_ino))
vol->secure_ino = ntfs_iget(sb, FILE_Secure);
if (IS_ERR(vol->secure_ino) || is_bad_inode(vol->secure_ino)) {
if (!IS_ERR(vol->secure_ino))
iput(vol->secure_ino);
ntfs_error(sb, "Failed to load $Secure.");
goto iput_root_vol_bmp_mirr_err_out;
}
// FIXME: Initialize security.
/* Get the extended system files' directory inode. */
tmp_ino = iget(sb, FILE_Extend);
if (!tmp_ino || is_bad_inode(tmp_ino)) {
if (is_bad_inode(tmp_ino))
tmp_ino = ntfs_iget(sb, FILE_Extend);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(sb, "Failed to load $Extend.");
goto iput_sec_root_vol_bmp_mirr_err_out;
......@@ -1051,8 +1051,8 @@ static BOOL load_system_files(ntfs_volume *vol)
// FIXME: Do something. E.g. want to delete the $UsnJrnl if exists.
// Note we might be doing this at the wrong level; we might want to
// d_alloc_root() and then do a "normal" open(2) of $Extend\$UsnJrnl
// rather than using iget here, as we don't know the inode number for
// the files in $Extend directory.
// rather than using ntfs_iget here, as we don't know the inode number
// for the files in $Extend directory.
iput(tmp_ino);
return TRUE;
iput_sec_root_vol_bmp_mirr_err_out:
......@@ -1172,7 +1172,7 @@ s64 get_nr_free_clusters(ntfs_volume *vol)
* Convert the number of bits into bytes rounded up, then convert into
* multiples of PAGE_CACHE_SIZE.
*/
max_index = (vol->_VCL(nr_clusters) + 7) >> (3 + PAGE_CACHE_SHIFT);
max_index = (vol->nr_clusters + 7) >> (3 + PAGE_CACHE_SHIFT);
/* Use multiples of 4 bytes. */
max_size = PAGE_CACHE_SIZE >> 2;
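/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * vol->nr_clusters = 0x100000 the lcn bitmap needs (0x100000 + 7) >> 3 =
 * 0x20000 bytes, and with 4kiB pages (PAGE_CACHE_SHIFT = 12) that is
 * 0x20000 >> 12 = 0x20 pages, i.e. max_index = 0x20. max_size is
 * PAGE_CACHE_SIZE >> 2 = 0x400 because the bitmap is consumed in 4 byte
 * (32-bit) chunks, as the comment above says.
 */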
ntfs_debug("Reading $BITMAP, max_index = 0x%lx, max_size = 0x%x.",
......@@ -1211,7 +1211,7 @@ s64 get_nr_free_clusters(ntfs_volume *vol)
* Get the multiples of 4 bytes in use in the final partial
* page.
*/
max_size = ((((vol->_VCL(nr_clusters) + 7) >> 3) & ~PAGE_CACHE_MASK)
max_size = ((((vol->nr_clusters + 7) >> 3) & ~PAGE_CACHE_MASK)
+ 3) >> 2;
/* If there is a partial page go back and do it. */
if (max_size) {
......@@ -1254,7 +1254,7 @@ unsigned long get_nr_free_mft_records(ntfs_volume *vol)
* Convert the number of bits into bytes rounded up, then convert into
* multiples of PAGE_CACHE_SIZE.
*/
max_index = (vol->_VMM(nr_mft_records) + 7) >> (3 + PAGE_CACHE_SHIFT);
max_index = (vol->nr_mft_records + 7) >> (3 + PAGE_CACHE_SHIFT);
/* Use multiples of 4 bytes. */
max_size = PAGE_CACHE_SIZE >> 2;
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
......@@ -1293,12 +1293,12 @@ unsigned long get_nr_free_mft_records(ntfs_volume *vol)
* Get the multiples of 4 bytes in use in the final partial
* page.
*/
max_size = ((((vol->_VMM(nr_mft_records) + 7) >> 3) &
max_size = ((((vol->nr_mft_records + 7) >> 3) &
~PAGE_CACHE_MASK) + 3) >> 2;
/* If there is a partial page go back and do it. */
if (max_size) {
/* Compensate for out of bounds zero bits. */
if ((i = vol->_VMM(nr_mft_records) & 31))
if ((i = vol->nr_mft_records & 31))
nr_free -= 32 - i;
ntfs_debug("Handling partial page, max_size = 0x%x",
max_size);
......@@ -1345,7 +1345,7 @@ int ntfs_statfs(struct super_block *sb, struct statfs *sfs)
* inodes are also stored in data blocks ($MFT is a file), this is just
* the total clusters.
*/
sfs->f_blocks = vol->_VCL(nr_clusters) << vol->cluster_size_bits >>
sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
PAGE_CACHE_SHIFT;
/* Free data blocks in file system in units of f_bsize. */
size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
......@@ -1394,8 +1394,6 @@ struct super_operations ntfs_mount_sops = {
struct super_operations ntfs_sops = {
alloc_inode: ntfs_alloc_big_inode, /* VFS: Allocate a new inode. */
destroy_inode: ntfs_destroy_big_inode, /* VFS: Deallocate an inode. */
read_inode: ntfs_read_inode, /* VFS: Load inode from disk,
called from iget(). */
dirty_inode: ntfs_dirty_inode, /* VFS: Called from
__mark_inode_dirty(). */
//write_inode: NULL, /* VFS: Write dirty inode to disk. */
......@@ -1575,9 +1573,9 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
/*
* Now load the metadata required for the page cache and our address
* space operations to function. We do this by setting up a specialised
* read_inode method and then just calling iget() to obtain the inode
* for $MFT which is sufficient to allow our normal inode operations
* and associated address space operations to function.
* read_inode method and then just calling the normal iget() to obtain
* the inode for $MFT which is sufficient to allow our normal inode
* operations and associated address space operations to function.
*/
/*
* Poison vol->mft_ino so we know whether iget() called into our
......@@ -1601,9 +1599,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
* Note: sb->s_op has already been set to &ntfs_sops by our specialized
* ntfs_read_inode_mount() method when it was invoked by iget().
*/
down(&ntfs_lock);
/*
* The current mount is a compression user if the cluster size is
* less than or equal 4kiB.
......@@ -1618,7 +1614,6 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
goto iput_tmp_ino_err_out_now;
}
}
/*
* Increment the number of mounts and generate the global default
* upcase table if necessary. Also temporarily increment the number of
......@@ -1629,12 +1624,10 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
ntfs_nr_upcase_users++;
up(&ntfs_lock);
/*
* From now on, ignore @silent parameter. If we fail below this line,
* it will be due to a corrupt fs or a system error, so we report it.
*/
/*
* Open the system files with normal access functions and complete
* setting up the ntfs super block.
......@@ -1643,9 +1636,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
ntfs_error(sb, "Failed to load system files.");
goto unl_upcase_iput_tmp_ino_err_out_now;
}
if ((sb->s_root = d_alloc_root(vol->root_ino))) {
/* We increment i_count simulating an iget(). */
/* We increment i_count simulating an ntfs_iget(). */
atomic_inc(&vol->root_ino->i_count);
ntfs_debug("Exiting, status successful.");
/* Release the default upcase if it has no users. */
......@@ -1709,10 +1701,11 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
}
#undef OGIN
/*
* This is needed to get ntfs_clear_inode() called for each inode we
* have ever called iget()/iput() on, otherwise we A) leak resources
* and B) a subsequent mount fails automatically due to iget() never
* calling down into our ntfs_read_inode{_mount}() methods again...
* This is needed to get ntfs_clear_extent_inode() called for each
* inode we have ever called ntfs_iget()/iput() on, otherwise we A)
* leak resources and B) a subsequent mount fails automatically due to
* ntfs_iget() never calling down into our ntfs_read_locked_inode()
* method again...
*/
if (invalidate_inodes(sb)) {
ntfs_error(sb, "Busy inodes left. This is most likely a NTFS "
......
......@@ -3,7 +3,7 @@
* of the Linux-NTFS project.
*
* Copyright (c) 2001,2002 Anton Altaparmakov.
* Copyright (C) 2002 Richard Russon.
* Copyright (c) 2002 Richard Russon.
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
......@@ -89,10 +89,8 @@ typedef struct {
u32 index_record_size; /* in bytes */
u32 index_record_size_mask; /* index_record_size - 1 */
u8 index_record_size_bits; /* log2(index_record_size) */
union {
LCN nr_clusters; /* Volume size in clusters. */
LCN nr_lcn_bits; /* Number of bits in lcn bitmap. */
} SN(vcl);
LCN nr_clusters; /* Volume size in clusters == number of
bits in lcn bitmap. */
LCN mft_lcn; /* Cluster location of mft data. */
LCN mftmirr_lcn; /* Cluster location of copy of mft. */
u64 serial_no; /* The volume serial number. */
......@@ -104,10 +102,8 @@ typedef struct {
struct inode *mft_ino; /* The VFS inode of $MFT. */
struct rw_semaphore mftbmp_lock; /* Lock for serializing accesses to the
mft record bitmap ($MFT/$BITMAP). */
union {
unsigned long nr_mft_records; /* Number of mft records. */
unsigned long nr_mft_bits; /* Number of bits in mft bitmap. */
} SN(vmm);
unsigned long nr_mft_records; /* Number of mft records == number of
bits in mft bitmap. */
struct address_space mftbmp_mapping; /* Page cache for $MFT/$BITMAP. */
run_list mftbmp_rl; /* Run list for $MFT/$BITMAP. */
s64 mftbmp_size; /* Data size of $MFT/$BITMAP. */
......@@ -128,8 +124,5 @@ typedef struct {
struct nls_table *nls_map;
} ntfs_volume;
#define _VCL(X) SC(vcl,X)
#define _VMM(X) SC(vmm,X)
#endif /* _LINUX_NTFS_VOLUME_H */
......@@ -3573,9 +3573,18 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
static void maestro_remove(struct pci_dev *pcidev) {
struct ess_card *card = pci_get_drvdata(pcidev);
int i;
u32 n;
/* XXX maybe should force stop bob, but should be all
stopped by _release by now */
/* Turn off hardware volume control interrupt.
This has to come before we leave the IRQ below,
or a crash results if a button is pressed! */
n = inw(card->iobase + 0x18);
n &= ~(1 << 6);
outw(n, card->iobase + 0x18);
free_irq(card->irq, card);
unregister_sound_mixer(card->dev_mixer);
for(i=0;i<NR_DSPS;i++)
......