Commit 2b2f2aed authored by Linus Torvalds

Merge tag 'gfs2-4.19.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - iomap support for buffered writes and for direct I/O

 - two patches that reduce the size of struct gfs2_inode

 - lots of fixes and cleanups

* tag 'gfs2-4.19.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (25 commits)
  gfs2: eliminate update_rgrp_lvb_unlinked
  gfs2: Fix gfs2_testbit to use clone bitmaps
  gfs2: Get rid of gfs2_ea_strlen
  gfs2: cleanup: call gfs2_rgrp_ondisk2lvb from gfs2_rgrp_out
  gfs2: Special-case rindex for gfs2_grow
  GFS2: rgrp free blocks used incorrectly
  gfs2: remove redundant variable 'moved'
  gfs2: use iomap_readpage for blocksize == PAGE_SIZE
  gfs2: Use iomap for stuffed direct I/O reads
  gfs2: fallocate_chunk: Always initialize struct iomap
  GFS2: Fix recovery issues for spectators
  fs: gfs2: Adding new return type vm_fault_t
  gfs2: using posix_acl_xattr_size instead of posix_acl_to_xattr
  gfs2: Don't reject a supposedly full bitmap if we have blocks reserved
  gfs2: Eliminate redundant ip->i_rgd
  gfs2: Stop messing with ip->i_rgd in the rlist code
  gfs2: Remove gfs2_write_{begin,end}
  gfs2: iomap direct I/O support
  gfs2: gfs2_extent_length cleanup
  gfs2: iomap buffered write support
  ...
parents 72f02ba6 f5580d0f
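The iomap conversion is the heart of this pull: rather than mapping one block at a time through buffer heads, gfs2 now hands whole extents to the generic iomap layer, which drives buffered writes via iomap_file_buffered_write() and direct I/O via iomap_dio_rw(). A filesystem opts in by filling out a struct iomap_ops; the sketch below is only a schematic illustration of that contract as of 4.19 (the callback body and the example_* names are invented, not gfs2's actual mapping code):

/* Schematic sketch of the iomap contract; not gfs2's implementation. */
#include <linux/iomap.h>

static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			       unsigned flags, struct iomap *iomap)
{
	/* Describe the extent backing [pos, pos + length) in one shot. */
	iomap->addr = 0;		/* disk address of the extent, in bytes */
	iomap->offset = pos;		/* file offset the extent starts at */
	iomap->length = length;		/* may be longer than requested */
	iomap->type = IOMAP_MAPPED;	/* or IOMAP_HOLE / IOMAP_UNWRITTEN */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops example_iomap_ops = {
	.iomap_begin = example_iomap_begin,
};

With such ops in place, the read and write paths in the diff below reduce to calls like iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL).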
fs/gfs2/acl.c
@@ -82,14 +82,12 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
 int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int error;
-	int len;
+	size_t len;
 	char *data;
 	const char *name = gfs2_acl_name(type);
 
 	if (acl) {
-		len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
-		if (len == 0)
-			return 0;
+		len = posix_acl_xattr_size(acl->a_count);
 		data = kmalloc(len, GFP_NOFS);
 		if (data == NULL)
 			return -ENOMEM;
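The acl.c change above works because the xattr encoding of a POSIX ACL is a fixed-size header followed by one fixed-size entry per ACL entry, so the buffer size follows directly from acl->a_count and no dry-run call to posix_acl_to_xattr() is needed. The helper is essentially this (from include/linux/posix_acl_xattr.h):

static inline size_t
posix_acl_xattr_size(int count)
{
	return (sizeof(struct posix_acl_xattr_header) +
		(count * sizeof(struct posix_acl_xattr_entry)));
}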
fs/gfs2/aops.h (new file)
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+ */
+
+#ifndef __AOPS_DOT_H__
+#define __AOPS_DOT_H__
+
+#include "incore.h"
+
+extern int stuffed_readpage(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+				  loff_t pos, unsigned copied,
+				  struct page *page);
+extern void adjust_fs_space(struct inode *inode);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+				   unsigned int from, unsigned int len);
+
+#endif /* __AOPS_DOT_H__ */
fs/gfs2/dir.c
@@ -1011,7 +1011,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 	u64 bn, leaf_no;
 	__be64 *lp;
 	u32 index;
-	int x, moved = 0;
+	int x;
 	int error;
 
 	index = name->hash >> (32 - dip->i_depth);
@@ -1113,8 +1113,6 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 
 			if (!prev)
 				prev = dent;
-
-			moved = 1;
 		} else {
 			prev = dent;
 		}
fs/gfs2/file.c
@@ -26,10 +26,12 @@
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
 #include <linux/delay.h>
+#include <linux/backing-dev.h>
 
 #include "gfs2.h"
 #include "incore.h"
 #include "bmap.h"
+#include "aops.h"
 #include "dir.h"
 #include "glock.h"
 #include "glops.h"
@@ -387,7 +389,7 @@ static int gfs2_allocate_page_backing(struct page *page)
  * blocks allocated on disk to back that page.
  */
 
-static int gfs2_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -688,12 +690,83 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 	return ret ? ret : ret1;
 }
 
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct file *file = iocb->ki_filp;
+	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
+	size_t count = iov_iter_count(to);
+	struct gfs2_holder gh;
+	ssize_t ret;
+
+	if (!count)
+		return 0; /* skip atime */
+
+	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+	ret = gfs2_glock_nq(&gh);
+	if (ret)
+		goto out_uninit;
+
+	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+
+	gfs2_glock_dq(&gh);
+out_uninit:
+	gfs2_holder_uninit(&gh);
+	return ret;
+}
+
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file->f_mapping->host;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	size_t len = iov_iter_count(from);
+	loff_t offset = iocb->ki_pos;
+	struct gfs2_holder gh;
+	ssize_t ret;
+
+	/*
+	 * Deferred lock, even if its a write, since we do no allocation on
+	 * this path. All we need to change is the atime, and this lock mode
+	 * ensures that other nodes have flushed their buffered read caches
+	 * (i.e. their page cache entries for this inode). We do not,
+	 * unfortunately, have the option of only flushing a range like the
+	 * VFS does.
+	 */
+	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+	ret = gfs2_glock_nq(&gh);
+	if (ret)
+		goto out_uninit;
+
+	/* Silently fall back to buffered I/O when writing beyond EOF */
+	if (offset + len > i_size_read(&ip->i_inode))
+		goto out;
+
+	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+
+out:
+	gfs2_glock_dq(&gh);
+out_uninit:
+	gfs2_holder_uninit(&gh);
+	return ret;
+}
+
+static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	ssize_t ret;
+
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		ret = gfs2_file_direct_read(iocb, to);
+		if (likely(ret != -ENOTBLK))
+			return ret;
+		iocb->ki_flags &= ~IOCB_DIRECT;
+	}
+	return generic_file_read_iter(iocb, to);
+}
+
 /**
  * gfs2_file_write_iter - Perform a write to a file
  * @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
- * @pos: The file position
+ * @from: The data to write
  *
  * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
@@ -705,8 +778,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct gfs2_inode *ip = GFS2_I(file_inode(file));
-	int ret;
+	struct inode *inode = file_inode(file);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	ssize_t written = 0, ret;
 
 	ret = gfs2_rsqa_alloc(ip);
 	if (ret)
@@ -723,7 +797,71 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		gfs2_glock_dq_uninit(&gh);
 	}
 
-	return generic_file_write_iter(iocb, from);
+	inode_lock(inode);
+	ret = generic_write_checks(iocb, from);
+	if (ret <= 0)
+		goto out;
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = inode_to_bdi(inode);
+
+	ret = file_remove_privs(file);
+	if (ret)
+		goto out2;
+
+	ret = file_update_time(file);
+	if (ret)
+		goto out2;
+
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		struct address_space *mapping = file->f_mapping;
+		loff_t pos, endbyte;
+		ssize_t buffered;
+
+		written = gfs2_file_direct_write(iocb, from);
+		if (written < 0 || !iov_iter_count(from))
+			goto out2;
+
+		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+		if (unlikely(ret < 0))
+			goto out2;
+		buffered = ret;
+
+		/*
+		 * We need to ensure that the page cache pages are written to
+		 * disk and invalidated to preserve the expected O_DIRECT
+		 * semantics.
+		 */
+		pos = iocb->ki_pos;
+		endbyte = pos + buffered - 1;
+		ret = filemap_write_and_wait_range(mapping, pos, endbyte);
+		if (!ret) {
+			iocb->ki_pos += buffered;
+			written += buffered;
+			invalidate_mapping_pages(mapping,
+						 pos >> PAGE_SHIFT,
+						 endbyte >> PAGE_SHIFT);
+		} else {
+			/*
+			 * We don't know how much we wrote, so just return
+			 * the number of bytes which were direct-written
+			 */
+		}
+	} else {
+		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+		if (likely(ret > 0))
+			iocb->ki_pos += ret;
+	}
+
+out2:
+	current->backing_dev_info = NULL;
+out:
+	inode_unlock(inode);
+	if (likely(ret > 0)) {
+		/* Handle various SYNC-type writes */
+		ret = generic_write_sync(iocb, ret);
+	}
+	return written ? written : ret;
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -733,7 +871,6 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 	struct gfs2_inode *ip = GFS2_I(inode);
 	loff_t end = offset + len;
 	struct buffer_head *dibh;
-	struct iomap iomap = { };
 	int error;
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -749,12 +886,14 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 	}
 
 	while (offset < end) {
+		struct iomap iomap = { };
+
 		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
 					     &iomap);
 		if (error)
 			goto out;
 		offset = iomap.offset + iomap.length;
-		if (iomap.type != IOMAP_HOLE)
+		if (!(iomap.flags & IOMAP_F_NEW))
 			continue;
 		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
 					 iomap.length >> inode->i_blkbits,
@@ -1125,7 +1264,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
 const struct file_operations gfs2_file_fops = {
 	.llseek		= gfs2_llseek,
-	.read_iter	= generic_file_read_iter,
+	.read_iter	= gfs2_file_read_iter,
 	.write_iter	= gfs2_file_write_iter,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.mmap		= gfs2_mmap,
@@ -1155,7 +1294,7 @@ const struct file_operations gfs2_dir_fops = {
 
 const struct file_operations gfs2_file_fops_nolock = {
 	.llseek		= gfs2_llseek,
-	.read_iter	= generic_file_read_iter,
+	.read_iter	= gfs2_file_read_iter,
 	.write_iter	= gfs2_file_write_iter,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.mmap		= gfs2_mmap,
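The EOF check in gfs2_file_direct_write() above is observable from user space: an O_DIRECT write that would extend the file silently completes through the page cache instead of failing. A minimal user-space sketch (the mount point, file name, and 4096-byte alignment are arbitrary assumptions):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/gfs2/testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return 1;
	/* O_DIRECT requires block-aligned buffers; 4096 covers common cases. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);
	/*
	 * This write extends the file, so gfs2 falls back from the direct
	 * path and completes it through the page cache; the caller just
	 * sees a successful write.
	 */
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	free(buf);
	close(fd);
	return 0;
}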
fs/gfs2/incore.h
@@ -65,6 +65,27 @@ struct gfs2_log_operations {
 
 #define GBF_FULL 1
 
+/**
+ * Clone bitmaps (bi_clone):
+ *
+ * - When a block is freed, we remember the previous state of the block in the
+ *   clone bitmap, and only mark the block as free in the real bitmap.
+ *
+ * - When looking for a block to allocate, we check for a free block in the
+ *   clone bitmap, and if no clone bitmap exists, in the real bitmap.
+ *
+ * - For allocating a block, we mark it as allocated in the real bitmap, and if
+ *   a clone bitmap exists, also in the clone bitmap.
+ *
+ * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
+ *   to make the clone bitmap reflect the current allocation state.
+ *   (Alternatively, we could remove the clone bitmap.)
+ *
+ * The clone bitmaps are in-core only, and is never written to disk.
+ *
+ * These steps ensure that blocks which have been freed in a transaction cannot
+ * be reallocated in that same transaction.
+ */
 struct gfs2_bitmap {
 	struct buffer_head *bi_bh;
 	char *bi_clone;
@@ -295,7 +316,6 @@ struct gfs2_blkreserv {
 	struct rb_node rs_node;       /* link to other block reservations */
 	struct gfs2_rbm rs_rbm;       /* Start of reservation */
 	u32 rs_free;                  /* how many blocks are still free */
-	u64 rs_inum;                  /* Inode number for reservation */
 };
 
 /*
@@ -398,7 +418,6 @@ struct gfs2_inode {
 	struct gfs2_holder i_gh; /* for prepare/commit_write only */
 	struct gfs2_qadata *i_qadata; /* quota allocation data */
 	struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
-	struct gfs2_rgrpd *i_rgd;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
 	struct list_head i_ordered;
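The new comment in incore.h is the authoritative description of the clone bitmaps; as a rough mental model, the rules can be played out with two plain byte arrays. This is a schematic user-space analogue only (gfs2 actually packs two bits per block and keeps one clone per bitmap buffer):

#include <stdbool.h>
#include <string.h>

#define NBLOCKS 64

static unsigned char real_bm[NBLOCKS];		/* 1 = allocated */
static unsigned char clone_bm[NBLOCKS];
static bool have_clone;

static void free_block(unsigned int n)
{
	if (!have_clone) {		/* remember the pre-free state */
		memcpy(clone_bm, real_bm, sizeof(real_bm));
		have_clone = true;
	}
	real_bm[n] = 0;			/* freed in the real bitmap only */
}

static bool block_is_allocatable(unsigned int n)
{
	/* Search the clone if one exists: in-transaction frees stay busy. */
	return have_clone ? !clone_bm[n] : !real_bm[n];
}

static void alloc_block(unsigned int n)
{
	real_bm[n] = 1;
	if (have_clone)
		clone_bm[n] = 1;
}

static void log_flush(void)
{
	/* The frees are now on disk; resync (or drop) the clone. */
	memcpy(clone_bm, real_bm, sizeof(real_bm));
}

This is why a block freed inside a transaction cannot be handed out again until the log has been flushed.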
fs/gfs2/lock_dlm.c
@@ -821,6 +821,13 @@ static int control_mount(struct gfs2_sbd *sdp)
 		goto fail;
 	}
 
+	/**
+	 * If we're a spectator, we don't want to take the lock in EX because
+	 * we cannot do the first-mount responsibility it implies: recovery.
+	 */
+	if (sdp->sd_args.ar_spectator)
+		goto locks_done;
+
 	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
 	if (!error) {
 		mounted_mode = DLM_LOCK_EX;
@@ -896,9 +903,16 @@ static int control_mount(struct gfs2_sbd *sdp)
 	if (lvb_gen < mount_gen) {
 		/* wait for mounted nodes to update control_lock lvb to our
 		   generation, which might include new recovery bits set */
-		fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
-			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
-			lvb_gen, ls->ls_recover_flags);
+		if (sdp->sd_args.ar_spectator) {
+			fs_info(sdp, "Recovery is required. Waiting for a "
+				"non-spectator to mount.\n");
+			msleep_interruptible(1000);
+		} else {
+			fs_info(sdp, "control_mount wait1 block %u start %u "
+				"mount %u lvb %u flags %lx\n", block_gen,
+				start_gen, mount_gen, lvb_gen,
+				ls->ls_recover_flags);
+		}
 		spin_unlock(&ls->ls_recover_spin);
 		goto restart;
 	}
fs/gfs2/log.c
@@ -92,7 +92,8 @@ static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
 
 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
 			       struct writeback_control *wbc,
-			       struct gfs2_trans *tr)
+			       struct gfs2_trans *tr,
+			       bool *withdraw)
 __releases(&sdp->sd_ail_lock)
 __acquires(&sdp->sd_ail_lock)
 {
@@ -107,8 +108,10 @@ __acquires(&sdp->sd_ail_lock)
 		gfs2_assert(sdp, bd->bd_tr == tr);
 
 		if (!buffer_busy(bh)) {
-			if (!buffer_uptodate(bh))
+			if (!buffer_uptodate(bh)) {
 				gfs2_io_error_bh(sdp, bh);
+				*withdraw = true;
+			}
 			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 			continue;
 		}
@@ -148,6 +151,7 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 	struct list_head *head = &sdp->sd_ail1_list;
 	struct gfs2_trans *tr;
 	struct blk_plug plug;
+	bool withdraw = false;
 
 	trace_gfs2_ail_flush(sdp, wbc, 1);
 	blk_start_plug(&plug);
@@ -156,11 +160,13 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 	list_for_each_entry_reverse(tr, head, tr_list) {
 		if (wbc->nr_to_write <= 0)
 			break;
-		if (gfs2_ail1_start_one(sdp, wbc, tr))
+		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
 			goto restart;
 	}
 	spin_unlock(&sdp->sd_ail_lock);
 	blk_finish_plug(&plug);
+	if (withdraw)
+		gfs2_lm_withdraw(sdp, NULL);
 	trace_gfs2_ail_flush(sdp, wbc, 0);
 }
 
@@ -188,7 +194,8 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp)
  *
  */
 
-static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+				bool *withdraw)
 {
 	struct gfs2_bufdata *bd, *s;
 	struct buffer_head *bh;
@@ -199,11 +206,12 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
 					 bd_ail_st_list) {
 		bh = bd->bd_bh;
 		gfs2_assert(sdp, bd->bd_tr == tr);
 		if (buffer_busy(bh))
 			continue;
-		if (!buffer_uptodate(bh))
+		if (!buffer_uptodate(bh)) {
 			gfs2_io_error_bh(sdp, bh);
+			*withdraw = true;
+		}
 		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 	}
 }
@@ -218,10 +226,11 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
 	struct gfs2_trans *tr, *s;
 	int oldest_tr = 1;
 	int ret;
+	bool withdraw = false;
 
 	spin_lock(&sdp->sd_ail_lock);
 	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
-		gfs2_ail1_empty_one(sdp, tr);
+		gfs2_ail1_empty_one(sdp, tr, &withdraw);
 		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
 			list_move(&tr->tr_list, &sdp->sd_ail2_list);
 		else
@@ -230,6 +239,9 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
 	ret = list_empty(&sdp->sd_ail1_list);
 	spin_unlock(&sdp->sd_ail_lock);
 
+	if (withdraw)
+		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");
+
 	return ret;
 }
 
@@ -689,7 +701,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 	hash = ~crc32(~0, lh, LH_V1_SIZE);
 	lh->lh_hash = cpu_to_be32(hash);
 
-	tv = current_kernel_time64();
+	ktime_get_coarse_real_ts64(&tv);
 	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
 	lh->lh_sec = cpu_to_be64(tv.tv_sec);
 	addr = gfs2_log_bmap(sdp);
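The log.c changes follow one pattern throughout: gfs2_ail1_start_one() and gfs2_ail1_empty_one() run under sd_ail_lock, so instead of withdrawing the filesystem on an I/O error right there, they only set a bool, and the caller performs the withdraw after dropping the spin lock. A generic user-space analogue of the pattern (all names invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t lock;

/* Stand-in for gfs2_lm_withdraw(): may block, so never call under lock. */
static void heavyweight_error_action(void)
{
	puts("withdrawing");
}

static void scan_one(int item, bool *failed)
{
	if (item < 0)		/* detect the error under the lock ... */
		*failed = true;	/* ... but only record it */
}

static void scan_all(const int *items, int n)
{
	bool failed = false;

	pthread_spin_lock(&lock);
	for (int i = 0; i < n; i++)
		scan_one(items[i], &failed);
	pthread_spin_unlock(&lock);

	if (failed)		/* act only after the lock is dropped */
		heavyweight_error_action();
}

int main(void)
{
	int items[] = { 1, -2, 3 };

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	scan_all(items, 3);
	return 0;
}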
fs/gfs2/lops.c
@@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	if (test_set_buffer_pinned(bh))
 		gfs2_assert_withdraw(sdp, 0);
 	if (!buffer_uptodate(bh))
-		gfs2_io_error_bh(sdp, bh);
+		gfs2_io_error_bh_wd(sdp, bh);
 	bd = bh->b_private;
 	/* If this buffer is in the AIL and it has already been written
 	 * to in-place disk block, remove it from the AIL.
fs/gfs2/meta_io.c
@@ -293,7 +293,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 	if (unlikely(!buffer_uptodate(bh))) {
 		struct gfs2_trans *tr = current->journal_info;
 		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
-			gfs2_io_error_bh(sdp, bh);
+			gfs2_io_error_bh_wd(sdp, bh);
 		brelse(bh);
 		*bhp = NULL;
 		return -EIO;
@@ -320,7 +320,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	if (!buffer_uptodate(bh)) {
 		struct gfs2_trans *tr = current->journal_info;
 		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
-			gfs2_io_error_bh(sdp, bh);
+			gfs2_io_error_bh_wd(sdp, bh);
 		return -EIO;
 	}
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
fs/gfs2/recovery.c
@@ -413,12 +413,13 @@ void gfs2_recover_func(struct work_struct *work)
 	ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
 	int ro = 0;
 	unsigned int pass;
-	int error;
+	int error = 0;
 	int jlocked = 0;
 
 	t_start = ktime_get();
-	if (sdp->sd_args.ar_spectator ||
-	    (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
+	if (sdp->sd_args.ar_spectator)
+		goto fail;
+	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
 		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
 			jd->jd_jid);
 		jlocked = 1;
fs/gfs2/super.c
@@ -1729,7 +1729,6 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
 	if (ip) {
 		ip->i_flags = 0;
 		ip->i_gl = NULL;
-		ip->i_rgd = NULL;
 		memset(&ip->i_res, 0, sizeof(ip->i_res));
 		RB_CLEAR_NODE(&ip->i_res.rs_node);
 		ip->i_rahead = 0;
fs/gfs2/sys.c
@@ -429,11 +429,18 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
 	spin_lock(&sdp->sd_jindex_spin);
 	rv = -EBUSY;
-	if (sdp->sd_jdesc->jd_jid == jid)
+	/**
+	 * If we're a spectator, we use journal0, but it's not really ours.
+	 * So we need to wait for its recovery too. If we skip it we'd never
+	 * queue work to the recovery workqueue, and so its completion would
+	 * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
+	 * permanently stop working.
+	 */
+	if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
 		goto out;
 	rv = -ENOENT;
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
-		if (jd->jd_jid != jid)
+		if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
 			continue;
 		rv = gfs2_recover_journal(jd, false);
 		break;
 	}
fs/gfs2/trace_gfs2.h
@@ -606,7 +606,8 @@ TRACE_EVENT(gfs2_rs,
 		__entry->rd_addr	= rs->rs_rbm.rgd->rd_addr;
 		__entry->rd_free_clone	= rs->rs_rbm.rgd->rd_free_clone;
 		__entry->rd_reserved	= rs->rs_rbm.rgd->rd_reserved;
-		__entry->inum		= rs->rs_inum;
+		__entry->inum		= container_of(rs, struct gfs2_inode,
+						       i_res)->i_no_addr;
 		__entry->start		= gfs2_rbm_to_block(&rs->rs_rbm);
 		__entry->free		= rs->rs_free;
 		__entry->func		= func;
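Dropping rs_inum is safe because a gfs2_blkreserv never exists outside a gfs2_inode: it is embedded as the i_res member, so the owning inode, and with it i_no_addr, can always be recovered with container_of(). A self-contained user-space rendition of that trick:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct blkreserv { unsigned int rs_free; };

struct inode_like {
	unsigned long long i_no_addr;
	struct blkreserv i_res;		/* embedded, never allocated alone */
};

int main(void)
{
	struct inode_like ip = { .i_no_addr = 1234, .i_res = { .rs_free = 8 } };
	struct blkreserv *rs = &ip.i_res;

	/* Recover the enclosing inode from the embedded reservation. */
	struct inode_like *owner = container_of(rs, struct inode_like, i_res);

	printf("%llu\n", owner->i_no_addr);	/* prints 1234 */
	return 0;
}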
fs/gfs2/trans.h
@@ -30,9 +30,11 @@ struct gfs2_glock;
  * block, or all of the blocks in the rg, whichever is smaller */
 static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
 {
-	if (requested < ip->i_rgd->rd_length)
+	struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
+
+	if (requested < rgd->rd_length)
 		return requested + 1;
-	return ip->i_rgd->rd_length;
+	return rgd->rd_length;
 }
 
 extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
fs/gfs2/util.c
@@ -46,14 +46,16 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...)
 	    test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
 		return 0;
 
-	va_start(args, fmt);
+	if (fmt) {
+		va_start(args, fmt);
 
-	vaf.fmt = fmt;
-	vaf.va = &args;
+		vaf.fmt = fmt;
+		vaf.va = &args;
 
-	fs_err(sdp, "%pV", &vaf);
+		fs_err(sdp, "%pV", &vaf);
 
-	va_end(args);
+		va_end(args);
+	}
 
 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
 		fs_err(sdp, "about to withdraw this file system\n");
@@ -246,21 +248,21 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
 }
 
 /**
- * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
- * Returns: -1 if this call withdrew the machine,
- *          0 if it was already withdrawn
+ * gfs2_io_error_bh_i - Flag a buffer I/O error
+ * @withdraw: withdraw the filesystem
  */
 
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
-		       const char *function, char *file, unsigned int line)
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+			const char *function, char *file, unsigned int line,
+			bool withdraw)
 {
-	int rv;
-	rv = gfs2_lm_withdraw(sdp,
-			      "fatal: I/O error\n"
-			      "  block = %llu\n"
-			      "  function = %s, file = %s, line = %u\n",
-			      (unsigned long long)bh->b_blocknr,
-			      function, file, line);
-	return rv;
+	fs_err(sdp,
+	       "fatal: I/O error\n"
+	       "  block = %llu\n"
+	       "  function = %s, file = %s, line = %u\n",
+	       (unsigned long long)bh->b_blocknr,
+	       function, file, line);
+	if (withdraw)
+		gfs2_lm_withdraw(sdp, NULL);
 }
 
fs/gfs2/util.h
@@ -136,11 +136,15 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
 gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
 
 
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
-		       const char *function, char *file, unsigned int line);
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+			const char *function, char *file, unsigned int line,
+			bool withdraw);
+
+#define gfs2_io_error_bh_wd(sdp, bh) \
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true);
 
 #define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false);
 
 
 extern struct kmem_cache *gfs2_glock_cachep;
fs/gfs2/xattr.c
@@ -343,60 +343,45 @@ struct ea_list {
 	unsigned int ei_size;
 };
 
-static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
-{
-	switch (ea->ea_type) {
-	case GFS2_EATYPE_USR:
-		return 5 + ea->ea_name_len + 1;
-	case GFS2_EATYPE_SYS:
-		return 7 + ea->ea_name_len + 1;
-	case GFS2_EATYPE_SECURITY:
-		return 9 + ea->ea_name_len + 1;
-	default:
-		return 0;
-	}
-}
-
 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
 		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
 		     void *private)
 {
 	struct ea_list *ei = private;
 	struct gfs2_ea_request *er = ei->ei_er;
-	unsigned int ea_size = gfs2_ea_strlen(ea);
+	unsigned int ea_size;
+	char *prefix;
+	unsigned int l;
 
 	if (ea->ea_type == GFS2_EATYPE_UNUSED)
 		return 0;
 
-	if (er->er_data_len) {
-		char *prefix = NULL;
-		unsigned int l = 0;
-		char c = 0;
+	switch (ea->ea_type) {
+	case GFS2_EATYPE_USR:
+		prefix = "user.";
+		l = 5;
+		break;
+	case GFS2_EATYPE_SYS:
+		prefix = "system.";
+		l = 7;
+		break;
+	case GFS2_EATYPE_SECURITY:
+		prefix = "security.";
+		l = 9;
+		break;
+	default:
+		BUG();
+	}
 
+	ea_size = l + ea->ea_name_len + 1;
+	if (er->er_data_len) {
 		if (ei->ei_size + ea_size > er->er_data_len)
 			return -ERANGE;
 
-		switch (ea->ea_type) {
-		case GFS2_EATYPE_USR:
-			prefix = "user.";
-			l = 5;
-			break;
-		case GFS2_EATYPE_SYS:
-			prefix = "system.";
-			l = 7;
-			break;
-		case GFS2_EATYPE_SECURITY:
-			prefix = "security.";
-			l = 9;
-			break;
-		}
-
-		BUG_ON(l == 0);
-
 		memcpy(er->er_data + ei->ei_size, prefix, l);
 		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
 		       ea->ea_name_len);
-		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
+		er->er_data[ei->ei_size + ea_size - 1] = 0;
 	}
 
 	ei->ei_size += ea_size;
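For reference, the ea_size computed in ea_list_i() is the length of one entry in the listxattr(2) output format: names are emitted back to back as "<prefix><name>" plus a terminating NUL, so an entry costs prefix length + ea_name_len + 1 bytes. A sketch of the same arithmetic outside the kernel (function name invented):

#include <stdio.h>
#include <string.h>

/* Mirrors the prefix lengths (5, 7, 9) hard-coded in ea_list_i() above. */
static size_t xattr_entry_size(const char *prefix, size_t name_len)
{
	return strlen(prefix) + name_len + 1;	/* +1 for the NUL terminator */
}

int main(void)
{
	/* "user.foo\0" -> 5 + 3 + 1 = 9 bytes in the listxattr buffer */
	printf("%zu\n", xattr_entry_size("user.", strlen("foo")));
	return 0;
}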