Commit f5a53edc authored by Jaegeuk Kim

f2fs: support aligned pinned file

This patch adds support for 2MB-aligned pinned files, which guarantees no GC at all
by allocating only fully valid 2MB segments for them.

Free segments are checked with has_not_enough_free_secs() using a large budget.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent bc005a4d
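
For context, the allocation path changed below is reached from userspace by pinning a file and then preallocating it with fallocate(). The following is a minimal sketch, not part of the patch: the F2FS_IOC_SET_PIN_FILE definition is copied here as an assumption (at this point it lives in fs/f2fs/f2fs.h rather than a uapi header), and the pin ioctl typically requires CAP_SYS_ADMIN.

/*
 * Sketch: pin an empty file, then preallocate it so that f2fs takes the
 * 2MB-segment-aligned path in expand_inode_data().
 * Build: cc -D_GNU_SOURCE -o pinalloc pinalloc.c   (run as root)
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* assumed to match fs/f2fs/f2fs.h at the time of this commit */
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_SET_PIN_FILE	_IOW(F2FS_IOCTL_MAGIC, 13, __u32)

int main(int argc, char **argv)
{
	__u32 pin = 1;
	off_t len = 64 << 20;	/* 64MB, a multiple of the 2MB segment size */
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <new file on an f2fs mount>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* mark the (still empty) file as pinned before allocating blocks */
	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0) {
		perror("F2FS_IOC_SET_PIN_FILE");
		return 1;
	}

	/* preallocation now proceeds segment by segment, fully valid 2MB each */
	if (fallocate(fd, 0, 0, len) < 0) {
		perror("fallocate");
		return 1;
	}

	close(fd);
	return 0;
}

Because every segment backing such a file is fully valid, none of them can be picked as a GC victim, which is what the commit message means by "no GC at all".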
@@ -890,6 +890,7 @@ enum {
 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 	CURSEG_COLD_NODE,	/* indirect node blocks */
 	NO_CHECK_TYPE,
+	CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
 };
 
 struct flush_cmd {
@@ -1301,6 +1302,7 @@ struct f2fs_sb_info {
 	/* threshold for gc trials on pinned files */
 	u64 gc_pin_file_threshold;
+	struct rw_semaphore pin_sem;
 
 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
@@ -3116,7 +3118,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
 void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 					unsigned int start, unsigned int end);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
 					struct cp_control *cpc);
...
@@ -1545,12 +1545,44 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (off_end)
 		map.m_len++;
 
-	if (f2fs_is_pinned_file(inode))
-		map.m_seg_type = CURSEG_COLD_DATA;
+	if (!map.m_len)
+		return 0;
+
+	if (f2fs_is_pinned_file(inode)) {
+		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+					sbi->log_blocks_per_seg;
+		block_t done = 0;
+
+		if (map.m_len % sbi->blocks_per_seg)
+			len += sbi->blocks_per_seg;
 
-	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
-						F2FS_GET_BLOCK_PRE_DIO :
-						F2FS_GET_BLOCK_PRE_AIO));
+		map.m_len = sbi->blocks_per_seg;
+next_alloc:
+		if (has_not_enough_free_secs(sbi, 0,
+			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+			mutex_lock(&sbi->gc_mutex);
+			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+			if (err && err != -ENODATA && err != -EAGAIN)
+				goto out_err;
+		}
+
+		down_write(&sbi->pin_sem);
+		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+		up_write(&sbi->pin_sem);
+
+		done += map.m_len;
+		len -= map.m_len;
+		map.m_lblk += map.m_len;
+
+		if (!err && len)
+			goto next_alloc;
+
+		map.m_len = done;
+	} else {
+		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+	}
+out_err:
 	if (err) {
 		pgoff_t last_off;
...
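
The pinned-file branch above rounds the requested length up to a whole number of segments and then maps exactly one segment per pass through the next_alloc loop, so every segment it touches ends up fully valid. Below is a small self-contained sketch of that rounding and loop; the constants assume the default f2fs geometry of 512 x 4KB blocks per 2MB segment, and all names are invented for the sketch.

/*
 * Sketch of the alignment math in the pinned-file branch of
 * expand_inode_data(): round the request up to whole segments and
 * consume it one segment per iteration.
 */
#include <stdio.h>

#define LOG_BLOCKS_PER_SEG	9			/* 512 blocks per segment */
#define BLOCKS_PER_SEG		(1 << LOG_BLOCKS_PER_SEG)

int main(void)
{
	unsigned int request = 1300;	/* blocks asked for by fallocate() */

	/* same rounding as the patch: whole segments, plus one extra
	 * segment if there is a remainder */
	unsigned int len = (request >> LOG_BLOCKS_PER_SEG) << LOG_BLOCKS_PER_SEG;
	unsigned int done = 0, lblk = 0;

	if (request % BLOCKS_PER_SEG)
		len += BLOCKS_PER_SEG;

	/* allocate segment by segment, as the next_alloc loop does */
	while (len) {
		unsigned int step = BLOCKS_PER_SEG;	/* map.m_len per pass */

		printf("allocate blocks [%u, %u)\n", lblk, lblk + step);
		done += step;
		len -= step;
		lblk += step;
	}

	printf("requested %u blocks, allocated %u (fully valid segments)\n",
	       request, done);
	return 0;
}

For 1300 requested blocks this prints three 512-block allocations (1536 blocks total), i.e. the preallocation is always rounded up to 2MB boundaries.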
@@ -711,7 +711,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
 		f2fs_put_page(page, 1);
 	}
 	if (!err)
-		f2fs_allocate_new_segments(sbi);
+		f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
 	return err;
 }
...
@@ -2690,7 +2690,7 @@ void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 	up_read(&SM_I(sbi)->curseg_lock);
 }
 
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
 {
 	struct curseg_info *curseg;
 	unsigned int old_segno;
@@ -2699,10 +2699,17 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 	down_write(&SIT_I(sbi)->sentry_lock);
 
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+		if (type != NO_CHECK_TYPE && i != type)
+			continue;
+
 		curseg = CURSEG_I(sbi, i);
-		old_segno = curseg->segno;
-		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-		locate_dirty_segment(sbi, old_segno);
+		if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
+				get_valid_blocks(sbi, curseg->segno, false) ||
+				get_ckpt_valid_blocks(sbi, curseg->segno)) {
+			old_segno = curseg->segno;
+			SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+			locate_dirty_segment(sbi, old_segno);
+		}
 	}
 
 	up_write(&SIT_I(sbi)->sentry_lock);
@@ -3068,6 +3075,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	bool put_pin_sem = false;
+
+	if (type == CURSEG_COLD_DATA) {
+		/* GC during CURSEG_COLD_DATA_PINNED allocation */
+		if (down_read_trylock(&sbi->pin_sem)) {
+			put_pin_sem = true;
+		} else {
+			type = CURSEG_WARM_DATA;
+			curseg = CURSEG_I(sbi, type);
+		}
+	} else if (type == CURSEG_COLD_DATA_PINNED) {
+		type = CURSEG_COLD_DATA;
+	}
 
 	down_read(&SM_I(sbi)->curseg_lock);
@@ -3133,6 +3153,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	mutex_unlock(&curseg->curseg_mutex);
 	up_read(&SM_I(sbi)->curseg_lock);
+
+	if (put_pin_sem)
+		up_read(&sbi->pin_sem);
 }
 
 static void update_device_state(struct f2fs_io_info *fio)
...
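
In f2fs_allocate_data_block() above, an ordinary CURSEG_COLD_DATA writer (typically GC) only uses the cold log if it can take pin_sem for read; while a pinned-file preallocation holds pin_sem for write, the write is redirected to the warm log instead, so the pinned allocation keeps the cold log to itself. The same trylock-with-fallback pattern is sketched below in plain pthreads as an illustration; every name here is invented for the sketch and none of it is f2fs code.

/*
 * Trylock-with-fallback: a writer that cannot take the shared lock for
 * read falls back to an alternate resource instead of blocking.
 * Build: cc -pthread -o pinlock pinlock.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pin_lock = PTHREAD_RWLOCK_INITIALIZER;

enum log_type { COLD_LOG, WARM_LOG };

/* pick the log to write to, mirroring the CURSEG_COLD_DATA branch */
static enum log_type pick_log(void)
{
	if (pthread_rwlock_tryrdlock(&pin_lock) == 0) {
		/* cold log is free; caller must drop the read lock later */
		return COLD_LOG;
	}
	/* a pinned-file allocation holds the lock: fall back to warm */
	return WARM_LOG;
}

int main(void)
{
	enum log_type t;

	/* simulate a pinned-file preallocation holding the lock for write */
	pthread_rwlock_wrlock(&pin_lock);
	t = pick_log();
	printf("while pinned alloc runs: %s log\n", t == COLD_LOG ? "cold" : "warm");
	pthread_rwlock_unlock(&pin_lock);

	/* without the writer, the cold log is used and the read lock is held */
	t = pick_log();
	printf("otherwise: %s log\n", t == COLD_LOG ? "cold" : "warm");
	if (t == COLD_LOG)
		pthread_rwlock_unlock(&pin_lock);

	return 0;
}

The trylock keeps GC from ever blocking on a long-running pinned preallocation while still preventing the two paths from interleaving blocks within the same cold segment.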
@@ -313,6 +313,8 @@ struct sit_entry_set {
  */
 static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
 {
+	if (type == CURSEG_COLD_DATA_PINNED)
+		type = CURSEG_COLD_DATA;
 	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
 }
...
@@ -2853,6 +2853,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	spin_lock_init(&sbi->dev_lock);
 
 	init_rwsem(&sbi->sb_lock);
+	init_rwsem(&sbi->pin_sem);
 }
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
...
@@ -154,6 +154,8 @@ static ssize_t features_show(struct f2fs_attr *a,
 	if (f2fs_sb_has_casefold(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "casefold");
+	len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+			len ? ", " : "", "pin_file");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 	return len;
 }
...