nexedi / linux · Commits

Commit b213437f
Authored by Rusty Russell on Jun 17, 2002
Committed by Linus Torvalds on Jun 17, 2002
[PATCH] Make NTFS use a single uncompression-buffer
This was done by inspection, is it OK Anton? It's very simple:
parent dc7ac875
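For orientation (this note is not part of the commit): the patch replaces the per-CPU decompression buffers with one shared buffer serialized by a spinlock, so whoever holds the lock owns the buffer and must not sleep until it is finished with it. A rough userspace analogue of that pattern, using made-up demo_* names and a pthread mutex standing in for the kernel spinlock, might look like this:

/*
 * Userspace analogue of the pattern introduced by this commit: one shared
 * scratch buffer protected by a lock, instead of one buffer per CPU.
 * The demo_* names are illustrative only; this is not kernel code.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_MAX_CB_SIZE 0x10000          /* stand-in for NTFS_MAX_CB_SIZE */

static unsigned char *demo_buffer;        /* the single shared buffer */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_allocate_buffer(void)
{
	demo_buffer = malloc(DEMO_MAX_CB_SIZE);
	return demo_buffer ? 0 : -1;      /* the kernel code returns -ENOMEM */
}

static void demo_use_buffer(const void *src, size_t len)
{
	if (len > DEMO_MAX_CB_SIZE)
		len = DEMO_MAX_CB_SIZE;
	/* The lock holder has exclusive use of the one buffer; in the kernel
	 * version the holder must not sleep until the buffer is released. */
	pthread_mutex_lock(&demo_lock);
	memcpy(demo_buffer, src, len);
	/* ... decompress from demo_buffer into the destination pages ... */
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	if (demo_allocate_buffer())
		return 1;
	demo_use_buffer("example", 8);
	free(demo_buffer);
	printf("ok\n");
	return 0;
}

The trade-off visible in the diff below is memory versus concurrency: one NTFS_MAX_CB_SIZE allocation instead of one per CPU, at the cost of serializing decompression on ntfs_cb_lock.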
Showing 2 changed files with 27 additions and 46 deletions (+27 -46)

fs/ntfs/compress.c  +26 -45
fs/ntfs/super.c      +1  -1
fs/ntfs/compress.c
@@ -50,14 +50,15 @@ typedef enum {
 } ntfs_compression_constants;
 
 /**
- * ntfs_compression_buffers - per-CPU buffers for the decompression engine.
+ * ntfs_compression_buffer - one buffer for the decompression engine.
  */
-static u8 **ntfs_compression_buffers = NULL;
+static u8 *ntfs_compression_buffer = NULL;
+
+/* This spinlock which protects it */
+static spinlock_t ntfs_cb_lock = SPIN_LOCK_UNLOCKED;
 
 /**
- * allocate_compression_buffers - allocate the per-CPU decompression buffers
- *
- * Allocate the per-CPU buffers for the decompression engine.
+ * allocate_compression_buffers - allocate the decompression buffers
  *
  * Caller has to hold the ntfs_lock semaphore.
  *
@@ -67,30 +68,16 @@ int allocate_compression_buffers(void)
 {
 	int i, j;
 
-	BUG_ON(ntfs_compression_buffers);
+	BUG_ON(ntfs_compression_buffer);
 
-	ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8*),
-			GFP_KERNEL);
-	if (!ntfs_compression_buffers)
+	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
+	if (!ntfs_compression_buffer)
 		return -ENOMEM;
-	for (i = 0; i < smp_num_cpus; i++) {
-		ntfs_compression_buffers[i] = (u8*)vmalloc(NTFS_MAX_CB_SIZE);
-		if (!ntfs_compression_buffers[i])
-			break;
-	}
-	if (i == smp_num_cpus)
-		return 0;
-	/* Allocation failed, cleanup and return error. */
-	for (j = 0; j < i; j++)
-		vfree(ntfs_compression_buffers[j]);
-	kfree(ntfs_compression_buffers);
-	return -ENOMEM;
+	return 0;
 }
 
 /**
- * free_compression_buffers - free the per-CPU decompression buffers
- *
- * Free the per-CPU buffers used by the decompression engine.
+ * free_compression_buffers - free the decompression buffers
  *
  * Caller has to hold the ntfs_lock semaphore.
  */
@@ -98,12 +85,9 @@ void free_compression_buffers(void)
 {
 	int i;
 
-	BUG_ON(!ntfs_compression_buffers);
-
-	for (i = 0; i < smp_num_cpus; i++)
-		vfree(ntfs_compression_buffers[i]);
-	kfree(ntfs_compression_buffers);
-	ntfs_compression_buffers = NULL;
+	BUG_ON(!ntfs_compression_buffer);
+	vfree(ntfs_compression_buffer);
+	ntfs_compression_buffer = NULL;
 }
 
 /**
@@ -188,8 +172,8 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	ntfs_debug("Completed. Returning success (0).");
 	err = 0;
 return_error:
-	/* We can sleep from now on, so we reenable preemption. */
-	preempt_enable();
+	/* We can sleep from now on, so we drop lock. */
+	spin_unlock(&ntfs_cb_lock);
 	/* Second stage: finalize completed pages. */
 	for (i = 0; i < nr_completed_pages; i++) {
 		int di = completed_pages[i];
@@ -607,12 +591,10 @@ int ntfs_file_read_compressed_block(struct page *page)
 	}
 
 	/*
-	 * Get the compression buffer corresponding to the current CPU. We must
-	 * not sleep any more until we are finished with the compression buffer.
-	 * If on a preemptible kernel, now disable preemption.
-	 */
-	preempt_disable();
-	cb = ntfs_compression_buffers[smp_processor_id()];
+	 * Get the compression buffer. We must not sleep any more
+	 * until we are finished with it. */
+	spin_lock(&ntfs_cb_lock);
+	cb = ntfs_compression_buffer;
 	BUG_ON(!cb);
@@ -647,8 +629,8 @@ int ntfs_file_read_compressed_block(struct page *page)
 	if (vcn == start_vcn - cb_clusters) {
 		/* Sparse cb, zero out page range overlapping the cb. */
 		ntfs_debug("Found sparse compression block.");
-		/* We can sleep from now on, so we reenable preemption. */
-		preempt_enable();
+		/* We can sleep from now on, so we drop lock. */
+		spin_unlock(&ntfs_cb_lock);
 		if (cb_max_ofs)
 			cb_max_page--;
 		for (; cur_page < cb_max_page; cur_page++) {
@@ -729,8 +711,8 @@ int ntfs_file_read_compressed_block(struct page *page)
 			cb_pos += cb_max_ofs - cur_ofs;
 			cur_ofs = cb_max_ofs;
 		}
-		/* We can sleep from now on, so we reenable preemption. */
-		preempt_enable();
+		/* We can sleep from now on, so drop lock. */
+		spin_unlock(&ntfs_cb_lock);
 		/* Second stage: finalize pages. */
 		for (; cur2_page < cb_max_page; cur2_page++) {
 			page = pages[cur2_page];
@@ -759,9 +741,8 @@ int ntfs_file_read_compressed_block(struct page *page)
 			cb_max_page, cb_max_ofs, xpage, &xpage_done,
 			cb_pos, cb_size - (cb_pos - cb));
 	/*
-	 * We can sleep from now on, preemption already reenabled by
-	 * ntfs_decompess.
-	 */
+	 * We can sleep from now on, lock already dropped by
+	 * ntfs_decompress. */
 	if (err) {
 		ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
 				"0x%Lx with error code %i. Skipping "
fs/ntfs/super.c
@@ -1615,7 +1615,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
 	if (vol->cluster_size <= 4096 && !ntfs_nr_compression_users++) {
 		result = allocate_compression_buffers();
 		if (result) {
-			ntfs_error(NULL, "Failed to allocate per CPU buffers "
+			ntfs_error(NULL, "Failed to allocate buffers "
 					"for compression engine.");
 			ntfs_nr_compression_users--;
 			up(&ntfs_lock);