Commit 571ed1fd
authored Sep 29, 2018 by Trond Myklebust

SUNRPC: Replace krb5_seq_lock with a lockless scheme

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>

parent 0c1c19f4
Showing 3 changed files with 30 additions and 18 deletions (+30, -18):

  include/linux/sunrpc/gss_krb5.h        +2   -1
  net/sunrpc/auth_gss/gss_krb5_seal.c    +26  -11
  net/sunrpc/auth_gss/gss_krb5_wrap.c    +2   -6
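The scheme itself: instead of taking the global krb5_seq_lock around every increment of a context's send-sequence counter, each caller snapshots the counter and retries cmpxchg() until it successfully claims the value it saw, so contexts no longer contend on one shared lock. As a minimal userspace sketch of the same fetch-and-increment pattern, using C11 atomics rather than the kernel's cmpxchg() (demo_ctx and demo_seq_send_fetch_and_inc are invented names for illustration, not part of the patch):

#include <stdatomic.h>
#include <stdint.h>

struct demo_ctx {
	_Atomic uint32_t seq_send;	/* stand-in for ctx->seq_send */
};

/* Claim the current sequence number and advance the counter by one. */
static uint32_t demo_seq_send_fetch_and_inc(struct demo_ctx *ctx)
{
	uint32_t old = atomic_load_explicit(&ctx->seq_send,
					    memory_order_relaxed);

	/*
	 * On failure, atomic_compare_exchange_weak() refreshes 'old'
	 * with the value it found, so the loop retries against the
	 * latest counter, matching the shape of the kernel's
	 * do { ... cmpxchg() ... } while (old != seq_send) loop.
	 */
	while (!atomic_compare_exchange_weak(&ctx->seq_send, &old, old + 1))
		;
	return old;	/* pre-increment value, unique per caller */
}

In C11 this whole loop is equivalent to atomic_fetch_add(&ctx->seq_send, 1); the patch open-codes a cmpxchg() loop instead, presumably because seq_send and seq_send64 remain plain u32/u64 fields in struct krb5_ctx.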
include/linux/sunrpc/gss_krb5.h

@@ -118,7 +118,8 @@ struct krb5_ctx {
 	u8			acceptor_integ[GSS_KRB5_MAX_KEYLEN];
 };
 
-extern spinlock_t krb5_seq_lock;
+extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
+extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
 
 /* The length of the Kerberos GSS token header */
 #define GSS_KRB5_TOK_HDR_LEN	(16)
net/sunrpc/auth_gss/gss_krb5_seal.c

@@ -68,8 +68,6 @@
 # define RPCDBG_FACILITY	RPCDBG_AUTH
 #endif
 
-DEFINE_SPINLOCK(krb5_seq_lock);
-
 static void *
 setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
 {
@@ -124,6 +122,30 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
 	return krb5_hdr;
 }
 
+u32
+gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
+{
+	u32 old, seq_send = READ_ONCE(ctx->seq_send);
+
+	do {
+		old = seq_send;
+		seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
+	} while (old != seq_send);
+	return seq_send;
+}
+
+u64
+gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
+{
+	u64 old, seq_send = READ_ONCE(ctx->seq_send);
+
+	do {
+		old = seq_send;
+		seq_send = cmpxchg(&ctx->seq_send64, old, old + 1);
+	} while (old != seq_send);
+	return seq_send;
+}
+
 static u32
 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 		struct xdr_netobj *token)
@@ -154,9 +176,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	spin_lock(&krb5_seq_lock);
-	seq_send = ctx->seq_send++;
-	spin_unlock(&krb5_seq_lock);
+	seq_send = gss_seq_send_fetch_and_inc(ctx);
 
 	if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
 			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -174,7 +194,6 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 		.data = cksumdata};
 	void *krb5_hdr;
 	s32 now;
-	u64 seq_send;
 	u8 *cksumkey;
 	unsigned int cksum_usage;
 	__be64 seq_send_be64;
@@ -185,11 +204,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 	/* Set up the sequence number. Now 64-bits in clear
 	 * text and w/o direction indicator */
-	spin_lock(&krb5_seq_lock);
-	seq_send = ctx->seq_send64++;
-	spin_unlock(&krb5_seq_lock);
-
-	seq_send_be64 = cpu_to_be64(seq_send);
+	seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
 
 	memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
 
 	if (ctx->initiate) {
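For illustration, a hypothetical harness (thread counts and names invented here, compiled together with the demo_seq_send_fetch_and_inc() sketch above via cc -pthread demo.c) that exercises the CAS loop from several threads and checks that no sequence number is ever handed out twice, which is the property the removed spinlock used to guarantee:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
/* Assumes the demo_ctx / demo_seq_send_fetch_and_inc() sketch above. */

#define NTHREADS 8
#define NCALLS   100000

static struct demo_ctx ctx;
static uint32_t seen[NTHREADS][NCALLS];

static void *worker(void *arg)
{
	uint32_t *out = arg;

	for (int i = 0; i < NCALLS; i++)
		out[i] = demo_seq_send_fetch_and_inc(&ctx);
	return NULL;
}

int main(void)
{
	static unsigned char mark[NTHREADS * NCALLS];	/* zero-initialized */
	pthread_t tid[NTHREADS];

	for (int t = 0; t < NTHREADS; t++)
		pthread_create(&tid[t], NULL, worker, seen[t]);
	for (int t = 0; t < NTHREADS; t++)
		pthread_join(tid[t], NULL);

	/* The counter started at 0, so every value in [0, NTHREADS*NCALLS)
	 * must appear exactly once across all threads. */
	for (int t = 0; t < NTHREADS; t++)
		for (int i = 0; i < NCALLS; i++)
			if (mark[seen[t][i]]++) {
				printf("duplicate seq %u\n", seen[t][i]);
				return 1;
			}
	printf("all %d sequence numbers unique\n", NTHREADS * NCALLS);
	return 0;
}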
net/sunrpc/auth_gss/gss_krb5_wrap.c

@@ -228,9 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-	spin_lock(&krb5_seq_lock);
-	seq_send = kctx->seq_send++;
-	spin_unlock(&krb5_seq_lock);
+	seq_send = gss_seq_send_fetch_and_inc(kctx);
 
 	/* XXX would probably be more efficient to compute checksum
 	 * and encrypt at the same time: */
@@ -477,9 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 	*be16ptr++ = 0;
 
 	be64ptr = (__be64 *)be16ptr;
-	spin_lock(&krb5_seq_lock);
-	*be64ptr = cpu_to_be64(kctx->seq_send64++);
-	spin_unlock(&krb5_seq_lock);
+	*be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
 
 	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
 	if (err)
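In both v2 paths the freshly claimed 64-bit sequence number is written into the token in network byte order: cpu_to_be64() followed by an 8-byte store into the header. A userspace equivalent of that serialization step, with an invented helper name and glibc's htobe64() standing in for cpu_to_be64():

#include <endian.h>	/* htobe64(), glibc */
#include <stdint.h>
#include <string.h>

/* Mirror of: seq_send_be64 = cpu_to_be64(...);
 *            memcpy(krb5_hdr + 8, (char *)&seq_send_be64, 8); */
static void put_seq_be64(unsigned char *krb5_hdr, uint64_t seq)
{
	uint64_t be = htobe64(seq);	/* cpu_to_be64() equivalent */

	memcpy(krb5_hdr + 8, &be, 8);	/* sequence lives at offset 8 */
}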