nexedi / linux / Commits

Commit 3465893d, authored Aug 07, 2018 by Herbert Xu

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Merge crypto-2.6 to pick up NEON yield revert.

Parents: d6e43798, f10dc56c
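For context on what is being reverted: parent f10dc56c removes the cooperative-yield scaffolding (frame_push/frame_pop plus the if_will_cond_yield_neon / do_cond_yield_neon blocks) from the arm64 CCM and GCM inner loops below. The sketch that follows is a minimal C model of that pattern, with toy stand-ins for the kernel's TIF_NEED_RESCHED check and kernel_neon_begin()/kernel_neon_end(); it shows the control flow only, not the actual kernel macros.

    #include <stdbool.h>
    #include <stddef.h>

    /* Toy stand-ins, illustrative only: the kernel checks TIF_NEED_RESCHED
     * and brackets NEON usage with kernel_neon_begin()/kernel_neon_end(). */
    static int poll_count;
    static bool need_resched(void)      { return ++poll_count % 64 == 0; }
    static void kernel_neon_end(void)   { /* window where preemption may occur */ }
    static void kernel_neon_begin(void) { /* NEON exclusive again */ }

    /* One CBC-MAC step, reduced to a XOR so the sketch is self-contained. */
    static void process_block(unsigned char mac[16], const unsigned char *blk)
    {
        for (int i = 0; i < 16; i++)
            mac[i] ^= blk[i];
    }

    /* Shape of the reverted pattern: after each block, keep live state in
     * memory (mac[]), briefly close the NEON section if a reschedule is
     * pending, then reopen it and continue. */
    static void mac_update_yielding(unsigned char mac[16],
                                    const unsigned char *in, size_t blocks)
    {
        for (size_t i = 0; i < blocks; i++) {
            process_block(mac, in + 16 * i);
            if (need_resched()) {
                kernel_neon_end();
                kernel_neon_begin();
            }
        }
    }

For these instruction-accelerated AEAD loops, the per-block flag check and the state spill/reload reportedly cost more than the scheduling latency they saved, hence the revert picked up by this merge.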
Showing 3 changed files with 86 additions and 148 deletions (+86 -148):

  arch/arm64/crypto/aes-ce-ccm-core.S   +55  -95
  arch/arm64/crypto/ghash-ce-core.S     +25  -51
  drivers/crypto/padlock-aes.c           +6   -2
arch/arm64/crypto/aes-ce-ccm-core.S (view file @ 3465893d)
@@ -19,33 +19,24 @@
  *			 u32 *macp, u8 const rk[], u32 rounds);
  */
 ENTRY(ce_aes_ccm_auth_data)
-	frame_push	7
-
-	mov	x19, x0
-	mov	x20, x1
-	mov	x21, x2
-	mov	x22, x3
-	mov	x23, x4
-	mov	x24, x5
-
-	ldr	w25, [x22]			/* leftover from prev round? */
+	ldr	w8, [x3]			/* leftover from prev round? */
 	ld1	{v0.16b}, [x0]			/* load mac */
-	cbz	w25, 1f
-	sub	w25, w25, #16
+	cbz	w8, 1f
+	sub	w8, w8, #16
 	eor	v1.16b, v1.16b, v1.16b
-0:	ldrb	w7, [x20], #1			/* get 1 byte of input */
-	subs	w21, w21, #1
-	add	w25, w25, #1
+0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
+	subs	w2, w2, #1
+	add	w8, w8, #1
 	ins	v1.b[0], w7
 	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
 	beq	8f				/* out of input? */
-	cbnz	w25, 0b
+	cbnz	w8, 0b
 	eor	v0.16b, v0.16b, v1.16b
-1:	ld1	{v3.4s}, [x23]			/* load first round key */
-	prfm	pldl1strm, [x20]
-	cmp	w24, #12			/* which key size? */
-	add	x6, x23, #16
-	sub	w7, w24, #2			/* modified # of rounds */
+1:	ld1	{v3.4s}, [x4]			/* load first round key */
+	prfm	pldl1strm, [x1]
+	cmp	w5, #12				/* which key size? */
+	add	x6, x4, #16
+	sub	w7, w5, #2			/* modified # of rounds */
 	bmi	2f
 	bne	5f
 	mov	v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
 	ld1	{v5.4s}, [x6], #16		/* load next round key */
 	bpl	3b
 	aese	v0.16b, v4.16b
-	subs	w21, w21, #16			/* last data? */
+	subs	w2, w2, #16			/* last data? */
 	eor	v0.16b, v0.16b, v5.16b		/* final round */
 	bmi	6f
-	ld1	{v1.16b}, [x20], #16		/* load next input block */
+	ld1	{v1.16b}, [x1], #16		/* load next input block */
 	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
-	beq	6f
-
-	if_will_cond_yield_neon
-	st1	{v0.16b}, [x19]			/* store mac */
-	do_cond_yield_neon
-	ld1	{v0.16b}, [x19]			/* reload mac */
-	endif_yield_neon
-
-	b	1b
-6:	st1	{v0.16b}, [x19]			/* store mac */
+	bne	1b
+6:	st1	{v0.16b}, [x0]			/* store mac */
 	beq	10f
-	adds	w21, w21, #16
+	adds	w2, w2, #16
 	beq	10f
-	mov	w25, w21
-7:	ldrb	w7, [x20], #1
+	mov	w8, w2
+7:	ldrb	w7, [x1], #1
 	umov	w6, v0.b[0]
 	eor	w6, w6, w7
-	strb	w6, [x19], #1
-	subs	w21, w21, #1
+	strb	w6, [x0], #1
+	subs	w2, w2, #1
 	beq	10f
 	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
 	b	7b
-8:	mov	w7, w25
-	add	w25, w25, #16
+8:	mov	w7, w8
+	add	w8, w8, #16
 9:	ext	v1.16b, v1.16b, v1.16b, #1
 	adds	w7, w7, #1
 	bne	9b
 	eor	v0.16b, v0.16b, v1.16b
-	st1	{v0.16b}, [x19]
-10:	str	w25, [x22]
-
-	frame_pop
+	st1	{v0.16b}, [x0]
+10:	str	w8, [x3]
 	ret
 ENDPROC(ce_aes_ccm_auth_data)
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
 ENDPROC(ce_aes_ccm_final)

 	.macro	aes_ccm_do_crypt, enc
-	frame_push	8
-
-	mov	x19, x0
-	mov	x20, x1
-	mov	x21, x2
-	mov	x22, x3
-	mov	x23, x4
-	mov	x24, x5
-	mov	x25, x6
-
-	ldr	x26, [x25, #8]			/* load lower ctr */
-	ld1	{v0.16b}, [x24]			/* load mac */
-CPU_LE(	rev	x26, x26	)		/* keep swabbed ctr in reg */
+	ldr	x8, [x6, #8]			/* load lower ctr */
+	ld1	{v0.16b}, [x5]			/* load mac */
+CPU_LE(	rev	x8, x8	)			/* keep swabbed ctr in reg */
 0:	/* outer loop */
-	ld1	{v1.8b}, [x25]			/* load upper ctr */
-	prfm	pldl1strm, [x20]
-	add	x26, x26, #1
-	rev	x9, x26
-	cmp	w23, #12			/* which key size? */
-	sub	w7, w23, #2			/* get modified # of rounds */
+	ld1	{v1.8b}, [x6]			/* load upper ctr */
+	prfm	pldl1strm, [x1]
+	add	x8, x8, #1
+	rev	x9, x8
+	cmp	w4, #12				/* which key size? */
+	sub	w7, w4, #2			/* get modified # of rounds */
 	ins	v1.d[1], x9			/* no carry in lower ctr */
-	ld1	{v3.4s}, [x22]			/* load first round key */
-	add	x10, x22, #16
+	ld1	{v3.4s}, [x3]			/* load first round key */
+	add	x10, x3, #16
 	bmi	1f
 	bne	4f
 	mov	v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE(	rev	x26, x26	)	/* keep swabbed ctr in reg */
 	bpl	2b
 	aese	v0.16b, v4.16b
 	aese	v1.16b, v4.16b
-	subs	w21, w21, #16
-	bmi	7f				/* partial block? */
-	ld1	{v2.16b}, [x20], #16		/* load next input block */
+	subs	w2, w2, #16
+	bmi	6f				/* partial block? */
+	ld1	{v2.16b}, [x1], #16		/* load next input block */
 	.if	\enc == 1
 	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
 	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE(	rev	x26, x26	)	/* keep swabbed ctr in reg */
 	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
 	.endif
 	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
-	st1	{v1.16b}, [x19], #16		/* write output block */
-	beq	5f
-
-	if_will_cond_yield_neon
-	st1	{v0.16b}, [x24]			/* store mac */
-	do_cond_yield_neon
-	ld1	{v0.16b}, [x24]			/* reload mac */
-	endif_yield_neon
-
-	b	0b
-5:
-CPU_LE(	rev	x26, x26	)
-	st1	{v0.16b}, [x24]			/* store mac */
-	str	x26, [x25, #8]			/* store lsb end of ctr (BE) */
-6:	frame_pop
-	ret
-
-7:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
+	st1	{v1.16b}, [x0], #16		/* write output block */
+	bne	0b
+CPU_LE(	rev	x8, x8	)
+	st1	{v0.16b}, [x5]			/* store mac */
+	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
+5:	ret
+
+6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
 	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
-	st1	{v0.16b}, [x24]			/* store mac */
-	add	w21, w21, #16			/* process partial tail block */
-8:	ldrb	w9, [x20], #1			/* get 1 byte of input */
+	st1	{v0.16b}, [x5]			/* store mac */
+	add	w2, w2, #16			/* process partial tail block */
+7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
 	umov	w6, v1.b[0]			/* get top crypted ctr byte */
 	umov	w7, v0.b[0]			/* get top mac byte */
 	.if	\enc == 1
@@ -237,13 +197,13 @@ CPU_LE(	rev	x26, x26	)
 	eor	w9, w9, w6
 	eor	w7, w7, w9
 	.endif
-	strb	w9, [x19], #1			/* store out byte */
-	strb	w7, [x24], #1			/* store mac byte */
-	subs	w21, w21, #1
-	beq	6b
+	strb	w9, [x0], #1			/* store out byte */
+	strb	w7, [x5], #1			/* store mac byte */
+	subs	w2, w2, #1
+	beq	5b
 	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
 	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
-	b	8b
+	b	7b
 	.endm
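The aes-ce-ccm-core.S hunks above are mostly a register renumbering (the callee-saved x19-x25 copies disappear along with frame_push), but the *macp bookkeeping they carry ("leftover from prev round?") is the subtle part. Below is a rough C model of the semantics that ce_aes_ccm_auth_data implements, not its block-wise NEON strategy; aes_encrypt_block is a hypothetical placeholder for the CE-accelerated round loop shown in the assembly.

    #include <stddef.h>

    /* Placeholder so the sketch compiles on its own; the real code runs
     * the AES round loop from the assembly above.  This is NOT AES. */
    static void aes_encrypt_block(unsigned char mac[16])
    {
        for (int i = 0; i < 16; i++)
            mac[i] = (unsigned char)(mac[i] + 1);
    }

    /* CCM authentication bookkeeping: *macp counts how many bytes of the
     * current 16-byte MAC block have already been absorbed.  Input bytes
     * are XORed in one at a time; when the block fills, the cipher is
     * applied and the count wraps to zero, and the residual count is
     * handed back to the caller for the next invocation. */
    static void ccm_auth_data(unsigned char mac[16], const unsigned char *in,
                              size_t len, unsigned int *macp)
    {
        unsigned int p = *macp;

        while (len--) {
            mac[p] ^= *in++;
            if (++p == 16) {
                aes_encrypt_block(mac);
                p = 0;
            }
        }
        *macp = p;
    }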
arch/arm64/crypto/ghash-ce-core.S (view file @ 3465893d)
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
 	.endm

 	.macro	pmull_gcm_do_crypt, enc
-	frame_push	10
+	ld1	{SHASH.2d}, [x4]
+	ld1	{XL.2d}, [x1]
+	ldr	x8, [x5, #8]			// load lower counter

-	mov	x19, x0
-	mov	x20, x1
-	mov	x21, x2
-	mov	x22, x3
-	mov	x23, x4
-	mov	x24, x5
-	mov	x25, x6
-	mov	x26, x7
-	.if	\enc == 1
-	ldr	x27, [sp, #96]			// first stacked arg
-	.endif
-
-	ldr	x28, [x24, #8]			// load lower counter
-CPU_LE(	rev	x28, x28	)
-
-0:	mov	x0, x25
-	load_round_keys	w26, x0
-	ld1	{SHASH.2d}, [x23]
-	ld1	{XL.2d}, [x20]
+	load_round_keys	w7, x6
 	movi	MASK.16b, #0xe1
 	ext	SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE(	rev	x8, x8		)
 	shl	MASK.2d, MASK.2d, #57
 	eor	SHASH2.16b, SHASH2.16b, SHASH.16b

 	.if	\enc == 1
-	ld1	{KS.16b}, [x27]
+	ldr	x10, [sp]
+	ld1	{KS.16b}, [x10]
 	.endif

-1:	ld1	{CTR.8b}, [x24]			// load upper counter
-	ld1	{INP.16b}, [x22], #16
-	rev	x9, x28
-	add	x28, x28, #1
-	sub	w19, w19, #1
+0:	ld1	{CTR.8b}, [x5]			// load upper counter
+	ld1	{INP.16b}, [x3], #16
+	rev	x9, x8
+	add	x8, x8, #1
+	sub	w0, w0, #1
 	ins	CTR.d[1], x9			// set lower counter

 	.if	\enc == 1
 	eor	INP.16b, INP.16b, KS.16b	// encrypt input
-	st1	{INP.16b}, [x21], #16
+	st1	{INP.16b}, [x2], #16
 	.endif

 	rev64	T1.16b, INP.16b

-	cmp	w26, #12
-	b.ge	4f				// AES-192/256?
+	cmp	w7, #12
+	b.ge	2f				// AES-192/256?

-2:	enc_round	CTR, v21
+1:	enc_round	CTR, v21

 	ext	T2.16b, XL.16b, XL.16b, #8
 	ext	IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE(	rev	x28, x28	)
 	.if	\enc == 0
 	eor	INP.16b, INP.16b, KS.16b
-	st1	{INP.16b}, [x21], #16
+	st1	{INP.16b}, [x2], #16
 	.endif

-	cbz	w19, 3f
-
-	if_will_cond_yield_neon
-	st1	{XL.2d}, [x20]
-	.if	\enc == 1
-	st1	{KS.16b}, [x27]
-	.endif
-	do_cond_yield_neon
-	b	0b
-	endif_yield_neon
+	cbnz	w0, 0b

-	b	1b
-
-3:	st1	{XL.2d}, [x20]
+CPU_LE(	rev	x8, x8		)
+	st1	{XL.2d}, [x1]
+	str	x8, [x5, #8]			// store lower counter

 	.if	\enc == 1
-	st1	{KS.16b}, [x27]
+	st1	{KS.16b}, [x10]
 	.endif

-CPU_LE(	rev	x28, x28	)
-	str	x28, [x24, #8]			// store lower counter
-
-	frame_pop
 	ret

-4:	b.eq	5f				// AES-192?
+2:	b.eq	3f				// AES-192?
 	enc_round	CTR, v17
 	enc_round	CTR, v18
-5:	enc_round	CTR, v19
+3:	enc_round	CTR, v19
 	enc_round	CTR, v20
-	b	2b
+	b	1b
 	.endm
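Beyond dropping the yield block, these ghash-ce-core.S hunks restore direct argument registers (x0-x7, plus a stack slot read via ldr x10, [sp] instead of the x19-x28 copies). The part that survives the revert is the counter handling: the low 64 bits of the GCM counter block live byte-swapped ("swabbed") in a general register and are incremented there, with no carry into the upper half. Here is a self-contained C model of that flow for the little-endian case (the CPU_LE(rev ...) lines); the names are illustrative, not the kernel's.

    #include <stdint.h>
    #include <string.h>

    /* Portable 64-bit byte swap so the sketch stands alone. */
    static uint64_t bswap64(uint64_t v)
    {
        uint64_t r = 0;
        for (int i = 0; i < 8; i++)
            r = (r << 8) | ((v >> (8 * i)) & 0xff);
        return r;
    }

    /* Counter flow of pmull_gcm_do_crypt, little-endian case: the low half
     * of the big-endian counter block is loaded once, kept CPU-endian in a
     * register, re-reversed into the block for each iteration, and only
     * written back big-endian when the loop finishes.  The upper 64 bits
     * never receive a carry. */
    static void gcm_ctr_loop(uint8_t ctr_block[16], size_t nblocks,
                             void (*encrypt_block)(const uint8_t ctr[16]))
    {
        uint64_t lo;

        memcpy(&lo, ctr_block + 8, 8);
        lo = bswap64(lo);                    /* CPU_LE( rev x8, x8 ) */

        while (nblocks--) {
            uint64_t be = bswap64(lo);
            memcpy(ctr_block + 8, &be, 8);   /* ins CTR.d[1], x9 */
            encrypt_block(ctr_block);        /* AES of the counter block */
            lo++;                            /* add x8, x8, #1 (no carry) */
        }

        lo = bswap64(lo);
        memcpy(ctr_block + 8, &lo, 8);       /* store lower counter (BE) */
    }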
drivers/crypto/padlock-aes.c (view file @ 3465893d)
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 		return;
 	}

+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 			      : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,

 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count - initial));
+		      : "d"(control_word), "b"(key), "c"(count));
 }

 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 	if (count < cbc_fetch_blocks)
 		return cbc_crypt(input, output, key, iv, control_word, count);

+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 			      : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,

 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count - initial));
+		      : "d" (control_word), "b" (key), "c" (count));
 	return iv;
 }
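The padlock-aes.c change is small but it is the data-corruption fix: the Nano prefetch workaround processes the odd "initial" blocks first, and the remaining count must be reduced before the main rep xcrypt rather than inside its operand list, since the old "c"(count - initial) input ended up running too many iterations per the fix's rationale. Below is a compilable sketch of the corrected flow; rep_xcryptecb is a stand-in for the hardware instruction and the fetch-block constant is illustrative, not the driver's value.

    #include <stddef.h>
    #include <string.h>

    enum { BLOCK = 16, ECB_FETCH_BLOCKS = 8 };  /* illustrative constant */

    /* Stand-in for "rep xcryptecb": processes n blocks and, like the
     * hardware, leaves both pointers advanced past what it consumed. */
    static void rep_xcryptecb(const unsigned char **in, unsigned char **out,
                              size_t n)
    {
        memmove(*out, *in, n * BLOCK);  /* "encryption" reduced to a copy */
        *in  += n * BLOCK;
        *out += n * BLOCK;
    }

    /* The fixed control flow: subtract the workaround blocks from count
     * first, so the main pass sees exactly count - initial blocks and can
     * never run past the end of the buffer. */
    static void xcrypt_ecb_fixed(const unsigned char *in, unsigned char *out,
                                 size_t count)
    {
        size_t initial = count & (ECB_FETCH_BLOCKS - 1);

        count -= initial;               /* the essence of the fix */

        if (initial)
            rep_xcryptecb(&in, &out, initial);

        rep_xcryptecb(&in, &out, count);
    }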