Commit b3124ec2
Authored Aug 13, 2018 by Michael Ellerman
Merge branch 'fixes' into next
Merge our fixes branch from the 4.18 cycle to resolve some minor conflicts.
Parents: f7a6947c cca19f0b
Showing 8 changed files with 73 additions and 26 deletions (+73 -26)
arch/powerpc/Makefile                   +1  -0
arch/powerpc/include/asm/mmu_context.h  +23 -14
arch/powerpc/kernel/idle_book3s.S       +2  -0
arch/powerpc/kvm/book3s_64_vio.c        +1  -1
arch/powerpc/kvm/book3s_64_vio_hv.c     +4  -2
arch/powerpc/mm/mmu_context_iommu.c     +35 -2
arch/powerpc/xmon/xmon.c                +2  -2
drivers/vfio/vfio_iommu_spapr_tce.c     +5  -5
arch/powerpc/Makefile
@@ -237,6 +237,7 @@ endif
 cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
 cpu-as-$(CONFIG_E500)		+= -Wa,-me500
 cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)
arch/powerpc/include/asm/mmu_context.h
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
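The signature change above is the heart of this merge's IOMMU fix: callers must now say which IOMMU page size they intend to map, so the translation helper can refuse memory that is not backed by sufficiently large pages. A minimal sketch of the updated calling convention, modelled on the KVM caller further down this page (mem, ua, tbl and the H_* return code are taken from that context; surrounding error handling is abbreviated):

	/* Sketch only: mirrors kvmppc_tce_iommu_do_map() below. */
	unsigned long hpa = 0;

	/*
	 * Ask for a translation valid for the table's IOMMU page size;
	 * fails with -EFAULT if the pinned backing pages are smaller.
	 */
	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_HARDWARE;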
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
 	int c;
 
-	c = atomic_dec_if_positive(&mm->context.copros);
-
-	/* Detect imbalance between add and remove */
-	WARN_ON(c < 0);
-
 	/*
-	 * Need to broadcast a global flush of the full mm before
-	 * decrementing active_cpus count, as the next TLBI may be
-	 * local and the nMMU and/or PSL need to be cleaned up.
-	 * Should be rare enough so that it's acceptable.
+	 * When removing the last copro, we need to broadcast a global
+	 * flush of the full mm, as the next TLBI may be local and the
+	 * nMMU and/or PSL need to be cleaned up.
+	 *
+	 * Both the 'copros' and 'active_cpus' counts are looked at in
+	 * flush_all_mm() to determine the scope (local/global) of the
+	 * TLBIs, so we need to flush first before decrementing
+	 * 'copros'. If this API is used by several callers for the
+	 * same context, it can lead to over-flushing. It's hopefully
+	 * not common enough to be a problem.
 	 *
 	 * Skip on hash, as we don't know how to do the proper flush
 	 * for the time being. Invalidations will remain global if
-	 * used on hash.
+	 * used on hash. Note that we can't drop 'copros' either, as
+	 * it could make some invalidations local with no flush
+	 * in-between.
 	 */
-	if (c == 0 && radix_enabled()) {
+	if (radix_enabled()) {
 		flush_all_mm(mm);
-		dec_mm_active_cpus(mm);
+
+		c = atomic_dec_if_positive(&mm->context.copros);
+		/* Detect imbalance between add and remove */
+		WARN_ON(c < 0);
+
+		if (c == 0)
+			dec_mm_active_cpus(mm);
 	}
 }
 #else
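For context on the hunk above: the counterpart mm_context_add_copro() is declared alongside this function in the same header. A rough sketch of the pairing the new comment assumes, with the function name and call site purely illustrative (real users are coprocessor drivers such as cxl):

	/* Illustrative only: not a real driver function. */
	static void copro_context_lifetime_sketch(struct mm_struct *mm)
	{
		mm_context_add_copro(mm);	/* copros++ (and active_cpus++) */

		/* ... coprocessor translates through this mm (nMMU/PSL) ... */

		/*
		 * The global flush now happens inside remove, *before* copros
		 * is decremented, so no TLBI issued in between can be
		 * downgraded to a CPU-local flush while the nMMU may still
		 * hold entries for this mm.
		 */
		mm_context_remove_copro(mm);
	}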
arch/powerpc/kernel/idle_book3s.S
@@ -146,7 +146,9 @@ power9_restore_additional_sprs:
 	mtspr	SPRN_MMCR1, r4
 
 	ld	r3, STOP_MMCR2(r13)
+	ld	r4, PACA_SPRG_VDSO(r13)
 	mtspr	SPRN_MMCR2, r3
+	mtspr	SPRN_SPRG3, r4
 	blr
 
 /*
arch/powerpc/kvm/book3s_64_vio.c
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
 		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
 		return H_HARDWARE;
 
 	if (mm_iommu_mapped_inc(mem))
arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -275,7 +275,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (!mem)
 		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+			&hpa)))
 		return H_HARDWARE;
 
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
@@ -461,7 +462,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
 		if (mem)
-			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
 	}
 
 	if (!prereg) {
arch/powerpc/mm/mmu_context_iommu.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
 	struct rcu_head rcu;
 	unsigned long used;
 	atomic64_t mapped;
+	unsigned int pageshift;
 	u64 ua;			/* userspace address */
 	u64 entries;		/* number of entries in hpas[] */
 	u64 *hpas;		/* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
+	unsigned int pageshift;
+	unsigned long flags;
 	struct page *page = NULL;
 
 	mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		goto unlock_exit;
 	}
 
+	/*
+	 * For a starting point for a maximum page size calculation
+	 * we use @ua and @entries natural alignment to allow IOMMU pages
+	 * smaller than huge pages but still bigger than PAGE_SIZE.
+	 */
+	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
 	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
 	if (!mem->hpas) {
 		kfree(mem);
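As a worked example of the starting-point heuristic above (all values assumed): pinning entries = 4096 pages at ua = 0x3fff0000000 gives ua | (entries << PAGE_SHIFT) = 0x3fff0000000 | 0x1000000, whose lowest set bit is 24, so the region starts out eligible for IOMMU pages of up to 16MB; the per-page min() clamp added in the next hunk can only lower that. A standalone demonstration (userspace C, with __builtin_ctzl standing in for the kernel's __ffs()):

	#include <stdio.h>

	int main(void)
	{
		unsigned long ua = 0x3fff0000000UL;	/* assumed userspace address */
		unsigned long entries = 4096;		/* assumed number of pages */
		unsigned long page_shift = 12;		/* 4K PAGE_SHIFT */

		/* Lowest set bit of the combined value = natural alignment. */
		unsigned int pageshift = __builtin_ctzl(ua | (entries << page_shift));

		printf("starting pageshift = %u\n", pageshift);	/* prints 24 */
		return 0;
	}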
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		}
 	}
 populate:
+		pageshift = PAGE_SHIFT;
+		if (PageCompound(page)) {
+			pte_t *pte;
+			struct page *head = compound_head(page);
+			unsigned int compshift = compound_order(head);
+
+			local_irq_save(flags); /* disables as well */
+			pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+			local_irq_restore(flags);
+
+			/* Double check it is still the same pinned page */
+			if (pte && pte_page(*pte) == head &&
+					pageshift == compshift)
+				pageshift = max_t(unsigned int, pageshift,
+						PAGE_SHIFT);
+		}
+		mem->pageshift = min(mem->pageshift, pageshift);
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	*hpa = *va | (ua & ~PAGE_MASK);
 
 	return 0;
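This guard is what the new pageshift argument feeds. For example (sizes assumed): a region pinned out of ordinary 4K pages ends up with mem->pageshift == 12, so a caller asking to map a 64K IOMMU page (pageshift == 16) now gets -EFAULT instead of an HPA that is only valid for the first 4K of the IOMMU page.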
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	pa = (void *) vmalloc_to_phys(va);
 	if (!pa)
 		return -EFAULT;
arch/powerpc/xmon/xmon.c
@@ -2735,7 +2735,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 {
 	int nr, dotted;
 	unsigned long first_adr;
-	unsigned long inst, last_inst = 0;
+	unsigned int inst, last_inst = 0;
 	unsigned char val[4];
 
 	dotted = 0;
@@ -2759,7 +2759,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 			dotted = 0;
 		last_inst = inst;
 		if (praddr)
-			printf(REG"  %.8lx", adr, inst);
+			printf(REG"  %.8x", adr, inst);
 		printf("\t");
 		dump_func(inst, adr);
 		printf("\n");
drivers/vfio/vfio_iommu_spapr_tce.c
@@ -419,17 +419,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-		unsigned long tce, unsigned long size,
+		unsigned long tce, unsigned long shift,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(container->mm, tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
 	if (!mem)
 		return -EINVAL;
 
-	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
 	if (ret)
 		return -EINVAL;
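Passing the shift instead of the size should lose nothing here: on powerpc, IOMMU_PAGE_SIZE(tbl) expands, as far as I can tell, to 1UL << (tbl)->it_page_shift, so mm_iommu_lookup() still sees the same byte count while mm_iommu_ua_to_hpa() gets the shift it now needs for the backing-page-size check.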
@@ -450,7 +450,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 		return;
 
 	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
-			IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+			tbl->it_page_shift, &hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
 				__func__, be64_to_cpu(*pua), entry, ret);
@@ -566,7 +566,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
 
 		ret = tce_iommu_prereg_ua_to_hpa(container,
-				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+				tce, tbl->it_page_shift, &hpa, &mem);
 		if (ret)
 			break;