Commit 3943875e authored by Thomas Hellstrom

drm/ttm: Fix vma page_prot bit manipulation

Fix a long-standing TTM issue where we manipulated the vma page_prot
bits while mmap_sem was taken in read mode only. We now make a local
copy of the vma structure, which we pass to vm_insert_mixed() when we
set the PTEs.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
parent 59c8e663
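
The diff below implements this by computing the desired protection bits into a stack-local struct vm_area_struct and handing only that copy to vm_insert_mixed(). A minimal sketch of the pattern, using a simplified, hypothetical helper (not the exact kernel code; the TTM-specific placement logic and error handling are omitted):

#include <linux/mm.h>

/*
 * Hypothetical helper illustrating the approach: the desired page
 * protection is written into a stack-local copy of the vma, so the
 * caller's vma->vm_page_prot is never modified while mmap_sem is only
 * held for read. Only the local copy is passed to vm_insert_mixed().
 */
static int insert_pfn_with_local_prot(struct vm_area_struct *vma,
                                      unsigned long address,
                                      unsigned long pfn,
                                      pgprot_t prot)
{
        struct vm_area_struct cvma = *vma;      /* local copy, safe to modify */

        cvma.vm_page_prot = prot;               /* e.g. caching bits from ttm_io_prot() */
        return vm_insert_mixed(&cvma, address, pfn);
}

The vma visible to the rest of the mm stays untouched; only the page table entry inserted by vm_insert_mixed() picks up the adjusted caching bits.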
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int retval = VM_FAULT_NOPAGE;
         struct ttm_mem_type_manager *man =
                 &bdev->man[bo->mem.mem_type];
+        struct vm_area_struct cvma;
 
         /*
          * Work around locking order reversal in fault / nopfn
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         /*
-         * Strictly, we're not allowed to modify vma->vm_page_prot here,
-         * since the mmap_sem is only held in read mode. However, we
-         * modify only the caching bits of vma->vm_page_prot and
-         * consider those bits protected by
-         * the bo->mutex, as we should be the only writers.
-         * There shouldn't really be any readers of these bits except
-         * within vm_insert_mixed()? fork?
-         *
-         * TODO: Add a list of vmas to the bo, and change the
-         * vma->vm_page_prot when the object changes caching policy, with
-         * the correct locks held.
+         * Make a local vma copy to modify the page_prot member
+         * and vm_flags if necessary. The vma parameter is protected
+         * by mmap_sem in write mode.
          */
+        cvma = *vma;
+        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
         if (bo->mem.bus.is_iomem) {
-                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                                vma->vm_page_prot);
+                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                cvma.vm_page_prot);
         } else {
                 ttm = bo->ttm;
-                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-                    vm_get_page_prot(vma->vm_flags) :
-                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+                if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+                        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                        cvma.vm_page_prot);
 
                 /* Allocate all page at once, most common usage */
                 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                         pfn = page_to_pfn(page);
                 }
 
-                ret = vm_insert_mixed(vma, address, pfn);
+                ret = vm_insert_mixed(&cvma, address, pfn);
                 /*
                  * Somebody beat us to this PTE or prefaulting to
                  * an already populated PTE, or prefaulting error.