Commit 678c9fd0 authored by Ross Zwisler, committed by Linus Torvalds

dax: add tracepoints to dax_load_hole()

Add tracepoints to dax_load_hole(), following the same logging conventions
as the rest of DAX.

Here is the logging generated by a PTE read from a hole:

  read-1075  [002] ....
    62.362108: dax_pte_fault: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280

  read-1075  [002] ....
    62.362140: dax_load_hole: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280 NOPAGE

  read-1075  [002] ....
    62.362141: dax_pte_fault_done: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280 NOPAGE

Link: http://lkml.kernel.org/r/20170221195116.13278-4-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c3ff68d7
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -509,21 +509,25 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 static int dax_load_hole(struct address_space *mapping, void **entry,
 			 struct vm_fault *vmf)
 {
+	struct inode *inode = mapping->host;
 	struct page *page;
 	int ret;
 
 	/* Hole page already exists? Return it... */
 	if (!radix_tree_exceptional_entry(*entry)) {
 		page = *entry;
-		goto out;
+		goto finish_fault;
 	}
 
 	/* This will replace locked radix tree entry with a hole page */
 	page = find_or_create_page(mapping, vmf->pgoff,
 				   vmf->gfp_mask | __GFP_ZERO);
-	if (!page)
-		return VM_FAULT_OOM;
-out:
+	if (!page) {
+		ret = VM_FAULT_OOM;
+		goto out;
+	}
+
+finish_fault:
 	vmf->page = page;
 	ret = finish_fault(vmf);
 	vmf->page = NULL;
@@ -531,8 +535,10 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
 	*entry = page;
 	if (!ret) {
 		/* Grab reference for PTE that is now referencing the page */
 		get_page(page);
-		return VM_FAULT_NOPAGE;
+		ret = VM_FAULT_NOPAGE;
 	}
+out:
+	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
 }

--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -192,6 +192,7 @@ DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
 DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
 DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
 DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
+DEFINE_PTE_FAULT_EVENT(dax_load_hole);
 
 #endif /* _TRACE_FS_DAX_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment