Commit 0b8f0d87 authored by Quanfa Fu, committed by Linus Torvalds
parent 7f0d2672
@@ -1303,7 +1303,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		/*
 		 * Record which node the original page is from and save this
 		 * information to khugepaged_node_load[].
-		 * Khupaged will allocate hugepage from the node has the max
+		 * Khugepaged will allocate hugepage from the node has the max
 		 * hit record.
 		 */
 		node = page_to_nid(page);
...
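The comment fixed above describes khugepaged's NUMA heuristic: while scanning the PMD range it counts, per node, how many source pages it sees, and it later allocates the collapsed hugepage from the node with the most hits. A minimal, hypothetical C sketch of that selection step follows; pick_target_node() and nr_nodes are illustrative names, not the kernel's API.

/*
 * Hypothetical sketch (not the kernel's code): node_load[] holds per-node
 * hit counts recorded during the scan; return the node with the most hits.
 */
#include <stdio.h>

static int pick_target_node(const int *node_load, int nr_nodes)
{
	int nid, target = 0, max_hits = 0;

	for (nid = 0; nid < nr_nodes; nid++) {
		if (node_load[nid] > max_hits) {
			max_hits = node_load[nid];
			target = nid;
		}
	}
	return target;
}

int main(void)
{
	int node_load[4] = { 3, 120, 7, 0 };	/* hits recorded during the scan */

	printf("allocate hugepage on node %d\n", pick_target_node(node_load, 4));
	return 0;
}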
@@ -1306,7 +1306,7 @@ static int __get_unpoison_page(struct page *page)
  *
  * get_hwpoison_page() takes a page refcount of an error page to handle memory
  * error on it, after checking that the error page is in a well-defined state
- * (defined as a page-type we can successfully handle the memor error on it,
+ * (defined as a page-type we can successfully handle the memory error on it,
  * such as LRU page and hugetlb page).
  *
  * Memory error handling could be triggered at any time on any type of page,
...
@@ -819,7 +819,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	if (KMALLOC_MIN_SIZE >= 64) {
 		/*
-		 * The 96 byte size cache is not used if the alignment
+		 * The 96 byte sized cache is not used if the alignment
 		 * is 64 byte.
 		 */
 		for (i = 64 + 8; i <= 96; i += 8)
...
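The comment fixed in this hunk refers to how kmalloc picks a size class: a 96-byte slot cannot keep every object 64-byte aligned, so when the minimum alignment is 64 bytes the 96-byte cache is skipped and requests in the 65..96 byte range are served from the 128-byte cache. Below is a hypothetical, self-contained C sketch of that idea; caches[] and bucket_for() are made-up illustrations, not the kernel's size_index[] machinery.

#include <stdio.h>

/* Illustrative candidate cache sizes, including the "odd" 96-byte one. */
static const unsigned int caches[] = { 8, 16, 32, 64, 96, 128, 192, 256 };

static unsigned int bucket_for(unsigned int size, unsigned int min_align)
{
	unsigned int i;

	for (i = 0; i < sizeof(caches) / sizeof(caches[0]); i++) {
		/*
		 * A cache can only honour the minimum alignment if its object
		 * size is a multiple of it, so kmalloc-96 drops out once the
		 * minimum alignment is 64 bytes.
		 */
		if (caches[i] % min_align)
			continue;
		if (caches[i] >= size)
			return caches[i];
	}
	return 0;	/* would fall back to larger allocations */
}

int main(void)
{
	unsigned int sz;

	/* With 64-byte alignment, 72..96 byte requests land in the 128-byte cache. */
	for (sz = 72; sz <= 96; sz += 8)
		printf("request %3u -> cache %u (min_align 64)\n",
		       sz, bucket_for(sz, 64));
	return 0;
}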
@@ -882,7 +882,7 @@ void lru_cache_disable(void)
 	 * all online CPUs so any calls of lru_cache_disabled wrapped by
 	 * local_lock or preemption disabled would be ordered by that.
 	 * The atomic operation doesn't need to have stronger ordering
-	 * requirements because that is enforeced by the scheduling
+	 * requirements because that is enforced by the scheduling
 	 * guarantees.
 	 */
 	__lru_add_drain_all(true);
...