Commit cc1d98a9 authored by Anton Blanchard

ppc64: formatting cleanups

parent d1d40723
@@ -7,7 +7,6 @@
  */
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <asm/processor.h>	/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
@@ -358,20 +357,6 @@ extern pgd_t ioremap_dir[1024];
 
 extern void paging_init(void);
 
-/*
- * Page tables may have changed.  We don't need to do anything here
- * as entries are faulted into the hash table by the low-level
- * data/instruction access exception handlers.
- */
-#if 0
-/*
- * We won't be able to use update_mmu_cache to update the
- * hardware page table because we need to update the pte
- * as well, but we don't get the address of the pte, only
- * its value.
- */
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
-#else
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -380,7 +365,6 @@ extern void paging_init(void);
  * hash-table miss exception.
  */
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-#endif
 
 /* Encode and de-code a swap entry */
 #define SWP_TYPE(entry)		(((entry).val >> 1) & 0x3f)
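The hunk above deletes the dead #if 0 branch whose comment recorded the old worry that update_mmu_cache() could not maintain the hardware page table, because the hook receives the PTE by value rather than by address. Below is a minimal userspace sketch of the surviving hook's call shape; the struct contents, the present bit, and the printf stub are illustrative assumptions, not this tree's code:

#include <stdio.h>

/* Stand-ins so the sketch compiles outside the kernel. */
typedef struct { unsigned long pte; } pte_t;
struct vm_area_struct { const char *name; };

/*
 * Stub standing in for the ppc64 implementation: the real routine
 * preloads a hardware PTE into the hash table so the first access
 * after a page fault does not take an extra hash-table miss
 * exception.  Note the pte arrives by value, exactly the point the
 * removed comment was making.
 */
static void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long addr, pte_t pte)
{
	printf("preload HPTE for %s vma: addr=0x%lx pte=0x%lx\n",
	       vma->name, addr, pte.pte);
}

int main(void)
{
	struct vm_area_struct vma = { "anon" };
	pte_t entry = { 0x12345000UL | 0x1UL };	/* illustrative pfn | present bit */

	/* In kernel code, set_pte(ptep, entry) would precede this call. */
	update_mmu_cache(&vma, 0x10002000UL, entry);
	return 0;
}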
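Only SWP_TYPE is visible in the shown context: the swap type sits in bits 1..6 of the entry. The sketch below round-trips an encode/decode under the usual layout assumptions of this era; the SWP_OFFSET placement in bits 8 and up and the SWP_ENTRY constructor are assumptions, not shown in the hunk, and bit 0 is left clear so a swapped-out entry never looks like a present PTE:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

/* From the hunk above: the type occupies bits 1..6. */
#define SWP_TYPE(entry)		(((entry).val >> 1) & 0x3f)
/* Assumed companions, typical for this layout but not in the hunk. */
#define SWP_OFFSET(entry)	((entry).val >> 8)
#define SWP_ENTRY(type, offset)	\
	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })

int main(void)
{
	swp_entry_t e = SWP_ENTRY(5UL, 0x1234UL);

	assert(SWP_TYPE(e) == 5);	/* bits 1..6 round-trip */
	assert(SWP_OFFSET(e) == 0x1234);
	printf("swap entry 0x%lx -> type %lu, offset 0x%lx\n",
	       e.val, SWP_TYPE(e), SWP_OFFSET(e));
	return 0;
}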
@@ -18,17 +18,17 @@
 #ifdef DRENG_TYPES
 typedef unsigned long __kernel_dev_t;
 typedef unsigned long __kernel_ino_t;
-typedef unsigned long __kernel_nlink_t;
+typedef unsigned long __kernel_nlink_t;
 #else
 typedef unsigned int __kernel_dev_t;
 typedef unsigned int __kernel_ino_t;
-typedef unsigned int __kernel_nlink_t;
+typedef unsigned int __kernel_nlink_t;
 #endif
 typedef unsigned int __kernel_mode_t;
 typedef long __kernel_off_t;
 typedef long long __kernel_loff_t;
 typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
+typedef int __kernel_ipc_pid_t;
 typedef unsigned int __kernel_uid_t;
 typedef unsigned int __kernel_gid_t;
 typedef unsigned long __kernel_size_t;
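The changed lines in this hunk are whitespace-only (the commit is a formatting cleanup), which is why each -/+ pair reads identically once tabs are normalized. As context, the DRENG_TYPES branch widens dev_t, ino_t and nlink_t from 32 to 64 bits, since unsigned long is 8 bytes on ppc64. A quick check, assuming an LP64 target and using one representative typedef:

#include <stdio.h>

#define DRENG_TYPES 1	/* assumed: select the wide variants */

#ifdef DRENG_TYPES
typedef unsigned long __kernel_ino_t;
#else
typedef unsigned int __kernel_ino_t;
#endif

int main(void)
{
	/* On LP64 ppc64 this prints 8; the #else branch would give 4. */
	printf("sizeof(__kernel_ino_t) = %zu\n", sizeof(__kernel_ino_t));
	return 0;
}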
@@ -58,12 +58,6 @@ typedef struct {
 #define SIGRTMIN	32
 #define SIGRTMAX	(_NSIG-1)
 
 /*
  * SA_FLAGS values:
  *
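The six deleted lines of this hunk are not shown. With SIGRTMIN fixed at 32 and SIGRTMAX tracking _NSIG, the realtime signals occupy the upper half of the signal space. A small arithmetic check; the value 64 for _NSIG is an assumption here (it is defined earlier in this header, outside the shown hunk):

#include <stdio.h>

#define _NSIG		64	/* assumed; defined earlier in signal.h */
#define SIGRTMIN	32
#define SIGRTMAX	(_NSIG-1)

int main(void)
{
	/* Prints: realtime signals: 32..63 (32 total) */
	printf("realtime signals: %d..%d (%d total)\n",
	       SIGRTMIN, SIGRTMAX, SIGRTMAX - SIGRTMIN + 1);
	return 0;
}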
@@ -38,7 +38,7 @@
  * give any ordering guarantees about loads, we have to use the
  * stronger but slower sync instruction for mb and rmb.
  */
-#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
+#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
 #define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
 #define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
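The changed mb() line is another whitespace-only pair. The hunk pins the kernel barrier macros to PowerPC instructions: sync is the full barrier, lwsync backs the read barrier rmb(), and eieio backs the write barrier wmb(). Below is a sketch of the classic producer/consumer pairing of wmb() and rmb(); it needs a ppc64 toolchain to assemble, and the data/ready protocol is illustrative, not kernel code:

#include <stdio.h>

/* As in the hunk (ppc64 only: sync/lwsync/eieio are PowerPC opcodes). */
#define mb()	__asm__ __volatile__ ("sync"   : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio"  : : : "memory")

static volatile int data;
static volatile int ready;

/* Producer: publish the data, then the flag. */
void produce(int value)
{
	data = value;
	wmb();		/* order the data store before the flag store */
	ready = 1;
}

/* Consumer: observe the flag, then read the data. */
int consume(void)
{
	while (!ready)
		;	/* spin until the producer publishes */
	rmb();		/* order the flag load before the data load */
	return data;
}

int main(void)
{
	produce(42);	/* single-threaded demo of the call pattern */
	printf("%d\n", consume());
	return 0;
}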