Commit b7115316 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Replace _ALIGN_UP() by ALIGN()

_ALIGN_UP() is specific to powerpc, while ALIGN() is generic and does the same thing.

Replace _ALIGN_UP() with ALIGN().
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Joel Stanley <joel@jms.id.au>
Link: https://lore.kernel.org/r/8a6d7e45f7904c73a0af539642d3962e2a3c7268.1587407777.git.christophe.leroy@c-s.fr
parent e96d904e
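Both helpers perform the same operation: round a value up to the next multiple of a power-of-two boundary using the usual add-and-mask trick, so the substitution throughout this diff is purely mechanical. The sketch below (standalone userspace C; the EXAMPLE_* macro names and bodies are written out here for illustration and are not copied from kernel headers) shows the behaviour being relied on:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins: round x up / down to a multiple of a,
 * where a is a power of two (same shape as the kernel macros). */
#define EXAMPLE_ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1))
#define EXAMPLE_ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        unsigned long page = 4096;      /* e.g. IOMMU_PAGE_SIZE_4K */

        assert(EXAMPLE_ALIGN(1UL, page) == 4096);       /* rounds up */
        assert(EXAMPLE_ALIGN(4096UL, page) == 4096);    /* already aligned */
        assert(EXAMPLE_ALIGN_DOWN(4097UL, page) == 4096);

        /* like the nvram hunk below: bytes rounded up to 16-byte blocks */
        printf("100 bytes -> %lu blocks\n", EXAMPLE_ALIGN(100UL, 16UL) / 16);
        return 0;
}

With ALIGN() behaving this way for power-of-two sizes, every _ALIGN_UP(x, size) call below can be replaced one-for-one without changing the computed values.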
@@ -22,11 +22,11 @@
 #define IOMMU_PAGE_SHIFT_4K 12
 #define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
 #define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
-#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)
 #define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
 #define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
-#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
 /* Boot time flags */
 extern int iommu_is_off;
@@ -534,7 +534,7 @@ struct exception_regs {
 };
 /* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16)
+#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
@@ -854,8 +854,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
        BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
        /* Convert sizes from bytes to blocks */
-       req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
-       min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+       req_size = ALIGN(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+       min_size = ALIGN(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
        /* If no minimum size specified, make it the same as the
         * requested size
@@ -131,7 +131,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
        unsigned long io_virt_offset;
        phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
-       size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
+       size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
        /* Make sure IO area address is clear */
        hose->io_base_alloc = NULL;
@@ -97,7 +97,7 @@ static inline int overlaps_initrd(unsigned long start, unsigned long size)
                return 0;
        return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
-               start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+               start <= ALIGN(initrd_end, PAGE_SIZE);
 #else
        return 0;
 #endif
@@ -624,7 +624,7 @@ static void __init early_reserve_mem(void)
        /* Then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start)) {
                memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
-                       _ALIGN_UP(initrd_end, PAGE_SIZE) -
+                       ALIGN(initrd_end, PAGE_SIZE) -
                        ALIGN_DOWN(initrd_start, PAGE_SIZE));
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -1449,18 +1449,18 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
        unsigned long addr = 0;
        if (align)
-               base = _ALIGN_UP(base, align);
+               base = ALIGN(base, align);
        prom_debug("%s(%lx, %lx)\n", __func__, size, align);
        if (ram_top == 0)
                prom_panic("alloc_up() called with mem not initialized\n");
        if (align)
-               base = _ALIGN_UP(alloc_bottom, align);
+               base = ALIGN(alloc_bottom, align);
        else
                base = alloc_bottom;
        for(; (base + size) <= alloc_top;
-           base = _ALIGN_UP(base + 0x100000, align)) {
+           base = ALIGN(base + 0x100000, align)) {
                prom_debug(" trying: 0x%lx\n\r", base);
                addr = (unsigned long)prom_claim(base, size, 0);
                if (addr != PROM_ERROR && addr != 0)
@@ -1587,7 +1587,7 @@ static void __init reserve_mem(u64 base, u64 size)
         * dumb and just copy this entire array to the boot params
         */
        base = ALIGN_DOWN(base, PAGE_SIZE);
-       top = _ALIGN_UP(top, PAGE_SIZE);
+       top = ALIGN(top, PAGE_SIZE);
        size = top - base;
        if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
@@ -205,7 +205,7 @@ static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
        idx = (ioba >> stt->page_shift) - stt->offset;
        sttpage = idx / TCES_PER_PAGE;
-       sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
+       sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
                        TCES_PER_PAGE;
        for (i = sttpage; i < sttpage + sttpages; ++i)
                if (!stt->pages[i])
@@ -194,7 +194,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end)
        unsigned long flags;
        start = ALIGN_DOWN(start, PAGE_SIZE);
-       end = _ALIGN_UP(end, PAGE_SIZE);
+       end = ALIGN(end, PAGE_SIZE);
        /*
@@ -261,7 +261,7 @@ static int __meminit create_physical_mapping(unsigned long start,
        pgprot_t prot;
        int psize;
-       start = _ALIGN_UP(start, PAGE_SIZE);
+       start = ALIGN(start, PAGE_SIZE);
        for (addr = start; addr < end; addr += mapping_size) {
                unsigned long gap, previous_size;
                int rc;
@@ -478,7 +478,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
-               addr = _ALIGN_UP(addr, page_size);
+               addr = ALIGN(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len || addr < mmap_min_addr ||
@@ -943,7 +943,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
                        fbase = max(fbase, dbase + dsize);
        }
-       fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+       fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
        fsize = memblock_phys_mem_size();
        if ((fbase + fsize) <= 0x800000000ul)
@@ -963,8 +963,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
                hend = hbase + htab_size_bytes;
                /* The window must start and end on a segment boundary */
-               if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
-                   (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+               if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+                   (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
                        pr_debug("iommu: hash window not segment aligned\n");
                        return -1;
                }
@@ -108,7 +108,7 @@ static void * __init bootx_early_getprop(unsigned long base,
 #define dt_push_token(token, mem) \
        do { \
-               *(mem) = _ALIGN_UP(*(mem),4); \
+               *(mem) = ALIGN(*(mem),4); \
                *((u32 *)*(mem)) = token; \
                *(mem) += 4; \
        } while(0)
@@ -150,7 +150,7 @@ static void __init bootx_dt_add_prop(char *name, void *data, int size,
        /* push property content */
        if (size && data) {
                memcpy((void *)*mem_end, data, size);
-               *mem_end = _ALIGN_UP(*mem_end + size, 4);
+               *mem_end = ALIGN(*mem_end + size, 4);
        }
 }
@@ -303,7 +303,7 @@ static void __init bootx_scan_dt_build_struct(unsigned long base,
                        *lp++ = *p;
        }
        *lp = 0;
-       *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
+       *mem_end = ALIGN((unsigned long)lp + 1, 4);
        /* get and store all properties */
        while (*ppp) {
@@ -356,11 +356,11 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
        /* Start using memory after the big blob passed by BootX, get
         * some space for the header
         */
-       mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
+       mem_start = mem_end = ALIGN(((unsigned long)bi) + start, 4);
        DBG("Boot params header at: %x\n", mem_start);
        hdr = (struct boot_param_header *)mem_start;
        mem_end += sizeof(struct boot_param_header);
-       rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
+       rsvmap = (u64 *)(ALIGN(mem_end, 8));
        hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
        mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
@@ -265,7 +265,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
                        continue;
                start = ALIGN_DOWN(r->start - base, sgsz);
-               end = _ALIGN_UP(r->end - base, sgsz);
+               end = ALIGN(r->end - base, sgsz);
                for (segno = start / sgsz; segno < end / sgsz; segno++) {
                        if (pe_bitmap)
                                set_bit(segno, pe_bitmap);
@@ -361,7 +361,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
                return NULL;
        /* Allocate bitmap */
-       size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
+       size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
        pe_alloc = kzalloc(size, GFP_KERNEL);
        if (!pe_alloc) {
                pr_warn("%s: Out of memory !\n",
@@ -2537,7 +2537,7 @@ unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
        direct_table_size = 1UL << table_shift;
        for ( ; levels; --levels) {
-               bytes += _ALIGN_UP(tce_table_size, direct_table_size);
+               bytes += ALIGN(tce_table_size, direct_table_size);
                tce_table_size /= direct_table_size;
                tce_table_size <<= 3;
@@ -3863,7 +3863,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                PNV_IODA1_DMA32_SEGSIZE;
        /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
-       size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
+       size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
                        sizeof(unsigned long));
        m64map_off = size;
        size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
@@ -395,7 +395,7 @@ static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
 {
        struct dma_chunk *c;
        unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
-       unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
+       unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
                                              1 << r->page_size);
        list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -424,7 +424,7 @@ static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
 {
        struct dma_chunk *c;
        unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
-       unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
+       unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
                                              1 << r->page_size);
        list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -776,7 +776,7 @@ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
-       unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+       unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                                              1 << r->page_size);
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
@@ -831,7 +831,7 @@ static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
-       unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+       unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                                              1 << r->page_size);
        DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
@@ -891,7 +891,7 @@ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
-               unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+               unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus, 1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                        __func__, __LINE__, bus_addr);
@@ -928,7 +928,7 @@ static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
-               unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+               unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus,
                        1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
@@ -974,7 +974,7 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
                        pr_info("%s:%d: forcing 16M pages for linear map\n",
                                __func__, __LINE__);
                        r->page_size = PS3_DMA_16M;
-                       r->len = _ALIGN_UP(r->len, 1 << r->page_size);
+                       r->len = ALIGN(r->len, 1 << r->page_size);
                }
        }
@@ -1125,7 +1125,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
        r->offset = lpar_addr;
        if (r->offset >= map.rm.size)
                r->offset -= map.r1.offset;
-       r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
+       r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
        switch (dev->dev_type) {
        case PS3_DEVICE_TYPE_SB:
@@ -138,7 +138,7 @@ static int __init early_parse_ps3fb(char *p)
        if (!p)
                return 1;
-       ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p),
+       ps3fb_videomemory.size = ALIGN(memparse(p, &p),
                                           ps3fb_videomemory.align);
        return 0;
 }