Commit 5a09ddea authored by Rene Sapiens, committed by Greg Kroah-Hartman

staging: ti dspbridge: Rename words with camel case

The intention of this patch is to rename the remaining camel case variables.
Variables are renamed to avoid camel case and Hungarian notation.
The words to be renamed in this patch are:
========================================
validBit to valid_bit
victimEntryNum to victim_entry_num
virtualAddr to virtual_addr
xType to xtype
actualValue to actual_value
EASIL1_MMUMMU_IRQSTATUSReadRegister32 to easil1_mmummu_irqstatus_read_register32
EASIL1_MMUMMU_LOCKBaseValueWrite32 to easil1_mmummu_lock_base_value_write32
easiNum to easi_num
expectedValue to expected_value
invalidValue to invalid_value
L1_base to l1_base
L2_base to l2_base
lower16Bits to lower16_bits
lower8Bits to lower8_bits
lowerMiddle8Bits to lower_middle8_bits
lowerUpper8Bits to lower_upper8_bits
maxValidValue to max_valid_value
minValidValue to min_valid_value
newValue to new_value
returnCodeIfMismatch to return_code_if_mismatch
spyCodeIfMisMatch to spy_code_if_mis_match
upper16Bits to upper16_bits
upper8Bits to upper8_bits
========================================
Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 5e2eae57
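For illustration, a minimal before/after sketch of the convention this series applies; the declarations below are hypothetical and simply reuse names from the rename list above:

/* before: camel case */
u32 newValue;
u32 victimEntryNum;

/* after: lower case separated by underscores */
u32 new_value;
u32 victim_entry_num;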
@@ -36,6 +36,6 @@
*
* NOTE: We currently dont use this functionality.
*/
#define _DEBUG_LEVEL1_EASI(easiNum) ((void)0)
#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
#endif /* _EASIGLOBAL_H */
@@ -94,39 +94,39 @@
#define LOWER8BIT_MASK 0x000000FF
/*
* Definition: RETURN32BITS_FROM16LOWER_AND16UPPER(lower16Bits, upper16Bits)
* Definition: RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)
*
* DESCRIPTION: Returns a 32 bit value given a 16 bit lower value and a 16
* bit upper value
*/
#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16Bits, upper16Bits)\
(((((u32)lower16Bits) & LOWER16BIT_MASK)) | \
(((((u32)upper16Bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))
#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)\
(((((u32)lower16_bits) & LOWER16BIT_MASK)) | \
(((((u32)upper16_bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))
/*
* Definition: RETURN16BITS_FROM8LOWER_AND8UPPER(lower16Bits, upper16Bits)
* Definition: RETURN16BITS_FROM8LOWER_AND8UPPER(lower16_bits, upper16_bits)
*
* DESCRIPTION: Returns a 16 bit value given a 8 bit lower value and a 8
* bit upper value
*/
#define RETURN16BITS_FROM8LOWER_AND8UPPER(lower8Bits, upper8Bits)\
(((((u32)lower8Bits) & LOWER8BIT_MASK)) | \
(((((u32)upper8Bits) & LOWER8BIT_MASK) << UPPER8BIT_OF16_SHIFT)))
#define RETURN16BITS_FROM8LOWER_AND8UPPER(lower8_bits, upper8_bits)\
(((((u32)lower8_bits) & LOWER8BIT_MASK)) | \
(((((u32)upper8_bits) & LOWER8BIT_MASK) << UPPER8BIT_OF16_SHIFT)))
/*
* Definition: RETURN32BITS_FROM48BIT_VALUES(lower8Bits, lowerMiddle8Bits,
* lowerUpper8Bits, upper8Bits)
* Definition: RETURN32BITS_FROM48BIT_VALUES(lower8_bits, lower_middle8_bits,
* lower_upper8_bits, upper8_bits)
*
* DESCRIPTION: Returns a 32 bit value given four 8 bit values
*/
#define RETURN32BITS_FROM48BIT_VALUES(lower8Bits, lowerMiddle8Bits,\
lowerUpper8Bits, upper8Bits)\
(((((u32)lower8Bits) & LOWER8BIT_MASK)) | \
(((((u32)lowerMiddle8Bits) & LOWER8BIT_MASK) <<\
#define RETURN32BITS_FROM48BIT_VALUES(lower8_bits, lower_middle8_bits,\
lower_upper8_bits, upper8_bits)\
(((((u32)lower8_bits) & LOWER8BIT_MASK)) | \
(((((u32)lower_middle8_bits) & LOWER8BIT_MASK) <<\
LOWER_MIDDLE8BIT_SHIFT)) | \
(((((u32)lowerUpper8Bits) & LOWER8BIT_MASK) <<\
(((((u32)lower_upper8_bits) & LOWER8BIT_MASK) <<\
UPPER_MIDDLE8BIT_SHIFT)) | \
(((((u32)upper8Bits) & LOWER8BIT_MASK) <<\
(((((u32)upper8_bits) & LOWER8BIT_MASK) <<\
UPPER8BIT_SHIFT)))
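As a quick illustration of how these helpers compose a wider value from narrower fields, here is a minimal user-space sketch of the 16-bit case; LOWER16BIT_MASK and UPPER16BIT_SHIFT are not visible in this hunk, so the values below (0x0000FFFF and 16) are assumptions:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* assumed values; in the driver they come from the header above */
#define LOWER16BIT_MASK   0x0000FFFF
#define UPPER16BIT_SHIFT  16

#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)\
	(((((u32)lower16_bits) & LOWER16BIT_MASK)) | \
	(((((u32)upper16_bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))

int main(void)
{
	/* combines lower half 0xBEEF and upper half 0xDEAD into 0xDEADBEEF */
	printf("0x%08X\n",
	       (unsigned int)RETURN32BITS_FROM16LOWER_AND16UPPER(0xBEEF, 0xDEAD));
	return 0;
}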
/*
@@ -285,24 +285,26 @@ enum return_code_label {
/* Not sure if this all belongs here */
#define CHECK_RETURN_VALUE(actualValue, expectedValue, returnCodeIfMismatch,\
spyCodeIfMisMatch)
#define CHECK_RETURN_VALUE_RET(actualValue, expectedValue, returnCodeIfMismatch)
#define CHECK_RETURN_VALUE_RES(actualValue, expectedValue, spyCodeIfMisMatch)
#define CHECK_RETURN_VALUE_RET_VOID(actualValue, expectedValue,\
spyCodeIfMisMatch)
#define CHECK_INPUT_PARAM(actualValue, invalidValue, returnCodeIfMismatch,\
spyCodeIfMisMatch)
#define CHECK_INPUT_PARAM_NO_SPY(actualValue, invalidValue,\
returnCodeIfMismatch)
#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue,\
returnCodeIfMismatch, spyCodeIfMisMatch)
#define CHECK_INPUT_RANGE_NO_SPY(actualValue, minValidValue, maxValidValue,\
returnCodeIfMismatch)
#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue,\
returnCodeIfMismatch, spyCodeIfMisMatch)
#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actualValue, maxValidValue,\
returnCodeIfMismatch)
#define CHECK_RETURN_VALUE(actual_value, expected_value,\
return_code_if_mismatch, spy_code_if_mis_match)
#define CHECK_RETURN_VALUE_RET(actual_value, expected_value,\
return_code_if_mismatch)
#define CHECK_RETURN_VALUE_RES(actual_value, expected_value,\
spy_code_if_mis_match)
#define CHECK_RETURN_VALUE_RET_VOID(actual_value, expected_value,\
spy_code_if_mis_match)
#define CHECK_INPUT_PARAM(actual_value, invalid_value,\
return_code_if_mismatch, spy_code_if_mis_match)
#define CHECK_INPUT_PARAM_NO_SPY(actual_value, invalid_value,\
return_code_if_mismatch)
#define CHECK_INPUT_RANGE(actual_value, min_valid_value, max_valid_value,\
return_code_if_mismatch, spy_code_if_mis_match)
#define CHECK_INPUT_RANGE_NO_SPY(actual_value, min_valid_value,\
max_valid_value, return_code_if_mismatch)
#define CHECK_INPUT_RANGE_MIN0(actual_value, max_valid_value,\
return_code_if_mismatch, spy_code_if_mis_match)
#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actual_value, max_valid_value,\
return_code_if_mismatch)
#endif /* _GLOBALTYPES_H */
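All of the CHECK_* macros above expand to nothing in this build, so the renamed parameters only document intent. The fragment below is a hypothetical caller showing the calling convention the names suggest (reject an out-of-range input, returning one code and logging a spy code); the limit and the return/spy codes are placeholders:

/* hypothetical driver-internal usage; compiles away entirely here */
hw_status check_entry_num(u32 actual_value)
{
	CHECK_INPUT_RANGE_MIN0(actual_value, 31,
			       RET_INVALID_ID, RES_INVALID_INPUT_PARAM);
	return RET_OK;
}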
@@ -33,38 +33,38 @@
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
newValue <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
newValue &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
newValue <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
newValue &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUSReadRegister32),\
(_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
__raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
@@ -74,9 +74,9 @@
#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
@@ -95,26 +95,26 @@
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
newValue <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
newValue &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
newValue <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
newValue &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
@@ -124,9 +124,9 @@
#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_TTB_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
@@ -136,9 +136,9 @@
#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
@@ -151,13 +151,13 @@
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCKBaseValueWrite32);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
newValue <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
newValue &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
@@ -170,13 +170,13 @@
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
newValue <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
newValue &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
newValue |= data;\
__raw_writel(newValue, base_address+offset);\
new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
@@ -192,33 +192,33 @@
#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_CAM_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_RAM_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
register u32 newValue = (value);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
__raw_writel(newValue, (base_address)+offset);\
__raw_writel(new_value, (base_address)+offset);\
}
#endif /* USE_LEVEL_1_MACROS */
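Every MMUMMU_*_WRITE32 field macro above follows the same read-modify-write sequence on new_value. Below is a standalone sketch of that pattern, with made-up mask/offset values and the MMIO accessors replaced by plain variables:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* made-up example field occupying bits 3..4 of a 32-bit register */
#define EXAMPLE_FIELD_MASK	0x00000018
#define EXAMPLE_FIELD_OFFSET	3

static u32 field_write(u32 reg, u32 value)
{
	u32 data = reg;
	u32 new_value = value;

	data &= ~EXAMPLE_FIELD_MASK;		/* clear the target field        */
	new_value <<= EXAMPLE_FIELD_OFFSET;	/* move value into position      */
	new_value &= EXAMPLE_FIELD_MASK;	/* drop out-of-range bits        */
	new_value |= data;			/* merge with untouched bits     */
	return new_value;			/* the macros __raw_writel this  */
}

int main(void)
{
	/* writing field value 2 into a register that currently reads 0xFFFFFFFF */
	printf("0x%08X\n", (unsigned int)field_write(0xFFFFFFFFu, 2)); /* 0xFFFFFFF7 */
	return 0;
}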
@@ -90,7 +90,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address);
* Description : It indicates the TLB entry is preserved entry
* or not
*
* Identifier : validBit
* Identifier : valid_bit
* Type : const u32
* Description : It indicates the TLB entry is valid entry or not
*
@@ -115,7 +115,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address);
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 validBit,
const u32 valid_bit,
const u32 virtual_addr_tag);
/*
@@ -194,11 +194,11 @@ hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
}
hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victimEntryNum)
u32 victim_entry_num)
{
hw_status status = RET_OK;
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victimEntryNum);
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
return status;
}
@@ -293,7 +293,7 @@ hw_status hw_mmu_twl_disable(const void __iomem *base_address)
return status;
}
hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
u32 page_sz)
{
hw_status status = RET_OK;
@@ -322,7 +322,7 @@ hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
}
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
@@ -333,11 +333,11 @@ hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtualAddr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 validBit)
s8 preserved_bit, s8 valid_bit)
{
hw_status status = RET_OK;
u32 lock_reg;
@@ -377,10 +377,10 @@ hw_status hw_mmu_tlb_add(const void __iomem *base_address,
lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
/* Write the fields in the CAM Entry Register */
mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, validBit,
mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
virtual_addr_tag);
/* Write the different fields of the RAM Entry Register */
@@ -403,7 +403,7 @@ hw_status hw_mmu_tlb_add(const void __iomem *base_address,
hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtualAddr,
u32 virtual_addr,
u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
hw_status status = RET_OK;
@@ -413,7 +413,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
switch (page_sz) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SMALL_PAGE_MASK);
pte_val =
((physical_addr & MMU_SMALL_PAGE_MASK) |
@@ -425,7 +425,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_LARGE_PAGE_MASK);
pte_val =
((physical_addr & MMU_LARGE_PAGE_MASK) |
@@ -436,7 +436,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_PAGE_SIZE1MB:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val =
((((physical_addr & MMU_SECTION_ADDR_MASK) |
@@ -448,7 +448,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SSECTION_ADDR_MASK);
pte_val =
(((physical_addr & MMU_SSECTION_ADDR_MASK) |
@@ -460,7 +460,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
break;
@@ -475,7 +475,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
return status;
}
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtualAddr, u32 page_size)
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
{
hw_status status = RET_OK;
u32 pte_addr;
@@ -484,28 +484,28 @@ hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtualAddr, u32 page_size)
switch (page_size) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SMALL_PAGE_MASK);
break;
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_LARGE_PAGE_MASK);
break;
case HW_PAGE_SIZE1MB:
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SECTION_ADDR_MASK);
break;
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtualAddr &
virtual_addr &
MMU_SSECTION_ADDR_MASK);
break;
@@ -539,7 +539,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address)
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 validBit,
const u32 valid_bit,
const u32 virtual_addr_tag)
{
hw_status status = RET_OK;
@@ -550,7 +550,7 @@ static hw_status mmu_set_cam_entry(const void __iomem *base_address,
RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
mmu_cam_reg = (virtual_addr_tag << 12);
mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (validBit << 2) |
mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
(preserved_bit << 3);
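For a concrete picture of the CAM word assembled here, a worked example with hypothetical inputs (the page_sz encoding of 0x2 is an assumption, it is not visible in this hunk):

/*
 * virtual_addr_tag = 0x40001, page_sz = 0x2, valid_bit = 1, preserved_bit = 0
 *
 * mmu_cam_reg = (0x40001 << 12) | 0x2 | (1 << 2) | (0 << 3)
 *             = 0x40001000 | 0x2 | 0x4 | 0x0
 *             = 0x40001006
 */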
/* write values to register */
@@ -50,7 +50,7 @@ extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 num_locked_entries);
extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victimEntryNum);
u32 victim_entry_num);
/* For MMU faults */
extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
@@ -77,45 +77,45 @@ extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
u32 virtualAddr, u32 page_sz);
u32 virtual_addr, u32 page_sz);
extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtualAddr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 validBit);
s8 preserved_bit, s8 valid_bit);
/* For PTEs */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtualAddr,
u32 virtual_addr,
u32 page_sz,
struct hw_mmu_map_attrs_t *map_attrs);
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 page_size, u32 virtualAddr);
u32 page_size, u32 virtual_addr);
void hw_mmu_tlb_flush_all(const void __iomem *base);
static inline u32 hw_mmu_pte_addr_l1(u32 L1_base, u32 va)
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
u32 pte_addr;
u32 va31_to20;
va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
va31_to20 &= 0xFFFFFFFCUL;
pte_addr = L1_base + va31_to20;
pte_addr = l1_base + va31_to20;
return pte_addr;
}
static inline u32 hw_mmu_pte_addr_l2(u32 L2_base, u32 va)
static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
u32 pte_addr;
pte_addr = (L2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
return pte_addr;
}
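As a sanity check of the index arithmetic in these two helpers, a worked example with hypothetical table base addresses:

/*
 * hw_mmu_pte_addr_l1(0x80000000, 0x40100000):
 *   va31_to20 = (0x40100000 >> 18) & 0xFFFFFFFC = 0x1004
 *   pte_addr  = 0x80000000 + 0x1004 = 0x80001004
 *   i.e. l1_base + 4 * (va >> 20)
 *
 * hw_mmu_pte_addr_l2(0x80002000, 0x40101000):
 *   pte_addr = (0x80002000 & 0xFFFFFC00) | ((0x40101000 >> 10) & 0x3FC)
 *            = 0x80002000 | 0x4 = 0x80002004
 *   i.e. l2_base + 4 * ((va >> 12) & 0xFF)
 */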
@@ -370,17 +370,17 @@ extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
* Parameters:
* xlator: handle to translator.
* paddr address of buffer to translate.
* xType Type of address xlation. CMM_PA2VA or CMM_VA2PA.
* xtype Type of address xlation. CMM_PA2VA or CMM_VA2PA.
* Returns:
* Valid address on success, else NULL.
* Requires:
* refs > 0
* paddr != NULL
* xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA)
* xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
* Ensures:
*
*/
extern void *cmm_xlator_translate(struct cmm_xlatorobject *xlator,
void *paddr, enum cmm_xlatetype xType);
void *paddr, enum cmm_xlatetype xtype);
#endif /* CMM_ */
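A minimal caller sketch for the renamed prototype; the wrapper function, the xlator handle and the buffer pointer are hypothetical, with error handling reduced to a NULL check:

/* translate a GPP virtual address to the corresponding DSP physical address */
static void *example_va_to_dsp_pa(struct cmm_xlatorobject *xlator, void *buf_va)
{
	void *dsp_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2DSPPA);

	if (!dsp_pa)
		pr_err("cmm_xlator_translate failed\n");

	return dsp_pa;
}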
@@ -1103,7 +1103,7 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, IN OUT u8 ** paddr,
* ======== cmm_xlator_translate ========
*/
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
enum cmm_xlatetype xType)
enum cmm_xlatetype xtype)
{
u32 dw_addr_xlate = 0;
struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
@@ -1113,7 +1113,7 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(paddr != NULL);
DBC_REQUIRE((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA));
DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
if (!xlator_obj)
goto loop_cont;
@@ -1125,9 +1125,9 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
if (!allocator)
goto loop_cont;
if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) ||
(xType == CMM_PA2VA)) {
if (xType == CMM_PA2VA) {
if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
(xtype == CMM_PA2VA)) {
if (xtype == CMM_PA2VA) {
/* Gpp Va = Va Base + offset */
dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
allocator->
@@ -1152,14 +1152,14 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
dw_addr_xlate = (u32) paddr;
}
/*Now convert address to proper target physical address if needed */
if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) {
if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
/* Got Gpp Pa now, convert to DSP Pa */
dw_addr_xlate =
GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
dw_addr_xlate,
allocator->dw_dsp_phys_addr_offset *
allocator->c_factor);
} else if (xType == CMM_DSPPA2PA) {
} else if (xtype == CMM_DSPPA2PA) {
/* Got DSP Pa, convert to GPP Pa */
dw_addr_xlate =
DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,