Commit 07ae144b authored by Thomas Petazzoni's avatar Thomas Petazzoni Committed by Jason Cooper

ARM: mvebu: rename ll_get_cpuid() to ll_get_coherency_cpumask()

In the refactoring of the coherency fabric assembly code, a function
called ll_get_cpuid() was created to factorize common logic between
functions adding CPU to the SMP coherency group, enabling and
disabling the coherency.

However, the name of the function is highly misleading: ll_get_cpuid()
makes one think that it returns the ID of the CPU, i.e 0 for CPU0, 1
for CPU1, etc. In fact, this is not at all what this function returns:
it returns a CPU mask for the current CPU, usable for the coherency
fabric configuration and control registers.

Therefore this commit renames this function to
ll_get_coherency_cpumask(), and adds additional comments on top of the
function to explain in more details what it does, and also how the
endianness issue is handled.
Signed-off-by: default avatarThomas Petazzoni <thomas.petazzoni@free-electrons.com>
Link: https://lkml.kernel.org/r/1400762882-10116-5-git-send-email-thomas.petazzoni@free-electrons.com
Acked-by: default avatarGregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: default avatarJason Cooper <jason@lakedaemon.net>
parent 4dd1b7fa
...@@ -49,15 +49,22 @@ ENTRY(ll_get_coherency_base) ...@@ -49,15 +49,22 @@ ENTRY(ll_get_coherency_base)
mov pc, lr mov pc, lr
ENDPROC(ll_get_coherency_base) ENDPROC(ll_get_coherency_base)
/* Returns the CPU ID in r3 (r0 is untouched) */ /*
ENTRY(ll_get_cpuid) * Returns the coherency CPU mask in r3 (r0 is untouched). This
* coherency CPU mask can be used with the coherency fabric
* configuration and control registers. Note that the mask is already
* endian-swapped as appropriate so that the calling functions do not
* have to care about endianness issues while accessing the coherency
* fabric registers
*/
ENTRY(ll_get_coherency_cpumask)
mrc 15, 0, r3, cr0, cr0, 5 mrc 15, 0, r3, cr0, cr0, 5
and r3, r3, #15 and r3, r3, #15
mov r2, #(1 << 24) mov r2, #(1 << 24)
lsl r3, r2, r3 lsl r3, r2, r3
ARM_BE8(rev r3, r3) ARM_BE8(rev r3, r3)
mov pc, lr mov pc, lr
ENDPROC(ll_get_cpuid) ENDPROC(ll_get_coherency_cpumask)
/* /*
* ll_add_cpu_to_smp_group(), ll_enable_coherency() and * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
...@@ -71,14 +78,14 @@ ENDPROC(ll_get_cpuid) ...@@ -71,14 +78,14 @@ ENDPROC(ll_get_cpuid)
ENTRY(ll_add_cpu_to_smp_group) ENTRY(ll_add_cpu_to_smp_group)
/* /*
* As r0 is not modified by ll_get_coherency_base() and * As r0 is not modified by ll_get_coherency_base() and
* ll_get_cpuid(), we use it to temporarly save lr and avoid * ll_get_coherency_cpumask(), we use it to temporarly save lr
* it being modified by the branch and link calls. This * and avoid it being modified by the branch and link
* function is used very early in the secondary CPU boot, and * calls. This function is used very early in the secondary
* no stack is available at this point. * CPU boot, and no stack is available at this point.
*/ */
mov r0, lr mov r0, lr
bl ll_get_coherency_base bl ll_get_coherency_base
bl ll_get_cpuid bl ll_get_coherency_cpumask
mov lr, r0 mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1: 1:
...@@ -93,14 +100,14 @@ ENDPROC(ll_add_cpu_to_smp_group) ...@@ -93,14 +100,14 @@ ENDPROC(ll_add_cpu_to_smp_group)
ENTRY(ll_enable_coherency) ENTRY(ll_enable_coherency)
/* /*
* As r0 is not modified by ll_get_coherency_base() and * As r0 is not modified by ll_get_coherency_base() and
* ll_get_cpuid(), we use it to temporarly save lr and avoid * ll_get_coherency_cpumask(), we use it to temporarly save lr
* it being modified by the branch and link calls. This * and avoid it being modified by the branch and link
* function is used very early in the secondary CPU boot, and * calls. This function is used very early in the secondary
* no stack is available at this point. * CPU boot, and no stack is available at this point.
*/ */
mov r0, lr mov r0, lr
bl ll_get_coherency_base bl ll_get_coherency_base
bl ll_get_cpuid bl ll_get_coherency_cpumask
mov lr, r0 mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1: 1:
...@@ -117,14 +124,14 @@ ENDPROC(ll_enable_coherency) ...@@ -117,14 +124,14 @@ ENDPROC(ll_enable_coherency)
ENTRY(ll_disable_coherency) ENTRY(ll_disable_coherency)
/* /*
* As r0 is not modified by ll_get_coherency_base() and * As r0 is not modified by ll_get_coherency_base() and
* ll_get_cpuid(), we use it to temporarly save lr and avoid * ll_get_coherency_cpumask(), we use it to temporarly save lr
* it being modified by the branch and link calls. This * and avoid it being modified by the branch and link
* function is used very early in the secondary CPU boot, and * calls. This function is used very early in the secondary
* no stack is available at this point. * CPU boot, and no stack is available at this point.
*/ */
mov r0, lr mov r0, lr
bl ll_get_coherency_base bl ll_get_coherency_base
bl ll_get_cpuid bl ll_get_coherency_cpumask
mov lr, r0 mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1: 1:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment