Commit d719518d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

 1) Use register window state adjustment instructions when available,
    from Anthony Yznaga.

 2) Add VCC console concentrator driver, from Jag Raman.

 3) Add 16GB hugepage support, from Nitin Gupta.

 4) Support cpu 'poke' hypercall, from Vijay Kumar.

 5) Add M7/M8 optimized memcpy/memset/copy_{to,from}_user, from Babu
    Moger.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (33 commits)
  sparc64: Handle additional cases of no fault loads
  sparc64: speed up etrap/rtrap on NG2 and later processors
  sparc64: vcc: make ktermios const
  sparc: leon: grpci1: constify of_device_id
  sparc: leon: grpci2: constify of_device_id
  sparc64: vcc: Check for IS_ERR() instead of NULL
  sparc64: Cleanup hugepage table walk functions
  sparc64: Add 16GB hugepage support
  sparc64: Support huge PUD case in get_user_pages
  sparc64: vcc: Add install & cleanup TTY operations
  sparc64: vcc: Add break_ctl TTY operation
  sparc64: vcc: Add chars_in_buffer TTY operation
  sparc64: vcc: Add write & write_room TTY operations
  sparc64: vcc: Add hangup TTY operation
  sparc64: vcc: Add open & close TTY operations
  sparc64: vcc: Enable LDC event processing engine
  sparc64: vcc: Add RX & TX timer for delayed LDC operation
  sparc64: vcc: Create sysfs attribute group
  sparc64: vcc: Enable VCC port probe and removal
  sparc64: vcc: TTY driver initialization and cleanup
  ...
parents 4c2b5e0f b6fe1089
@@ -12489,6 +12489,7 @@ F: drivers/tty/serial/sunsab.h
F: drivers/tty/serial/sunsu.c
F: drivers/tty/serial/sunzilog.c
F: drivers/tty/serial/sunzilog.h
F: drivers/tty/vcc.c

SPARSE CHECKER
M: "Christopher Li" <sparse@chrisli.org>
...
@@ -238,3 +238,4 @@ CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC16=m
CONFIG_LIBCRC32C=m
CONFIG_VCC=m
@@ -4,6 +4,13 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE
struct pud_huge_patch_entry {
	unsigned int addr;
	unsigned int insn;
};
extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#endif

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
...
@@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid);
unsigned long sun4v_cpu_yield(void);
#endif

/* cpu_poke()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_POKE
 * RET0:	status
 * ERRORS:	ENOCPU		cpuid refers to a CPU that does not exist
 *		EINVAL		cpuid is current CPU
 *
 * Poke CPU cpuid. If the target CPU is currently suspended having
 * invoked the cpu-yield service, that vCPU will be resumed.
 * Poke interrupts may only be sent to valid, non-local CPUs.
 * It is not legal to poke the current vCPU.
 */
#define HV_FAST_CPU_POKE		0x13

#ifndef __ASSEMBLY__
unsigned long sun4v_cpu_poke(unsigned long cpuid);
#endif

/* cpu_qconf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_QCONF
...
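For context, a minimal sketch (not part of this series) of how a caller can drive the new hypercall; it assumes HV_EOK is the only success status:

	/* Hypothetical helper: poke a remote vCPU and map the hypervisor
	 * status to an errno. */
	static int poke_remote_cpu(unsigned long cpuid)
	{
		unsigned long hv_err = sun4v_cpu_poke(cpuid);

		if (hv_err != HV_EOK)	/* e.g. ENOCPU or EINVAL above */
			return -EIO;
		return 0;
	}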
@@ -17,6 +17,7 @@
 #define HPAGE_SHIFT		23
 #define REAL_HPAGE_SHIFT	22
+#define HPAGE_16GB_SHIFT	34
 #define HPAGE_2GB_SHIFT		31
 #define HPAGE_256MB_SHIFT	28
 #define HPAGE_64K_SHIFT		16
@@ -28,7 +29,7 @@
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE		4
+#define HUGE_MAX_HSTATE		5
 #endif

 #ifndef __ASSEMBLY__
...
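For reference, the huge page sizes these shifts produce; the fifth size is why HUGE_MAX_HSTATE grows from 4 to 5:

	1UL << HPAGE_64K_SHIFT   (16) =  64 KB
	1UL << HPAGE_SHIFT       (23) =   8 MB
	1UL << HPAGE_256MB_SHIFT (28) = 256 MB
	1UL << HPAGE_2GB_SHIFT   (31) =   2 GB
	1UL << HPAGE_16GB_SHIFT  (34) =  16 GB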
@@ -414,6 +414,11 @@ static inline bool is_hugetlb_pmd(pmd_t pmd)
 	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
 }

+static inline bool is_hugetlb_pud(pud_t pud)
+{
+	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
@@ -687,6 +692,8 @@ static inline unsigned long pmd_write(pmd_t pmd)
 	return pte_write(pte);
 }

+#define pud_write(pud)	pte_write(__pte(pud_val(pud)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
@@ -823,9 +830,18 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
 }

+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	pte_t pte = __pte(pud_val(pud));
+	unsigned long pfn;
+
+	pfn = pte_pfn(pte);
+
+	return ((unsigned long) __va(pfn << PAGE_SHIFT));
+}
+
 #define pmd_page(pmd)		virt_to_page((void *)__pmd_page(pmd))
-#define pud_page_vaddr(pud)	\
-	((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)		virt_to_page((void *)pud_page_vaddr(pud))
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud)	(pud_val(pud) != 0U)
...
@@ -33,6 +33,9 @@
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
extern cpumask_t cpu_core_map[NR_CPUS];

void smp_init_cpu_poke(void);
void scheduler_poke(void);

void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu);
#define smp_fetch_global_regs() do { } while (0)
#define smp_fetch_global_pmu() do { } while (0)
#define smp_fill_in_cpu_possible_map() do { } while (0)
#define smp_init_cpu_poke() do { } while (0)
#define scheduler_poke() do { } while (0)

#endif /* !(CONFIG_SMP) */
...
@@ -73,6 +73,8 @@ struct sun4v_1insn_patch_entry {
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
	__sun4v_1insn_patch_end;
extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch,
	__fast_win_ctrl_1insn_patch_end;

struct sun4v_2insn_patch_entry {
	unsigned int addr;
...
@@ -195,6 +195,41 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
	 nop; \
699:

	/* PUD has been loaded into REG1, interpret the value, seeing
	 * if it is a HUGE PUD or a normal one.  If it is not valid
	 * then jump to FAIL_LABEL.  If it is a HUGE PUD, and it
	 * translates to a valid PTE, branch to PTE_LABEL.
	 *
	 * We have to propagate bits [32:22] from the virtual address
	 * to resolve at 4M granularity.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
700:	ba 700f; \
	 nop; \
	.section .pud_huge_patch, "ax"; \
	.word 700b; \
	nop; \
	.previous; \
	brz,pn REG1, FAIL_LABEL; \
	 sethi %uhi(_PAGE_PUD_HUGE), REG2; \
	sllx REG2, 32, REG2; \
	andcc REG1, REG2, %g0; \
	be,pt %xcc, 700f; \
	 sethi %hi(0x1ffc0000), REG2; \
	sllx REG2, 1, REG2; \
	brgez,pn REG1, FAIL_LABEL; \
	 andn REG1, REG2, REG1; \
	and VADDR, REG2, REG2; \
	brlz,pt REG1, PTE_LABEL; \
	 or REG1, REG2, REG1; \
700:
#else
#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
	brz,pn REG1, FAIL_LABEL; \
	nop;
#endif

	/* PMD has been loaded into REG1, interpret the value, seeing
	 * if it is a HUGE PMD or a normal one.  If it is not valid
	 * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
@@ -242,6 +277,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
	srlx REG2, 64 - PAGE_SHIFT, REG2; \
	andn REG2, 0x7, REG2; \
	ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
	brz,pn REG1, FAIL_LABEL; \
	 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx REG2, 64 - PAGE_SHIFT, REG2; \
...
@@ -52,6 +52,7 @@ struct vio_ver_info {
#define VDEV_NETWORK_SWITCH	0x02
#define VDEV_DISK		0x03
#define VDEV_DISK_SERVER	0x04
#define VDEV_CONSOLE_CON	0x05

	u8			resv1[3];
	u64			resv2[5];
@@ -282,6 +283,14 @@ struct vio_dring_state {
	struct ldc_trans_cookie	cookies[VIO_MAX_RING_COOKIES];
};

#define VIO_TAG_SIZE		((int)sizeof(struct vio_msg_tag))
#define VIO_VCC_MTU_SIZE	(LDC_PACKET_SIZE - VIO_TAG_SIZE)

struct vio_vcc {
	struct vio_msg_tag	tag;
	char			data[VIO_VCC_MTU_SIZE];
};

static inline void *vio_dring_cur(struct vio_dring_state *dr)
{
	return dr->base + (dr->entry_size * dr->prod);
...
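To illustrate the new definitions (a hedged sketch, not the vcc driver's actual code): a console frame is a vio_msg_tag followed by at most VIO_VCC_MTU_SIZE bytes of data, sized so the whole struct fits in one LDC packet:

	/* Sketch: stage console bytes into a vio_vcc frame and push it over
	 * LDC; VIO_TYPE_DATA and ldc_write() are existing interfaces. */
	static int vcc_send_sketch(struct ldc_channel *lp, const u8 *buf, size_t len)
	{
		struct vio_vcc pkt;

		memset(&pkt, 0, sizeof(pkt));
		pkt.tag.type = VIO_TYPE_DATA;
		len = min_t(size_t, len, VIO_VCC_MTU_SIZE);
		memcpy(pkt.data, buf, len);

		return ldc_write(lp, &pkt, VIO_TAG_SIZE + len);
	}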
@@ -38,7 +38,11 @@ etrap_syscall:	TRAP_LOAD_THREAD_REG(%g6, %g1)
 		or	%g1, %g3, %g1
 		bne,pn	%xcc, 1f
 		 sub	%sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
-		wrpr	%g0, 7, %cleanwin
+661:		wrpr	%g0, 7, %cleanwin
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		.word	0x85880000	! allclean
+		.previous

 		sethi	%hi(TASK_REGOFF), %g2
 		sethi	%hi(TSTATE_PEF), %g3
@@ -88,16 +92,30 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 		bne,pn	%xcc, 3f
 		 mov	PRIMARY_CONTEXT, %l4
-		rdpr	%canrestore, %g3
+661:		rdpr	%canrestore, %g3
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		nop
+		.previous
 		rdpr	%wstate, %g2
-		wrpr	%g0, 0, %canrestore
+661:		wrpr	%g0, 0, %canrestore
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		nop
+		.previous
 		sll	%g2, 3, %g2

 		/* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */
 		mov	1, %l5
 		sth	%l5, [%l6 + TI_SYS_NOERROR]
-		wrpr	%g3, 0, %otherwin
+661:		wrpr	%g3, 0, %otherwin
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		.word	0x87880000	! otherw
+		.previous
 		wrpr	%g2, 0, %wstate
 		sethi	%hi(sparc64_kern_pri_context), %g2
 		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
...
@@ -603,10 +603,10 @@ niagara_tlb_fixup:
 	be,pt	%xcc, niagara4_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_M7
-	be,pt	%xcc, niagara4_patch
+	be,pt	%xcc, sparc_m7_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_M8
-	be,pt	%xcc, niagara4_patch
+	be,pt	%xcc, sparc_m7_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_SN
 	be,pt	%xcc, niagara4_patch
@@ -621,6 +621,18 @@ niagara_tlb_fixup:
 	ba,a,pt	%xcc, 80f
 	 nop

+sparc_m7_patch:
+	call	m7_patch_copyops
+	 nop
+	call	m7_patch_bzero
+	 nop
+	call	m7_patch_pageops
+	 nop
+
+	ba,a,pt	%xcc, 80f
+	 nop
+
 niagara4_patch:
 	call	niagara4_patch_copyops
 	 nop
@@ -881,7 +893,6 @@ sparc64_boot_end:
 #include "misctrap.S"
 #include "syscalls.S"
 #include "helpers.S"
-#include "hvcalls.S"
 #include "sun4v_tlb_miss.S"
 #include "sun4v_ivec.S"
 #include "ktlb.S"
@@ -926,6 +937,7 @@ swapper_4m_tsb:

 	! 0x0000000000428000

+#include "hvcalls.S"
 #include "systbls_64.S"

 	.data
...
@@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void)

 	group = HV_GRP_CORE;
 	major = 1;
-	minor = 1;
+	minor = 6;
 	if (sun4v_hvapi_register(group, major, &minor))
 		goto bad;
...
@@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield)
	 nop
ENDPROC(sun4v_cpu_yield)

	/* %o0: cpuid
	 *
	 * returns %o0: status
	 */
ENTRY(sun4v_cpu_poke)
	mov	HV_FAST_CPU_POKE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_cpu_poke)

	/* %o0: type
	 * %o1: queue paddr
	 * %o2: num queue entries
...
@@ -1480,6 +1480,7 @@ int ldc_rx_reset(struct ldc_channel *lp)
{
	return __set_rx_head(lp, lp->rx_tail);
}
EXPORT_SYMBOL(ldc_rx_reset);

void __ldc_print(struct ldc_channel *lp, const char *caller)
{
@@ -1493,6 +1494,7 @@ void __ldc_print(struct ldc_channel *lp, const char *caller)
		lp->tx_head, lp->tx_tail, lp->tx_num_entries,
		lp->rcv_nxt, lp->snd_nxt);
}
EXPORT_SYMBOL(__ldc_print);

static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
...
@@ -695,7 +695,7 @@ static int grpci1_of_probe(struct platform_device *ofdev)
 	return err;
 }

-static struct of_device_id grpci1_of_match[] = {
+static const struct of_device_id grpci1_of_match[] __initconst = {
 	{
 		.name = "GAISLER_PCIFBRG",
 	},
...
@@ -886,7 +886,7 @@ static int grpci2_of_probe(struct platform_device *ofdev)
 	return err;
 }

-static struct of_device_id grpci2_of_match[] = {
+static const struct of_device_id grpci2_of_match[] __initconst = {
 	{
 		.name = "GAISLER_GRPCI2",
 	},
...
@@ -77,8 +77,13 @@ void arch_cpu_idle(void)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));

-		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
+		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
 			sun4v_cpu_yield();
+			/* If resumed by cpu_poke then we need to explicitly
+			 * call scheduler_ipi().
+			 */
+			scheduler_poke();
+		}

 		/* Re-enable interrupts. */
 		__asm__ __volatile__(
...
@@ -224,10 +224,19 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		rdpr	%otherwin, %l2
 		srl	%l1, 3, %l1
-		wrpr	%l2, %g0, %canrestore
+661:		wrpr	%l2, %g0, %canrestore
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		.word	0x89880000	! normalw
+		.previous

 		wrpr	%l1, %g0, %wstate
 		brnz,pt	%l2, user_rtt_restore
-		 wrpr	%g0, %g0, %otherwin
+661:		 wrpr	%g0, %g0, %otherwin
+		.section .fast_win_ctrl_1insn_patch, "ax"
+		.word	661b
+		nop
+		.previous

 		ldx	[%g6 + TI_FLAGS], %g3
 		wr	%g0, ASI_AIUP, %asi
...
@@ -300,6 +300,11 @@ static void __init sun4v_patch(void)
		break;
	}

	if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
		sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
					&__fast_win_ctrl_1insn_patch_end);
	}

	sun4v_hvapi_init();
}
@@ -363,6 +368,7 @@ void __init start_early_boot(void)
	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();
	smp_init_cpu_poke();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
...
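For orientation, a simplified sketch of what sun4v_patch_1insn_range() does with each entry (mirroring the existing sun4v single-instruction patching; this is not code added by the series):

	static void __init apply_1insn_entries(struct sun4v_1insn_patch_entry *p,
					       struct sun4v_1insn_patch_entry *end)
	{
		for (; p < end; p++) {
			unsigned long addr = p->addr;

			/* Overwrite the marked instruction in place, e.g.
			 * replace "wrpr %g0, 7, %cleanwin" with "allclean",
			 * then flush the patched instruction. */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush %0" : : "r" (addr) : "memory");
		}
	}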
@@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map);

 static cpumask_t smp_commenced_mask;

+static DEFINE_PER_CPU(bool, poke);
+static bool cpu_poke;
+
 void smp_info(struct seq_file *m)
 {
 	int i;
@@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus)
 {
 }

+static void send_cpu_ipi(int cpu)
+{
+	xcall_deliver((u64) &xcall_receive_signal,
+			0, 0, cpumask_of(cpu));
+}
+
+void scheduler_poke(void)
+{
+	if (!cpu_poke)
+		return;
+
+	if (!__this_cpu_read(poke))
+		return;
+
+	__this_cpu_write(poke, false);
+	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+}
+
+static unsigned long send_cpu_poke(int cpu)
+{
+	unsigned long hv_err;
+
+	per_cpu(poke, cpu) = true;
+	hv_err = sun4v_cpu_poke(cpu);
+	if (hv_err != HV_EOK) {
+		per_cpu(poke, cpu) = false;
+		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
+				   __func__, hv_err);
+	}
+
+	return hv_err;
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (cpu == smp_processor_id()) {
 		WARN_ON_ONCE(preemptible());
 		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
-	} else {
-		xcall_deliver((u64) &xcall_receive_signal,
-				0, 0, cpumask_of(cpu));
+		return;
 	}
+
+	/* Use cpu poke to resume idle cpu if supported. */
+	if (cpu_poke && idle_cpu(cpu)) {
+		unsigned long ret;
+
+		ret = send_cpu_poke(cpu);
+		if (ret == HV_EOK)
+			return;
+	}
+
+	/* Use IPI in following cases:
+	 * - cpu poke not supported
+	 * - cpu not idle
+	 * - send_cpu_poke() returns with error
+	 */
+	send_cpu_ipi(cpu);
+}
+
+void smp_init_cpu_poke(void)
+{
+	unsigned long major;
+	unsigned long minor;
+	int ret;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
+	if (ret) {
+		pr_debug("HV_GRP_CORE is not registered\n");
+		return;
+	}
+
+	if (major == 1 && minor >= 6) {
+		/* CPU POKE is registered. */
+		cpu_poke = true;
+		return;
+	}
+
+	pr_debug("CPU_POKE not supported\n");
 }

 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
...
@@ -265,6 +265,45 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u
	sun4v_insn_access_exception(regs, addr, type_ctx);
}

bool is_no_fault_exception(struct pt_regs *regs)
{
	unsigned char asi;
	u32 insn;

	if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
		return false;

	/*
	 * Must do a little instruction decoding here in order to
	 * decide on a course of action. The bits of interest are:
	 *	insn[31:30] = op, where 3 indicates the load/store group
	 *	insn[24:19] = op3, which identifies individual opcodes
	 *	insn[13] indicates an immediate offset
	 *	op3[4]=1 identifies alternate space instructions
	 *	op3[5:4]=3 identifies floating point instructions
	 *	op3[2]=1 identifies stores
	 * See "Opcode Maps" in the appendix of any Sparc V9
	 * architecture spec for full details.
	 */
	if ((insn & 0xc0800000) == 0xc0800000) {	/* op=3, op3[4]=1 */
		if (insn & 0x2000)			/* immediate offset */
			asi = (regs->tstate >> 24);	/* saved %asi */
		else
			asi = (insn >> 5);		/* immediate asi */
		if ((asi & 0xf2) == ASI_PNF) {
			if (insn & 0x1000000) {		/* op3[5:4]=3 */
				handle_ldf_stq(insn, regs);
				return true;
			} else if (insn & 0x200000) {	/* op3[2], stores */
				return false;
			}
			handle_ld_nf(insn, regs);
			return true;
		}
	}
	return false;
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
@@ -296,6 +335,9 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
		die_if_kernel("Dax", regs);
	}

	if (is_no_fault_exception(regs))
		return;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
@@ -352,6 +394,9 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	if (is_no_fault_exception(regs))
		return;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
@@ -2575,6 +2620,9 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		goto out;
	}

	if (is_no_fault_exception(regs))
		return;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
@@ -2597,6 +2645,9 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}

	if (is_no_fault_exception(regs))
		return;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
...
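As a worked example of the decoding above (hand-assembled here, not part of the patch), consider lduba [%g1 + %g0] ASI_PNF, %g2, which encodes to 0xc4885040:

	/* insn = 0xc4885040
	 *
	 * insn & 0xc0800000 == 0xc0800000   op=3, op3[4]=1: alternate space
	 * insn & 0x2000      == 0           register form: ASI is insn[12:5]
	 * (insn >> 5) & 0xff == 0x82        ASI_PNF; the 0xf2 mask also accepts
	 *                                   the secondary/little-endian no-fault
	 *                                   variants (0x83, 0x8a, 0x8b)
	 * insn & 0x1000000   == 0           op3[5:4] != 3: not floating point
	 * insn & 0x200000    == 0           op3[2] == 0: a load, not a store
	 *
	 * so the fault is swallowed via handle_ld_nf(), which zeroes %g2.
	 */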
@@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath:
 	/* Valid PTE is now in %g5. */

 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	sethi		%uhi(_PAGE_PMD_HUGE), %g7
+	sethi		%uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
 	sllx		%g7, 32, %g7
 	andcc		%g5, %g7, %g0
...
@@ -246,6 +246,7 @@ u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)

	return node;
}
EXPORT_SYMBOL(vio_vdev_node);

static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
				  struct vio_dev *vdev)
...
@@ -814,15 +814,21 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
 	case VDEV_NETWORK_SWITCH:
 	case VDEV_DISK:
 	case VDEV_DISK_SERVER:
+	case VDEV_CONSOLE_CON:
 		break;

 	default:
 		return -EINVAL;
 	}

+	if (dev_class == VDEV_NETWORK ||
+	    dev_class == VDEV_NETWORK_SWITCH ||
+	    dev_class == VDEV_DISK ||
+	    dev_class == VDEV_DISK_SERVER) {
 		if (!ops || !ops->send_attr || !ops->handle_attr ||
 		    !ops->handshake_complete)
 			return -EINVAL;
+	}

 	if (!ver_table || ver_table_size < 0)
 		return -EINVAL;
...
@@ -154,6 +154,16 @@ SECTIONS
		*(.get_tick_patch)
		__get_tick_patch_end = .;
	}

	.pud_huge_patch : {
		__pud_huge_patch = .;
		*(.pud_huge_patch)
		__pud_huge_patch_end = .;
	}

	.fast_win_ctrl_1insn_patch : {
		__fast_win_ctrl_1insn_patch = .;
		*(.fast_win_ctrl_1insn_patch)
		__fast_win_ctrl_1insn_patch_end = .;
	}

	PERCPU_SECTION(SMP_CACHE_BYTES)

#ifdef CONFIG_JUMP_LABEL
...
/*
* M7copy_from_user.S: SPARC M7 optimized copy from userspace.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#define EX_LD(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y; \
.text; \
.align 4;
#define EX_LD_FP(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y##_fp; \
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#define FUNC_NAME M7copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, raw_copy_in_user; \
nop
#endif
#include "M7memcpy.S"
/*
* M7copy_to_user.S: SPARC M7 optimized copy to userspace.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#define EX_ST(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y; \
.text; \
.align 4;
#define EX_ST_FP(x, y) \
98: x; \
.section __ex_table,"a"; \
.align 4; \
.word 98b, y##_fp; \
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS
#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23
#endif
#define FUNC_NAME M7copy_to_user
#define STORE(type,src,addr) type##a src, [addr] %asi
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS
#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, raw_copy_in_user; \
nop
#endif
#include "M7memcpy.S"
/*
* M7memcpy: Optimized SPARC M7 memcpy
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
.file "M7memcpy.S"
/*
* memcpy(s1, s2, len)
*
* Copy s2 to s1, always copy n bytes.
* Note: this C code does not work for overlapped copies.
*
* Fast assembler language version of the following C-program for memcpy
* which represents the `standard' for the C-library.
*
* void *
* memcpy(void *s, const void *s0, size_t n)
* {
* if (n != 0) {
* char *s1 = s;
* const char *s2 = s0;
* do {
* *s1++ = *s2++;
* } while (--n != 0);
* }
* return (s);
* }
*
*
* SPARC T7/M7 Flow :
*
* if (count < SMALL_MAX) {
* if count < SHORTCOPY (SHORTCOPY=3)
* copy bytes; exit with dst addr
* if src & dst aligned on word boundary but not long word boundary,
* copy with ldw/stw; branch to finish_up
* if src & dst aligned on long word boundary
* copy with ldx/stx; branch to finish_up
* if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14)
* copy bytes; exit with dst addr
* move enough bytes to get src to word boundary
* if dst now on word boundary
* move_words:
* copy words; branch to finish_up
* if dst now on half word boundary
* load words, shift half words, store words; branch to finish_up
* if dst on byte 1
* load words, shift 3 bytes, store words; branch to finish_up
* if dst on byte 3
* load words, shift 1 byte, store words; branch to finish_up
* finish_up:
* copy bytes; exit with dst addr
* } else { More than SMALL_MAX bytes
* move bytes until dst is on long word boundary
* if( src is on long word boundary ) {
* if (count < MED_MAX) {
* finish_long: src/dst aligned on 8 bytes
* copy with ldx/stx in 8-way unrolled loop;
* copy final 0-63 bytes; exit with dst addr
* } else { src/dst aligned; count > MED_MAX
* align dst on 64 byte boundary; for main data movement:
* prefetch src data to L2 cache; let HW prefetch move data to L1 cache
* Use BIS (block initializing store) to avoid copying store cache
* lines from memory. But pre-store first element of each cache line
* ST_CHUNK lines in advance of the rest of that cache line. That
* gives time for replacement cache lines to be written back without
* excess STQ and Miss Buffer filling. Repeat until near the end,
* then finish up storing before going to finish_long.
* }
* } else { src/dst not aligned on 8 bytes
* if src is word aligned and count < MED_WMAX
* move words in 8-way unrolled loop
* move final 0-31 bytes; exit with dst addr
* if count < MED_UMAX
* use alignaddr/faligndata combined with ldd/std in 8-way
* unrolled loop to move data.
* go to unalign_done
* else
* setup alignaddr for faligndata instructions
* align dst on 64 byte boundary; prefetch src data to L1 cache
* loadx8, falign, block-store, prefetch loop
* (only use block-init-store when src/dst on 8 byte boundaries.)
* unalign_done:
* move remaining bytes for unaligned cases. exit with dst addr.
* }
*
*/
#include <asm/visasm.h>
#include <asm/asi.h>
#if !defined(EX_LD) && !defined(EX_ST)
#define NON_USER_COPY
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
/*
* ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache
* line as "least recently used" which means if many threads are
* active, it has a high probability of being pushed out of the cache
* between the first initializing store and the final stores.
* Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which
* marks the cache line as "most recently used" for all
* but the last cache line
*/
#ifndef STORE_ASI
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#else
#define STORE_ASI 0x80 /* ASI_P */
#endif
#endif
#ifndef STORE_MRU_ASI
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P
#else
#define STORE_MRU_ASI 0x80 /* ASI_P */
#endif
#endif
#ifndef STORE_INIT
#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI
#endif
#ifndef STORE_INIT_MRU
#define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI
#endif
#ifndef FUNC_NAME
#define FUNC_NAME M7memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#define BLOCK_SIZE 64
#define SHORTCOPY 3
#define SHORTCHECK 14
#define SHORT_LONG 64 /* max copy for short longword-aligned case */
/* must be at least 64 */
#define SMALL_MAX 128
#define MED_UMAX 1024 /* max copy for medium un-aligned case */
#define MED_WMAX 1024 /* max copy for medium word-aligned case */
#define MED_MAX 1024 /* max copy for medium longword-aligned case */
#define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */
#define ALIGN_PRE 24 /* distance for aligned prefetch loop */
.register %g2,#scratch
.section ".text"
.global FUNC_NAME
.type FUNC_NAME, #function
.align 16
FUNC_NAME:
srlx %o2, 31, %g2
cmp %g2, 0
tne %xcc, 5
PREAMBLE
mov %o0, %g1 ! save %o0
brz,pn %o2, .Lsmallx
cmp %o2, 3
ble,pn %icc, .Ltiny_cp
cmp %o2, 19
ble,pn %icc, .Lsmall_cp
or %o0, %o1, %g2
cmp %o2, SMALL_MAX
bl,pn %icc, .Lmedium_cp
nop
.Lmedium:
neg %o0, %o5
andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned
brz,pt %o5, .Ldst_aligned_on_8
! %o5 has the bytes to be written in partial store.
sub %o2, %o5, %o2
sub %o1, %o0, %o1 ! %o1 gets the difference
7: ! dst aligning loop
add %o1, %o0, %o4
EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! load one byte
subcc %o5, 1, %o5
EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1)
bgu,pt %xcc, 7b
add %o0, 1, %o0 ! advance dst
add %o1, %o0, %o1 ! restore %o1
.Ldst_aligned_on_8:
andcc %o1, 7, %o5
brnz,pt %o5, .Lsrc_dst_unaligned_on_8
nop
.Lsrc_dst_aligned_on_8:
! check if we are copying MED_MAX or more bytes
set MED_MAX, %o3
cmp %o2, %o3 ! limit to store buffer size
bgu,pn %xcc, .Llarge_align8_copy
nop
/*
* Special case for handling when src and dest are both long word aligned
* and total data to move is less than MED_MAX bytes
*/
.Lmedlong:
subcc %o2, 63, %o2 ! adjust length to allow cc test
ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes
nop
.Lmedl64:
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load
subcc %o2, 64, %o2 ! decrement length count
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store
EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! a block of 64
EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56)
EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48)
EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48)
EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40)
EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40)
EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store
EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32)
EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64
add %o1, 64, %o1 ! increase src ptr by 64
EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24)
EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16)
add %o0, 64, %o0 ! increase dst ptr by 64
EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16)
EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8)
bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left
EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8)
.Lmedl63:
addcc %o2, 32, %o2 ! adjust remaining count
ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left
nop
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load
sub %o2, 32, %o2 ! decrement length count
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store
EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32
add %o1, 32, %o1 ! increase src ptr by 32
EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24)
EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16)
add %o0, 32, %o0 ! increase dst ptr by 32
EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16)
EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8)
EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8)
.Lmedl31:
addcc %o2, 16, %o2 ! adjust remaining count
ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left
nop !
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15)
add %o1, 16, %o1 ! increase src ptr by 16
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15)
sub %o2, 16, %o2 ! decrease count by 16
EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8)
add %o0, 16, %o0 ! increase dst ptr by 16
EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8)
.Lmedl15:
addcc %o2, 15, %o2 ! restore count
bz,pt %xcc, .Lsmallx ! exit if finished
cmp %o2, 8
blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left
tst %o2
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes
add %o1, 8, %o1 ! increase src ptr by 8
add %o0, 8, %o0 ! increase dst ptr by 8
subcc %o2, 8, %o2 ! decrease count by 8
bnz,pn %xcc, .Lmedw7
EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8
retl
mov EX_RETVAL(%g1), %o0 ! restore %o0
.align 16
.Lsrc_dst_unaligned_on_8:
! DST is 8-byte aligned, src is not
2:
andcc %o1, 0x3, %o5 ! test word alignment
bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned
nop
/*
* Handle all cases where src and dest are aligned on word
* boundaries. Use unrolled loops for better performance.
* This option wins over the standard large data move when
* source and destination are in cache, for .Lmedium
* to short data moves.
*/
set MED_WMAX, %o3
cmp %o2, %o3 ! limit to store buffer size
bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop
nop
subcc %o2, 31, %o2 ! adjust length to allow cc test
! for end of loop
ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16
.Lmedw32:
EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32
sllx %o4, 32, %o5
EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31)
or %o4, %o5, %o5
EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31)
subcc %o2, 32, %o2 ! decrement length count
EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24)
sllx %o4, 32, %o5
EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24)
or %o4, %o5, %o5
EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24)
add %o1, 32, %o1 ! increase src ptr by 32
EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16)
sllx %o4, 32, %o5
EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16)
or %o4, %o5, %o5
EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16)
add %o0, 32, %o0 ! increase dst ptr by 32
EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8)
sllx %o4, 32, %o5
EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8)
or %o4, %o5, %o5
bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left
EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8)
.Lmedw31:
addcc %o2, 31, %o2 ! restore count
bz,pt %xcc, .Lsmallx ! exit if finished
nop
cmp %o2, 16
blt,pt %xcc, .Lmedw15
nop
EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes
sllx %o4, 32, %o5
subcc %o2, 16, %o2 ! decrement length count
EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16)
or %o4, %o5, %o5
EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16)
add %o1, 16, %o1 ! increase src ptr by 16
EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8)
add %o0, 16, %o0 ! increase dst ptr by 16
sllx %o4, 32, %o5
EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8)
or %o4, %o5, %o5
EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8)
.Lmedw15:
bz,pt %xcc, .Lsmallx ! exit if finished
cmp %o2, 8
blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left
tst %o2
EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes
subcc %o2, 8, %o2 ! decrease count by 8
EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes
add %o1, 8, %o1 ! increase src ptr by 8
EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes
add %o0, 8, %o0 ! increase dst ptr by 8
EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes
bz,pt %xcc, .Lsmallx ! exit if finished
.Lmedw7: ! count is ge 1, less than 8
cmp %o2, 4 ! check for 4 bytes left
blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left
nop !
EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes
add %o1, 4, %o1 ! increase src ptr by 4
add %o0, 4, %o0 ! increase dst ptr by 4
subcc %o2, 4, %o2 ! decrease count by 4
bnz .Lsmallleft3
EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes
retl
mov EX_RETVAL(%g1), %o0
.align 16
.Llarge_align8_copy: ! Src and dst share 8 byte alignment
! align dst to 64 byte boundary
andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned
brz,pn %o3, .Laligned_to_64
andcc %o0, 8, %o3 ! odd long words to move?
brz,pt %o3, .Laligned_to_16
nop
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2)
sub %o2, 8, %o2
add %o1, 8, %o1 ! increment src ptr
add %o0, 8, %o0 ! increment dst ptr
EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8)
.Laligned_to_16:
andcc %o0, 16, %o3 ! pair of long words to move?
brz,pt %o3, .Laligned_to_32
nop
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2)
sub %o2, 16, %o2
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16)
add %o1, 16, %o1 ! increment src ptr
EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8)
add %o0, 16, %o0 ! increment dst ptr
EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8)
.Laligned_to_32:
andcc %o0, 32, %o3 ! four long words to move?
brz,pt %o3, .Laligned_to_64
nop
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2)
sub %o2, 32, %o2
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32)
EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24)
EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24)
EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16)
EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16)
add %o1, 32, %o1 ! increment src ptr
EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8)
add %o0, 32, %o0 ! increment dst ptr
EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8)
.Laligned_to_64:
!
! Using block init store (BIS) instructions to avoid fetching cache
! lines from memory. Use ST_CHUNK stores to first element of each cache
! line (similar to prefetching) to avoid overfilling STQ or miss buffers.
! Gives existing cache lines time to be moved out of L1/L2/L3 cache.
! Initial stores using MRU version of BIS to keep cache line in
! cache until we are ready to store final element of cache line.
! Then store last element using the LRU version of BIS.
!
andn %o2, 0x3f, %o5 ! %o5 is multiple of block size
and %o2, 0x3f, %o2 ! residue bytes in %o2
!
! We use STORE_MRU_ASI for the first seven stores to each cache line
! followed by STORE_ASI (mark as LRU) for the last store. That
! mixed approach reduces the probability that the cache line is removed
! before we finish setting it, while minimizing the effects on
! other cached values during a large memcpy
!
! ST_CHUNK batches up initial BIS operations for several cache lines
! to allow multiple requests to not be blocked by overflowing the
! store miss buffer. Then the matching stores for all those
! BIS operations are executed.
!
sub %o0, 8, %o0 ! adjust %o0 for ASI alignment
.Lalign_loop:
cmp %o5, ST_CHUNK*64
blu,pt %xcc, .Lalign_loop_fin
mov ST_CHUNK,%o3
.Lalign_loop_start:
prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21
subcc %o3, 1, %o3
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5)
add %o1, 64, %o1
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
bgu %xcc,.Lalign_loop_start
add %o0, 56, %o0
mov ST_CHUNK,%o3
sllx %o3, 6, %o4 ! ST_CHUNK*64
sub %o1, %o4, %o1 ! reset %o1
sub %o0, %o4, %o0 ! reset %o0
.Lalign_loop_rest:
EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5)
add %o0, 16, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5)
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
subcc %o3, 1, %o3
EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5)
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5)
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5)
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5)
add %o1, 64, %o1
add %o0, 8, %o0
EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5)
add %o0, 8, %o0
EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5)
sub %o5, 64, %o5
bgu %xcc,.Lalign_loop_rest
! mark cache line as LRU
EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64)
cmp %o5, ST_CHUNK*64
bgu,pt %xcc, .Lalign_loop_start
mov ST_CHUNK,%o3
cmp %o5, 0
beq .Lalign_done
nop
.Lalign_loop_fin:
EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5)
EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5)
EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5)
EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5)
subcc %o5, 64, %o5
EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64)
EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64)
EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64)
EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64)
EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64)
EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64)
EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64)
add %o1, 64, %o1
EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64)
add %o0, 64, %o0
EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64)
bgu %xcc,.Lalign_loop_fin
EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64)
.Lalign_done:
add %o0, 8, %o0 ! restore %o0 from ASI alignment
membar #StoreStore
sub %o2, 63, %o2 ! adjust length to allow cc test
ba .Lmedl63 ! in .Lmedl63
nop
.align 16
! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX
.Lunalignsetup:
.Lunalignrejoin:
mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it
#ifdef NON_USER_COPY
VISEntryHalfFast(.Lmedium_vis_entry_fail_cp)
#else
VISEntryHalf
#endif
mov %o3, %g1 ! restore %g1
set MED_UMAX, %o3
cmp %o2, %o3 ! check for .Lmedium unaligned limit
bge,pt %xcc,.Lunalign_large
prefetch [%o1 + (4 * BLOCK_SIZE)], 20
andn %o2, 0x3f, %o5 ! %o5 is multiple of block size
and %o2, 0x3f, %o2 ! residue bytes in %o2
cmp %o2, 8 ! Ensure we do not load beyond
bgt .Lunalign_adjust ! end of source buffer
andn %o1, 0x7, %o4 ! %o4 has long word aligned src address
add %o2, 64, %o2 ! adjust to leave loop
sub %o5, 64, %o5 ! early if necessary
.Lunalign_adjust:
alignaddr %o1, %g0, %g0 ! generate %gsr
add %o1, %o5, %o1 ! advance %o1 to after blocks
EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)
.Lunalign_loop:
EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5)
faligndata %f0, %f2, %f16
EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5)
subcc %o5, BLOCK_SIZE, %o5
EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64)
faligndata %f2, %f4, %f18
EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56)
EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56)
faligndata %f4, %f6, %f20
EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48)
EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48)
faligndata %f6, %f8, %f22
EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40)
EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40)
faligndata %f8, %f10, %f24
EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32)
EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32)
faligndata %f10, %f12, %f26
EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24)
add %o4, BLOCK_SIZE, %o4
EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24)
faligndata %f12, %f14, %f28
EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16)
EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16)
faligndata %f14, %f0, %f30
EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8)
add %o0, BLOCK_SIZE, %o0
bgu,pt %xcc, .Lunalign_loop
prefetch [%o4 + (5 * BLOCK_SIZE)], 20
ba .Lunalign_done
nop
.Lunalign_large:
andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned?
bz %xcc, .Lunalignsrc
sub %o3, 64, %o3 ! %o3 will be multiple of 8
neg %o3 ! bytes until dest is 64 byte aligned
sub %o2, %o3, %o2 ! update cnt with bytes to be moved
! Move bytes according to source alignment
andcc %o1, 0x1, %o5
bnz %xcc, .Lunalignbyte ! check for byte alignment
nop
andcc %o1, 2, %o5 ! check for half word alignment
bnz %xcc, .Lunalignhalf
nop
! Src is word aligned
.Lunalignword:
EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes
add %o1, 8, %o1 ! increase src ptr by 8
EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4
subcc %o3, 8, %o3 ! decrease count by 8
EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4
add %o0, 8, %o0 ! increase dst ptr by 8
bnz %xcc, .Lunalignword
EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4)
ba .Lunalignsrc
nop
! Src is half-word aligned
.Lunalignhalf:
EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes
sllx %o4, 32, %o5 ! shift left
EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3)
or %o4, %o5, %o5
sllx %o5, 16, %o5
EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3)
or %o4, %o5, %o5
EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3)
add %o1, 8, %o1
subcc %o3, 8, %o3
bnz %xcc, .Lunalignhalf
add %o0, 8, %o0
ba .Lunalignsrc
nop
! Src is Byte aligned
.Lunalignbyte:
sub %o0, %o1, %o0 ! share pointer advance
.Lunalignbyte_loop:
EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3)
sllx %o4, 56, %o5
EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3)
sllx %o4, 40, %o4
or %o4, %o5, %o5
EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3)
sllx %o4, 24, %o4
or %o4, %o5, %o5
EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3)
sllx %o4, 8, %o4
or %o4, %o5, %o5
EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3)
or %o4, %o5, %o5
add %o0, %o1, %o0
EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3)
sub %o0, %o1, %o0
subcc %o3, 8, %o3
bnz %xcc, .Lunalignbyte_loop
add %o1, 8, %o1
add %o0,%o1, %o0 ! restore pointer
! Destination is now block (64 byte aligned)
.Lunalignsrc:
andn %o2, 0x3f, %o5 ! %o5 is multiple of block size
and %o2, 0x3f, %o2 ! residue bytes in %o2
add %o2, 64, %o2 ! Ensure we do not load beyond
sub %o5, 64, %o5 ! end of source buffer
andn %o1, 0x7, %o4 ! %o4 has long word aligned src address
alignaddr %o1, %g0, %g0 ! generate %gsr
add %o1, %o5, %o1 ! advance %o1 to after blocks
EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5)
add %o4, 8, %o4
.Lunalign_sloop:
EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5)
faligndata %f14, %f16, %f0
EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5)
faligndata %f16, %f18, %f2
EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5)
faligndata %f18, %f20, %f4
EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5)
subcc %o5, 64, %o5
EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56)
faligndata %f20, %f22, %f6
EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56)
EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48)
faligndata %f22, %f24, %f8
EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48)
EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40)
faligndata %f24, %f26, %f10
EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40)
EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40)
faligndata %f26, %f28, %f12
EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40)
add %o4, 64, %o4
EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40)
faligndata %f28, %f30, %f14
EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40)
EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40)
add %o0, 64, %o0
EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40)
fsrc2 %f30, %f14
bgu,pt %xcc, .Lunalign_sloop
prefetch [%o4 + (8 * BLOCK_SIZE)], 20
.Lunalign_done:
! Handle trailing bytes, 64 to 127
! Dest long word aligned, Src not long word aligned
cmp %o2, 15
bleu %xcc, .Lunalign_short
andn %o2, 0x7, %o5 ! %o5 is multiple of 8
and %o2, 0x7, %o2 ! residue bytes in %o2
add %o2, 8, %o2
sub %o5, 8, %o5 ! ensure we do not load past end of src
andn %o1, 0x7, %o4 ! %o4 has long word aligned src address
add %o1, %o5, %o1 ! advance %o1 to after multiple of 8
EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! fetch partialword
.Lunalign_by8:
EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5)
add %o4, 8, %o4
faligndata %f0, %f2, %f16
subcc %o5, 8, %o5
EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5)
fsrc2 %f2, %f0
bgu,pt %xcc, .Lunalign_by8
add %o0, 8, %o0
.Lunalign_short:
#ifdef NON_USER_COPY
VISExitHalfFast
#else
VISExitHalf
#endif
ba .Lsmallrest
nop
/*
* This is a special case of nested memcpy. This can happen when the
* kernel calls unaligned memcpy back to back without saving FP registers.
* We need traps (context switches) to save/restore FP registers. If the
* kernel calls memcpy without this trap sequence we will hit FP corruption.
* Let's use the normal integer load/store method in this case.
*/
#ifdef NON_USER_COPY
.Lmedium_vis_entry_fail_cp:
or %o0, %o1, %g2
#endif
.Lmedium_cp:
LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
andcc %g2, 0x7, %g0
bne,pn %xcc, .Lmedium_unaligned_cp
nop
.Lmedium_noprefetch_cp:
andncc %o2, 0x20 - 1, %o5
be,pn %xcc, 2f
sub %o2, %o5, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
bne,pt %xcc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %xcc, 3f
sub %o2, %o5, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %xcc, 1b
EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit_cp
cmp %o2, 0x04
bl,pn %xcc, .Ltiny_cp
nop
EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %xcc, .Ltiny_cp
EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4)
ba,a,pt %xcc, .Lexit_cp
.Lmedium_unaligned_cp:
/* First get dest 8 byte aligned. */
sub %g0, %o0, %o3
and %o3, 0x7, %o3
brz,pt %o3, 2f
sub %o2, %o3, %o2
1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %o3, 1, %o3
add %o0, 1, %o0
bne,pt %xcc, 1b
EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %o3
brz,pn %o3, .Lmedium_noprefetch_cp
sll %o3, 3, %o3
mov 64, %g2
sub %g2, %o3, %g2
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2)
sllx %o4, %o3, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, %g7
or %g7, %o4, %g7
EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %xcc, 1b
sllx %g3, %o3, %o4
srl %o3, 3, %o3
add %o1, %o3, %o1
brz,pn %o2, .Lexit_cp
nop
ba,pt %xcc, .Lsmall_unaligned_cp
.Ltiny_cp:
EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %xcc, .Lexit_cp
EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1)
EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %xcc, .Lexit_cp
EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1)
EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2)
ba,pt %xcc, .Lexit_cp
EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2)
.Lsmall_cp:
andcc %g2, 0x3, %g0
bne,pn %xcc, .Lsmall_unaligned_cp
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %xcc, 1b
EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit_cp
nop
ba,a,pt %xcc, .Ltiny_cp
.Lsmall_unaligned_cp:
1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %xcc, 1b
EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1)
ba,a,pt %xcc, .Lexit_cp
.Lsmallrest:
tst %o2
bz,pt %xcc, .Lsmallx
cmp %o2, 4
blt,pn %xcc, .Lsmallleft3
nop
sub %o2, 3, %o2
.Lsmallnotalign4:
EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte
subcc %o2, 4, %o2 ! reduce count by 4
EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat
EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4
add %o1, 4, %o1 ! advance SRC by 4
EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6)
EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5)
add %o0, 4, %o0 ! advance DST by 4
EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5)
EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4)
bgu,pt %xcc, .Lsmallnotalign4 ! loop until 3 or fewer bytes remain
EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4)
addcc %o2, 3, %o2 ! restore count
bz,pt %xcc, .Lsmallx
.Lsmallleft3: ! 1, 2, or 3 bytes remain
subcc %o2, 1, %o2
EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte
bz,pt %xcc, .Lsmallx
EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte
EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte
subcc %o2, 1, %o2
bz,pt %xcc, .Lsmallx
EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte
EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte
EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte
.Lsmallx:
retl
mov EX_RETVAL(%g1), %o0
.Lsmallfin:
tst %o2
bnz,pn %xcc, .Lsmallleft3
nop
retl
mov EX_RETVAL(%g1), %o0 ! restore %o0
.Lexit_cp:
retl
mov EX_RETVAL(%g1), %o0
.size FUNC_NAME, .-FUNC_NAME
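The .Lmedium_unaligned_cp path above is worth dwelling on: once the destination is 8-byte aligned, the source is only ever read with aligned 8-byte loads, and each output word is assembled from two neighbouring source words with a shift pair (sllx/srlx/or). The following is a purely illustrative C model of that inner loop; the function name and parameters are ours, not the kernel's, and it assumes a big-endian machine with a nonzero misalignment, exactly the conditions under which the assembly path is taken.

#include <stddef.h>
#include <stdint.h>

/* Illustrative model of the shift-and-merge loop: lshift is 8 * (src & 7)
 * (the %o3 of the assembly) and 64 - lshift is the %g2 value. Assumes
 * big-endian byte order and 0 < lshift < 64, as on the asm path.
 */
static void copy_shift_merge(uint64_t *dst, const uint64_t *src_aligned,
                             unsigned int lshift, size_t words)
{
        uint64_t hi = src_aligned[0] << lshift;         /* sllx %o4, %o3, %o4 */
        size_t i;

        for (i = 0; i < words; i++) {
                uint64_t lo = src_aligned[i + 1];       /* ldx [%o1 + 8] */
                dst[i] = hi | (lo >> (64 - lshift));    /* srlx + or + stx */
                hi = lo << lshift;                      /* carry into next word */
        }
}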
/*
* M7memset.S: SPARC M7 optimized memset.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
* M7memset.S: M7 optimized memset.
*
* char *memset(sp, c, n)
*
* Set an array of n chars starting at sp to the character c.
* Return sp.
*
* Fast assembler language version of the following C-program for memset
* which represents the `standard' for the C-library.
*
* void *
* memset(void *sp1, int c, size_t n)
* {
* if (n != 0) {
* char *sp = sp1;
* do {
* *sp++ = (char)c;
* } while (--n != 0);
* }
* return (sp1);
* }
*
* The algorithm is as follows:
*
* For small stores (7 or fewer bytes), the bytes are stored individually.
*
* For stores of fewer than 32 bytes, align the address on a 4-byte
* boundary, then store as many 4-byte chunks as possible, followed by the
* trailing bytes.
*
* For sizes of 32 bytes or more, align the address on an 8-byte boundary.
* if (count >= 64) {
* store 8-byte chunks to align the address on a 64-byte boundary
* if (value to be set is zero && count >= MIN_ZERO) {
* Using BIS stores, set the first long word of each
* 64-byte cache line to zero which will also clear the
* other seven long words of the cache line.
* }
* else if (count >= MIN_LOOP) {
* Using BIS stores, set the first long word of each of
* ST_CHUNK cache lines (64 bytes each) before the main
* loop is entered.
* In the main loop, continue pre-setting the first long
* word of each cache line ST_CHUNK lines in advance while
* setting the other seven long words (56 bytes) of each
* cache line until fewer than ST_CHUNK*64 bytes remain.
* Then set the remaining seven long words of each cache
* line that has already had its first long word set.
* }
* store remaining data in 64-byte chunks until less than
* 64 bytes remain.
* }
* Store as many 8-byte chunks as possible, followed by the trailing bytes.
*
* BIS = Block Init Store
* Doing the advance store of the first element of the cache line
* initiates the displacement of a cache line while only using a single
* instruction in the pipeline. That avoids various pipeline delays,
* such as filling the miss buffer. The performance effect is
* similar to prefetching for normal stores.
* The special case for zero fills runs faster and uses fewer instruction
* cycles than the normal memset loop.
*
* We only use BIS for memsets larger than MIN_LOOP bytes because a sequence
* of BIS stores must be followed by a membar #StoreStore. The benefit of
* the BIS store must be balanced against the cost of the membar operation.
*/
/*
* ASI_STBI_P marks the cache line as "least recently used"
* which means if many threads are active, it has a high chance
* of being pushed out of the cache between the first initializing
* store and the final stores.
* Thus, we use ASI_STBIMRU_P which marks the cache line as
* "most recently used" for all but the last store to the cache line.
*/
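Taken together, the two comment blocks above describe a small decision tree. The sketch below is an illustrative C model of that dispatch, not kernel code; the enum and function name are invented for exposition, the thresholds come from the #defines that follow, and the alignment preambles (.dbalign/.blkalign) are deliberately ignored.

#include <stddef.h>

#define MIN_LOOP 16320
#define MIN_ZERO 512

enum strategy { BYTE_STORES, WORD_LOOP, LONG_STORES, BIS_LOOP, BIS_ZERO };

/* Which store strategy M7memset picks for len bytes of value c. */
static enum strategy pick_strategy(size_t len, int c)
{
        size_t blocks = len & ~(size_t)63;      /* whole 64-byte cache lines */

        if (len <= 7)
                return BYTE_STORES;             /* .wrchar */
        if (len < 32)
                return WORD_LOOP;               /* .wdalign / .wrword */
        if (c == 0 && blocks >= MIN_ZERO)
                return BIS_ZERO;                /* .wrzero: one BIS store/line */
        if (c != 0 && blocks >= MIN_LOOP)
                return BIS_LOOP;                /* .wr_loop via ASI_STBIMRU_P */
        return LONG_STORES;                     /* .short_set / .wrshort */
}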
#include <asm/asi.h>
#include <asm/page.h>
#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P
#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P
#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */
#define MIN_LOOP 16320
#define MIN_ZERO 512
.section ".text"
.align 32
/*
* Define clear_page(dest) as memset(dest, 0, PAGE_SIZE)
* (can create a more optimized version later.)
*/
.globl M7clear_page
.globl M7clear_user_page
M7clear_page: /* clear_page(dest) */
M7clear_user_page:
set PAGE_SIZE, %o1
/* fall through into bzero code */
.size M7clear_page,.-M7clear_page
.size M7clear_user_page,.-M7clear_user_page
/*
* Define bzero(dest, n) as memset(dest, 0, n)
* (can create a more optimized version later.)
*/
.globl M7bzero
M7bzero: /* bzero(dest, size) */
mov %o1, %o2
mov 0, %o1
/* fall through into memset code */
.size M7bzero,.-M7bzero
.global M7memset
.type M7memset, #function
.register %g3, #scratch
M7memset:
mov %o0, %o5 ! copy sp1 before using it
cmp %o2, 7 ! if small counts, just write bytes
bleu,pn %xcc, .wrchar
and %o1, 0xff, %o1 ! o1 is (char)c
sll %o1, 8, %o3
or %o1, %o3, %o1 ! now o1 has 2 bytes of c
sll %o1, 16, %o3
cmp %o2, 32
blu,pn %xcc, .wdalign
or %o1, %o3, %o1 ! now o1 has 4 bytes of c
sllx %o1, 32, %o3
or %o1, %o3, %o1 ! now o1 has 8 bytes of c
.dbalign:
andcc %o5, 7, %o3 ! is sp1 aligned on an 8-byte boundary?
bz,pt %xcc, .blkalign ! already long word aligned
sub %o3, 8, %o3 ! -(bytes till long word aligned)
add %o2, %o3, %o2 ! update o2 with new count
! Set -(%o3) bytes till sp1 long word aligned
1: stb %o1, [%o5] ! there is at least 1 byte to set
inccc %o3 ! byte store loop
bl,pt %xcc, 1b
inc %o5
! Now sp1 is long word aligned (sp1 is found in %o5)
.blkalign:
cmp %o2, 64 ! check if there are 64 bytes to set
blu,pn %xcc, .wrshort
mov %o2, %o3
andcc %o5, 63, %o3 ! is sp1 block aligned?
bz,pt %xcc, .blkwr ! now block aligned
sub %o3, 64, %o3 ! o3 is -(bytes till block aligned)
add %o2, %o3, %o2 ! o2 is the remainder
! Store -(%o3) bytes till dst is block (64 byte) aligned.
! Use long word stores.
! Recall that dst is already long word aligned
1:
addcc %o3, 8, %o3
stx %o1, [%o5]
bl,pt %xcc, 1b
add %o5, 8, %o5
! Now sp1 is block aligned
.blkwr:
andn %o2, 63, %o4 ! calculate size of blocks in bytes
brz,pn %o1, .wrzero ! special case if c == 0
and %o2, 63, %o3 ! %o3 = bytes left after blk stores.
set MIN_LOOP, %g1
cmp %o4, %g1 ! check there are enough bytes to set
blu,pn %xcc, .short_set ! to justify cost of membar
! must be > pre-cleared lines
nop
! initial cache-clearing stores
! get store pipeline moving
rd %asi, %g3 ! save %asi to be restored later
wr %g0, ASI_STBIMRU_P, %asi
! Primary memset loop for large memsets
.wr_loop:
sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment
mov ST_CHUNK, %g1
.wr_loop_start:
stxa %o1, [%o5+8]%asi
subcc %g1, 4, %g1
stxa %o1, [%o5+8+64]%asi
add %o5, 256, %o5
stxa %o1, [%o5+8-128]%asi
bgu %xcc, .wr_loop_start
stxa %o1, [%o5+8-64]%asi
sub %o5, ST_CHUNK*64, %o5 ! reset %o5
mov ST_CHUNK, %g1
.wr_loop_rest:
stxa %o1, [%o5+8+8]%asi
sub %o4, 64, %o4
stxa %o1, [%o5+16+8]%asi
subcc %g1, 1, %g1
stxa %o1, [%o5+24+8]%asi
stxa %o1, [%o5+32+8]%asi
stxa %o1, [%o5+40+8]%asi
add %o5, 64, %o5
stxa %o1, [%o5-8]%asi
bgu %xcc, .wr_loop_rest
stxa %o1, [%o5]ASI_STBI_P
! If more than ST_CHUNK*64 bytes remain to set, continue
! setting the first long word of each cache line in advance
! to keep the store pipeline moving.
cmp %o4, ST_CHUNK*64
bge,pt %xcc, .wr_loop_start
mov ST_CHUNK, %g1
brz,a,pn %o4, .asi_done
add %o5, 8, %o5 ! restore %o5 offset
.wr_loop_small:
stxa %o1, [%o5+8]%asi
stxa %o1, [%o5+8+8]%asi
stxa %o1, [%o5+16+8]%asi
stxa %o1, [%o5+24+8]%asi
stxa %o1, [%o5+32+8]%asi
subcc %o4, 64, %o4
stxa %o1, [%o5+40+8]%asi
add %o5, 64, %o5
stxa %o1, [%o5-8]%asi
bgu,pt %xcc, .wr_loop_small
stxa %o1, [%o5]ASI_STBI_P
ba .asi_done
add %o5, 8, %o5 ! restore %o5 offset
! Special case loop for zero fill memsets
! For each 64 byte cache line, single STBI to first element
! clears line
.wrzero:
cmp %o4, MIN_ZERO ! check if enough bytes to set
! to pay %asi + membar cost
blu %xcc, .short_set
nop
sub %o4, 256, %o4
.wrzero_loop:
mov 64, %g3
stxa %o1, [%o5]ASI_STBI_P
subcc %o4, 256, %o4
stxa %o1, [%o5+%g3]ASI_STBI_P
add %o5, 256, %o5
sub %g3, 192, %g3
stxa %o1, [%o5+%g3]ASI_STBI_P
add %g3, 64, %g3
bge,pt %xcc, .wrzero_loop
stxa %o1, [%o5+%g3]ASI_STBI_P
add %o4, 256, %o4
brz,pn %o4, .bsi_done
nop
.wrzero_small:
stxa %o1, [%o5]ASI_STBI_P
subcc %o4, 64, %o4
bgu,pt %xcc, .wrzero_small
add %o5, 64, %o5
ba,a .bsi_done
.asi_done:
wr %g3, 0x0, %asi ! restore saved %asi
.bsi_done:
membar #StoreStore ! required by use of Block Store Init
.short_set:
cmp %o4, 64 ! check if 64 bytes to set
blu %xcc, 5f
nop
4: ! set final blocks of 64 bytes
stx %o1, [%o5]
stx %o1, [%o5+8]
stx %o1, [%o5+16]
stx %o1, [%o5+24]
subcc %o4, 64, %o4
stx %o1, [%o5+32]
stx %o1, [%o5+40]
add %o5, 64, %o5
stx %o1, [%o5-16]
bgu,pt %xcc, 4b
stx %o1, [%o5-8]
5:
! Set the remaining long words
.wrshort:
subcc %o3, 8, %o3 ! Can we store any long words?
blu,pn %xcc, .wrchars
and %o2, 7, %o2 ! calc bytes left after long words
6:
subcc %o3, 8, %o3
stx %o1, [%o5] ! store the long words
bgeu,pt %xcc, 6b
add %o5, 8, %o5
.wrchars: ! check for extra chars
brnz %o2, .wrfin
nop
retl
nop
.wdalign:
andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary
bz,pn %xcc, .wrword
andn %o2, 3, %o3 ! create word sized count in %o3
dec %o2 ! decrement count
stb %o1, [%o5] ! store a byte
b .wdalign
inc %o5 ! next byte
.wrword:
subcc %o3, 4, %o3
st %o1, [%o5] ! 4-byte writing loop
bnz,pt %xcc, .wrword
add %o5, 4, %o5
and %o2, 3, %o2 ! leftover count, if any
.wrchar:
! Set the remaining bytes, if any
brz %o2, .exit
nop
.wrfin:
deccc %o2
stb %o1, [%o5]
bgu,pt %xcc, .wrfin
inc %o5
.exit:
retl ! %o0 was preserved
nop
.size M7memset,.-M7memset
/*
* M7patch.S: Patch generic routines with M7 variant.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
#include <linux/linkage.h>
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define NG_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
ENTRY(m7_patch_copyops)
NG_DO_PATCH(memcpy, M7memcpy)
NG_DO_PATCH(raw_copy_from_user, M7copy_from_user)
NG_DO_PATCH(raw_copy_to_user, M7copy_to_user)
retl
nop
ENDPROC(m7_patch_copyops)
ENTRY(m7_patch_bzero)
NG_DO_PATCH(memset, M7memset)
NG_DO_PATCH(__bzero, M7bzero)
NG_DO_PATCH(__clear_user, NGclear_user)
NG_DO_PATCH(tsb_init, NGtsb_init)
retl
nop
ENDPROC(m7_patch_bzero)
ENTRY(m7_patch_pageops)
NG_DO_PATCH(copy_user_page, NG4copy_user_page)
NG_DO_PATCH(_clear_page, M7clear_page)
NG_DO_PATCH(clear_user_page, M7clear_user_page)
retl
nop
ENDPROC(m7_patch_pageops)
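NG_DO_PATCH rewrites the first two instructions of the generic routine with a branch to the M7 version followed by a nop, then flushes the instruction cache line. The displacement arithmetic (sll by 11, then srl by 13) is just a mask-and-scale; here is a stand-alone, purely illustrative C rendering with a helper name of our own choosing.

#include <stdint.h>

#define BRANCH_ALWAYS 0x10680000
#define NOP           0x01000000

/* Model of the first patch word NG_DO_PATCH stores at 'old': a
 * branch-always whose 19-bit word displacement is (new - old) / 4.
 * The sll 11 / srl 13 pair keeps the low 21 bits of the byte delta
 * and divides by 4, exactly as the assembly does; the second patch
 * word is simply NOP.
 */
static uint32_t branch_insn(uint32_t old_addr, uint32_t new_addr)
{
        uint32_t delta = new_addr - old_addr;   /* byte displacement */

        return BRANCH_ALWAYS | ((delta << 11) >> (11 + 2));
}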
@@ -36,6 +36,11 @@ lib-$(CONFIG_SPARC64) += NG2patch.o
lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o
lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o
+lib-$(CONFIG_SPARC64) += Memcpy_utils.o
+lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o
+lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
...
#ifndef __ASM_MEMCPY_UTILS
#define __ASM_MEMCPY_UTILS
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/visasm.h>
ENTRY(__restore_asi_fp)
VISExitHalf
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi_fp)
ENTRY(__restore_asi)
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi)
ENTRY(memcpy_retl_o2)
ba,pt %xcc, __restore_asi
mov %o2, %o0
ENDPROC(memcpy_retl_o2)
ENTRY(memcpy_retl_o2_plus_1)
ba,pt %xcc, __restore_asi
add %o2, 1, %o0
ENDPROC(memcpy_retl_o2_plus_1)
ENTRY(memcpy_retl_o2_plus_3)
ba,pt %xcc, __restore_asi
add %o2, 3, %o0
ENDPROC(memcpy_retl_o2_plus_3)
ENTRY(memcpy_retl_o2_plus_4)
ba,pt %xcc, __restore_asi
add %o2, 4, %o0
ENDPROC(memcpy_retl_o2_plus_4)
ENTRY(memcpy_retl_o2_plus_5)
ba,pt %xcc, __restore_asi
add %o2, 5, %o0
ENDPROC(memcpy_retl_o2_plus_5)
ENTRY(memcpy_retl_o2_plus_6)
ba,pt %xcc, __restore_asi
add %o2, 6, %o0
ENDPROC(memcpy_retl_o2_plus_6)
ENTRY(memcpy_retl_o2_plus_7)
ba,pt %xcc, __restore_asi
add %o2, 7, %o0
ENDPROC(memcpy_retl_o2_plus_7)
ENTRY(memcpy_retl_o2_plus_8)
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_8)
ENTRY(memcpy_retl_o2_plus_15)
ba,pt %xcc, __restore_asi
add %o2, 15, %o0
ENDPROC(memcpy_retl_o2_plus_15)
ENTRY(memcpy_retl_o2_plus_15_8)
add %o2, 15, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_15_8)
ENTRY(memcpy_retl_o2_plus_16)
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_16)
ENTRY(memcpy_retl_o2_plus_24)
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_24)
ENTRY(memcpy_retl_o2_plus_31)
ba,pt %xcc, __restore_asi
add %o2, 31, %o0
ENDPROC(memcpy_retl_o2_plus_31)
ENTRY(memcpy_retl_o2_plus_32)
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_32)
ENTRY(memcpy_retl_o2_plus_31_32)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_31_32)
ENTRY(memcpy_retl_o2_plus_31_24)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_31_24)
ENTRY(memcpy_retl_o2_plus_31_16)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_31_16)
ENTRY(memcpy_retl_o2_plus_31_8)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_31_8)
ENTRY(memcpy_retl_o2_plus_63)
ba,pt %xcc, __restore_asi
add %o2, 63, %o0
ENDPROC(memcpy_retl_o2_plus_63)
ENTRY(memcpy_retl_o2_plus_63_64)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 64, %o0
ENDPROC(memcpy_retl_o2_plus_63_64)
ENTRY(memcpy_retl_o2_plus_63_56)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 56, %o0
ENDPROC(memcpy_retl_o2_plus_63_56)
ENTRY(memcpy_retl_o2_plus_63_48)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 48, %o0
ENDPROC(memcpy_retl_o2_plus_63_48)
ENTRY(memcpy_retl_o2_plus_63_40)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 40, %o0
ENDPROC(memcpy_retl_o2_plus_63_40)
ENTRY(memcpy_retl_o2_plus_63_32)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_63_32)
ENTRY(memcpy_retl_o2_plus_63_24)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_63_24)
ENTRY(memcpy_retl_o2_plus_63_16)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_63_16)
ENTRY(memcpy_retl_o2_plus_63_8)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_63_8)
ENTRY(memcpy_retl_o2_plus_o5)
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5)
ENTRY(memcpy_retl_o2_plus_o5_plus_1)
add %o5, 1, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_1)
ENTRY(memcpy_retl_o2_plus_o5_plus_4)
add %o5, 4, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_4)
ENTRY(memcpy_retl_o2_plus_o5_plus_8)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8)
ENTRY(memcpy_retl_o2_plus_o5_plus_16)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16)
ENTRY(memcpy_retl_o2_plus_o5_plus_24)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24)
ENTRY(memcpy_retl_o2_plus_o5_plus_32)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32)
ENTRY(memcpy_retl_o2_plus_o5_64)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_64)
ENTRY(memcpy_retl_o2_plus_g1)
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1)
ENTRY(memcpy_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_1)
ENTRY(memcpy_retl_o2_plus_g1_plus_8)
add %g1, 8, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_8)
ENTRY(memcpy_retl_o2_plus_o4)
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4)
ENTRY(memcpy_retl_o2_plus_o4_plus_8)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8)
ENTRY(memcpy_retl_o2_plus_o4_plus_16)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16)
ENTRY(memcpy_retl_o2_plus_o4_plus_24)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24)
ENTRY(memcpy_retl_o2_plus_o4_plus_32)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32)
ENTRY(memcpy_retl_o2_plus_o4_plus_40)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40)
ENTRY(memcpy_retl_o2_plus_o4_plus_48)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48)
ENTRY(memcpy_retl_o2_plus_o4_plus_56)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56)
ENTRY(memcpy_retl_o2_plus_o4_plus_64)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64)
ENTRY(memcpy_retl_o2_plus_o5_plus_64)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64)
ENTRY(memcpy_retl_o2_plus_o3_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp)
add %o3, 1, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp)
add %o3, 4, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp)
ENTRY(memcpy_retl_o2_plus_o4_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp)
add %o5, 56, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp)
add %o5, 48, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp)
add %o5, 40, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp)
#endif
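The naming scheme above is mechanical: each stub returns, in %o0, the number of bytes that were not copied when a load or store faulted. A tiny illustrative model follows; the function and main() are ours, invented only to spell out the arithmetic one stub performs.

#include <stdio.h>

/* memcpy_retl_o2_plus_o5_plus_24, say, computes %o2 + (%o5 + 24):
 * %o2 holds the bytes never handed to the unrolled loop, %o5 the bytes
 * still owed by later iterations, and 24 the bytes of the current
 * iteration not yet stored when the fault hit.
 */
static unsigned long retl_o2_plus_o5_plus_24(unsigned long o2,
                                             unsigned long o5)
{
        return o2 + o5 + 24;
}

int main(void)
{
        /* e.g. a fault at the third store of a 32-byte iteration */
        printf("%lu bytes uncopied\n", retl_o2_plus_o5_plus_24(5, 32));
        return 0;
}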
@@ -94,155 +94,6 @@
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
__restore_asi_fp:
VISExitHalf
__restore_asi:
retl
wr %g0, ASI_AIUS, %asi
ENTRY(NG4_retl_o2)
ba,pt %xcc, __restore_asi
mov %o2, %o0
ENDPROC(NG4_retl_o2)
ENTRY(NG4_retl_o2_plus_1)
ba,pt %xcc, __restore_asi
add %o2, 1, %o0
ENDPROC(NG4_retl_o2_plus_1)
ENTRY(NG4_retl_o2_plus_4)
ba,pt %xcc, __restore_asi
add %o2, 4, %o0
ENDPROC(NG4_retl_o2_plus_4)
ENTRY(NG4_retl_o2_plus_o5)
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5)
ENTRY(NG4_retl_o2_plus_o5_plus_4)
add %o5, 4, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5_plus_4)
ENTRY(NG4_retl_o2_plus_o5_plus_8)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5_plus_8)
ENTRY(NG4_retl_o2_plus_o5_plus_16)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5_plus_16)
ENTRY(NG4_retl_o2_plus_o5_plus_24)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5_plus_24)
ENTRY(NG4_retl_o2_plus_o5_plus_32)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(NG4_retl_o2_plus_o5_plus_32)
ENTRY(NG4_retl_o2_plus_g1)
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(NG4_retl_o2_plus_g1)
ENTRY(NG4_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(NG4_retl_o2_plus_g1_plus_1)
ENTRY(NG4_retl_o2_plus_g1_plus_8)
add %g1, 8, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(NG4_retl_o2_plus_g1_plus_8)
ENTRY(NG4_retl_o2_plus_o4)
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4)
ENTRY(NG4_retl_o2_plus_o4_plus_8)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_8)
ENTRY(NG4_retl_o2_plus_o4_plus_16)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_16)
ENTRY(NG4_retl_o2_plus_o4_plus_24)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_24)
ENTRY(NG4_retl_o2_plus_o4_plus_32)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_32)
ENTRY(NG4_retl_o2_plus_o4_plus_40)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_40)
ENTRY(NG4_retl_o2_plus_o4_plus_48)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_48)
ENTRY(NG4_retl_o2_plus_o4_plus_56)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_56)
ENTRY(NG4_retl_o2_plus_o4_plus_64)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_64)
ENTRY(NG4_retl_o2_plus_o4_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_8_fp)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_16_fp)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_24_fp)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_32_fp)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_40_fp)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_48_fp)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_56_fp)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp)
ENTRY(NG4_retl_o2_plus_o4_plus_64_fp)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp)
#endif
.align 64
@@ -275,12 +126,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
-EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
+EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
@@ -305,43 +156,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
-EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8)
+EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4)
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4)
add %o1, 0x40, %o1
-EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4)
+EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
-EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64)
+EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64)
+EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64)
+EX_LD(LOAD(ldx, %o1 - 0x20, %o5), memcpy_retl_o2_plus_o4_plus_64)
-EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64)
+EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
-EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56)
+EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
-EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48)
+EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48)
-EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48)
+EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
-EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40)
+EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40)
-EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40)
+EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
-EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32)
+EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32)
-EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32)
+EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
-EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24)
+EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
-EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16)
+EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
-EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8)
+EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
@@ -367,17 +218,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
-EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4)
+EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4)
-1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4)
+1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
-EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64)
-EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
-EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64)
+EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@@ -386,14 +237,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
-EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64)
+EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64)
-EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56)
+EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56)
-EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48)
+EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48)
-EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40)
+EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40)
-EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32)
+EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32)
-EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24)
+EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24)
-EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16)
+EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16)
-EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8)
+EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
@@ -421,38 +272,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
-EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5)
+EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5)
-EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5)
+EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5)
-EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5)
+EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
-EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32)
+EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
-EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24)
+EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
-EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24)
+EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
-EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8)
+EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
-EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8)
+EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
-EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2)
+EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
-EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4)
+EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
@@ -461,12 +312,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
brz,pt %g1, 2f
sub %o2, %g1, %o2
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
-EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1)
+EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
@@ -474,16 +325,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
-EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2)
+EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
-1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5)
+1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
-EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8)
+EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
@@ -494,17 +345,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
-EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
+EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
-EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1)
+EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1)
-EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2)
+EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
-EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1)
+EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1)
-EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2)
+EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2)
ba,pt %icc, .Lexit
-EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2)
+EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
@@ -512,23 +363,23 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
-EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5)
+EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
-EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4)
+EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
-1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2)
+1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
-EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
+EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
nop
.size FUNC_NAME, .-FUNC_NAME
@@ -168,18 +168,25 @@ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
srlx %o2, 31, %g2
cmp %g2, 0
+/* software trap 5 "Range Check" if dst >= 0x80000000 */
tne %xcc, 5
PREAMBLE
mov %o0, %o4
+/* if len == 0 */
cmp %o2, 0
-be,pn %XCC, 85f
+be,pn %XCC, end_return
or %o0, %o1, %o3
+/* if len < 16 */
cmp %o2, 16
-blu,a,pn %XCC, 80f
+blu,a,pn %XCC, less_than_16
or %o3, %o2, %o3
+/* if len < 192 */
cmp %o2, (3 * 64)
-blu,pt %XCC, 70f
+blu,pt %XCC, less_than_192
andcc %o3, 0x7, %g0
/* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
@@ -362,7 +369,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
-be,pn %XCC, 85f
+be,pn %XCC, end_return
sub %o0, %o1, %o3
andcc %g1, 0x7, %g0
@@ -392,14 +399,15 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
-be,pt %icc, 85f
+be,pt %icc, end_return
nop
EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
-ba,pt %xcc, 85f
+ba,pt %xcc, end_return
EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
-70: /* 16 < len <= 64 */
+/* 16 <= len < 192 */
+less_than_192:
bne,pn %XCC, 75f
sub %o0, %o1, %o3
@@ -429,7 +437,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
-be,pt %XCC, 85f
+be,pt %XCC, end_return
nop
ba,pt %xcc, 90f
nop
@@ -475,13 +483,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
-be,pn %icc, 85f
+be,pn %icc, end_return
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, %o3
.align 64
-80: /* 0 < len <= 16 */
+/* 0 < len < 16 */
+less_than_16:
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
@@ -493,7 +502,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
bgu,pt %XCC, 1b
add %o1, 4, %o1
-85: retl
+end_return:
+retl
mov EX_RETVAL(%o4), %o0
.align 32
...
@@ -103,6 +103,45 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
return 1;
}
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
unsigned long end, int write, struct page **pages,
int *nr)
{
struct page *head, *page;
int refs;
if (!(pud_val(pud) & _PAGE_VALID))
return 0;
if (write && !pud_write(pud))
return 0;
refs = 0;
page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
head = compound_head(page);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(pud) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -141,7 +180,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
-if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+if (unlikely(pud_large(pud))) {
+if (!gup_huge_pud(pudp, pud, addr, next,
+write, pages, nr))
+return 0;
+} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);
...
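gup_huge_pud() above follows the classic lockless-GUP discipline: take speculative page references first, then re-read the PUD entry and undo everything if it changed underneath. Below is a userspace-flavoured C11 model of that discipline, offered purely for illustration; the helper names are invented stand-ins for page_cache_add_speculative() and put_page().

#include <stdatomic.h>
#include <stdbool.h>

/* Speculative get: only succeed if the refcount is not already zero. */
static bool get_speculative(atomic_int *refcount, int refs)
{
        int old = atomic_load(refcount);

        do {
                if (old == 0)
                        return false;   /* page being freed; caller retries */
        } while (!atomic_compare_exchange_weak(refcount, &old, old + refs));
        return true;
}

/* Revalidate: the entry must still equal the snapshot, else roll back. */
static bool gup_revalidate(_Atomic unsigned long *pudp, unsigned long seen,
                           atomic_int *refcount, int refs)
{
        if (!get_speculative(refcount, refs))
                return false;
        if (atomic_load(pudp) != seen) {        /* changed under us */
                atomic_fetch_sub(refcount, refs);
                return false;
        }
        return true;
}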
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
switch (shift) {
+case HPAGE_16GB_SHIFT:
+hugepage_size = _PAGE_SZ16GB_4V;
+pte_val(entry) |= _PAGE_PUD_HUGE;
+break;
case HPAGE_2GB_SHIFT:
hugepage_size = _PAGE_SZ2GB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -187,6 +191,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
unsigned int shift;
switch (tte_szbits) {
+case _PAGE_SZ16GB_4V:
+shift = HPAGE_16GB_SHIFT;
+break;
case _PAGE_SZ2GB_4V:
shift = HPAGE_2GB_SHIFT;
break;
@@ -259,22 +266,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
-pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
-if (pud) {
+if (!pud)
+return NULL;
+if (sz >= PUD_SIZE)
+return (pte_t *)pud;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
if (sz >= PMD_SIZE)
-pte = (pte_t *)pmd;
-else
-pte = pte_alloc_map(mm, pmd, addr);
-}
-return pte;
+return (pte_t *)pmd;
+return pte_alloc_map(mm, pmd, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm,
@@ -283,34 +287,40 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
-pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
-if (!pgd_none(*pgd)) {
+if (pgd_none(*pgd))
+return NULL;
pud = pud_offset(pgd, addr);
-if (!pud_none(*pud)) {
+if (pud_none(*pud))
+return NULL;
+if (is_hugetlb_pud(*pud))
+return (pte_t *)pud;
pmd = pmd_offset(pud, addr);
-if (!pmd_none(*pmd)) {
+if (pmd_none(*pmd))
+return NULL;
if (is_hugetlb_pmd(*pmd))
-pte = (pte_t *)pmd;
-else
-pte = pte_offset_map(pmd, addr);
-}
-}
-}
-return pte;
+return (pte_t *)pmd;
+return pte_offset_map(pmd, addr);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
-unsigned int i, nptes, orig_shift, shift;
-unsigned long size;
+unsigned int nptes, orig_shift, shift;
+unsigned long i, size;
pte_t orig;
size = huge_tte_to_size(entry);
-shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
+shift = PAGE_SHIFT;
+if (size >= PUD_SIZE)
+shift = PUD_SHIFT;
+else if (size >= PMD_SIZE)
+shift = PMD_SHIFT;
+else
+shift = PAGE_SHIFT;
nptes = size >> shift;
if (!pte_present(*ptep) && pte_present(entry))
@@ -333,19 +343,23 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
-unsigned int i, nptes, hugepage_shift;
+unsigned int i, nptes, orig_shift, shift;
unsigned long size;
pte_t entry;
entry = *ptep;
size = huge_tte_to_size(entry);
-if (size >= HPAGE_SIZE)
-nptes = size >> PMD_SHIFT;
-else
-nptes = size >> PAGE_SHIFT;
-hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
-huge_tte_to_shift(entry);
+shift = PAGE_SHIFT;
+if (size >= PUD_SIZE)
+shift = PUD_SHIFT;
+else if (size >= PMD_SIZE)
+shift = PMD_SHIFT;
+else
+shift = PAGE_SHIFT;
+nptes = size >> shift;
+orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);
if (pte_present(entry))
mm->context.hugetlb_pte_count -= nptes;
@@ -354,11 +368,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
for (i = 0; i < nptes; i++)
ptep[i] = __pte(0UL);
-maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
+maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
if (size == HPAGE_SIZE)
maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
-hugepage_shift);
+orig_shift);
return entry;
}
@@ -371,7 +385,8 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud)
{
-return 0;
+return !pud_none(pud) &&
+(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
@@ -435,6 +450,9 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
+if (is_hugetlb_pud(*pud))
+pud_clear(pud);
+else
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
} while (pud++, addr = next, addr != end);
...
@@ -348,6 +348,18 @@ static int __init hugetlbpage_init(void)
arch_initcall(hugetlbpage_init);
+static void __init pud_huge_patch(void)
+{
+struct pud_huge_patch_entry *p;
+unsigned long addr;
+p = &__pud_huge_patch;
+addr = p->addr;
+*(unsigned int *)addr = p->insn;
+__asm__ __volatile__("flush %0" : : "r" (addr));
+}
static int __init setup_hugepagesz(char *string)
{
unsigned long long hugepage_size;
@@ -360,6 +372,11 @@ static int __init setup_hugepagesz(char *string)
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
+case HPAGE_16GB_SHIFT:
+hv_pgsz_mask = HV_PGSZ_MASK_16GB;
+hv_pgsz_idx = HV_PGSZ_IDX_16GB;
+pud_huge_patch();
+break;
case HPAGE_2GB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_2GB;
hv_pgsz_idx = HV_PGSZ_IDX_2GB;
@@ -400,6 +417,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
{
struct mm_struct *mm;
unsigned long flags;
+bool is_huge_tsb;
pte_t pte = *ptep;
if (tlb_type != hypervisor) {
@@ -417,15 +435,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
spin_lock_irqsave(&mm->context.lock, flags);
+is_huge_tsb = false;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
-/* We are fabricating 8MB pages using 4MB real hw pages. */
+if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
+unsigned long hugepage_size = PAGE_SIZE;
+if (is_vm_hugetlb_page(vma))
+hugepage_size = huge_page_size(hstate_vma(vma));
+if (hugepage_size >= PUD_SIZE) {
+unsigned long mask = 0x1ffc00000UL;
+/* Transfer bits [32:22] from address to resolve
+ * at 4M granularity.
+ */
+pte_val(pte) &= ~mask;
+pte_val(pte) |= (address & mask);
+} else if (hugepage_size >= PMD_SIZE) {
+/* We are fabricating 8MB pages using 4MB
+ * real hw pages.
+ */
pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
-__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
-address, pte_val(pte));
-} else
+}
+if (hugepage_size >= PMD_SIZE) {
+__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
+REAL_HPAGE_SHIFT, address, pte_val(pte));
+is_huge_tsb = true;
+}
+}
#endif
+if (!is_huge_tsb)
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));
...
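The 0x1ffc00000UL mask in the hunk above is exactly bits [32:22] of the virtual address, that is, the 4 MB index within a 16 GB page: the sum of 2^22 through 2^32 is 2^33 - 2^22 = 0x1ffc00000. A one-line sanity check, ours and purely for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask = (1ULL << 33) - (1ULL << 22);    /* bits 32..22 set */

        assert(mask == 0x1ffc00000ULL);
        return 0;
}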
@@ -458,4 +458,9 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config VCC
+tristate "Sun Virtual Console Concentrator"
+depends on SUN_LDOMS
+help
+Support for Sun logical domain consoles.
endif # TTY
@@ -33,5 +33,6 @@ obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
obj-$(CONFIG_DA_TTY) += metag_da.o
obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
+obj-$(CONFIG_VCC) += vcc.o
obj-y += ipwireless/
/* vcc.c: sun4v virtual channel concentrator
*
* Copyright (C) 2017 Oracle. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/vio.h>
#include <asm/ldc.h>
#define DRV_MODULE_NAME "vcc"
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 1, 2017"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
struct vcc_port {
struct vio_driver_state vio;
spinlock_t lock;
char *domain;
struct tty_struct *tty; /* only populated while dev is open */
unsigned long index; /* index into the vcc_table */
u64 refcnt;
bool excl_locked;
bool removed;
/* This buffer is required to support the tty write_room interface
* and to guarantee that any characters the driver accepts will
* eventually be sent, either immediately or later.
*/
int chars_in_buffer;
struct vio_vcc buffer;
struct timer_list rx_timer;
struct timer_list tx_timer;
};
/* Microseconds that a thread will delay waiting for a vcc port ref */
#define VCC_REF_DELAY 100
#define VCC_MAX_PORTS 1024
#define VCC_MINOR_START 0 /* must be zero */
#define VCC_BUFF_LEN VIO_VCC_MTU_SIZE
#define VCC_CTL_BREAK -1
#define VCC_CTL_HUP -2
static const char vcc_driver_name[] = "vcc";
static const char vcc_device_node[] = "vcc";
static struct tty_driver *vcc_tty_driver;
static struct vcc_port *vcc_table[VCC_MAX_PORTS];
static DEFINE_SPINLOCK(vcc_table_lock);
unsigned int vcc_dbg;
unsigned int vcc_dbg_ldc;
unsigned int vcc_dbg_vio;
module_param(vcc_dbg, uint, 0664);
module_param(vcc_dbg_ldc, uint, 0664);
module_param(vcc_dbg_vio, uint, 0664);
#define VCC_DBG_DRV 0x1
#define VCC_DBG_LDC 0x2
#define VCC_DBG_PKT 0x4
#define vccdbg(f, a...) \
do { \
if (vcc_dbg & VCC_DBG_DRV) \
pr_info(f, ## a); \
} while (0)
#define vccdbgl(l) \
do { \
if (vcc_dbg & VCC_DBG_LDC) \
ldc_print(l); \
} while (0)
#define vccdbgp(pkt) \
do { \
if (vcc_dbg & VCC_DBG_PKT) { \
int i; \
for (i = 0; i < pkt.tag.stype; i++) \
pr_info("[%c]", pkt.data[i]); \
} \
} while (0)
/* Note: Be careful when adding flags to this line discipline. Don't
* add anything that will cause echoing or we'll go into a recursive
* loop, echoing characters back and forth with the console drivers.
*/
static const struct ktermios vcc_tty_termios = {
.c_iflag = IGNBRK | IGNPAR,
.c_oflag = OPOST,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
.c_ospeed = 38400
};
/**
* vcc_table_add() - Add VCC port to the VCC table
* @port: pointer to the VCC port
*
* Return: index of the port in the VCC table on success,
* -1 on failure
*/
static int vcc_table_add(struct vcc_port *port)
{
unsigned long flags;
int i;
spin_lock_irqsave(&vcc_table_lock, flags);
for (i = VCC_MINOR_START; i < VCC_MAX_PORTS; i++) {
if (!vcc_table[i]) {
vcc_table[i] = port;
break;
}
}
spin_unlock_irqrestore(&vcc_table_lock, flags);
if (i < VCC_MAX_PORTS)
return i;
else
return -1;
}
/**
* vcc_table_remove() - Removes a VCC port from the VCC table
* @index: Index into the VCC table
*/
static void vcc_table_remove(unsigned long index)
{
unsigned long flags;
if (WARN_ON(index >= VCC_MAX_PORTS))
return;
spin_lock_irqsave(&vcc_table_lock, flags);
vcc_table[index] = NULL;
spin_unlock_irqrestore(&vcc_table_lock, flags);
}
/**
* vcc_get() - Gets a reference to VCC port
* @index: Index into the VCC table
* @excl: Indicates if an exclusive access is requested
*
* Return: reference to the VCC port, if found
* NULL, if port not found
*/
static struct vcc_port *vcc_get(unsigned long index, bool excl)
{
struct vcc_port *port;
unsigned long flags;
try_again:
spin_lock_irqsave(&vcc_table_lock, flags);
port = vcc_table[index];
if (!port) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
return NULL;
}
if (!excl) {
if (port->excl_locked) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
udelay(VCC_REF_DELAY);
goto try_again;
}
port->refcnt++;
spin_unlock_irqrestore(&vcc_table_lock, flags);
return port;
}
if (port->refcnt) {
spin_unlock_irqrestore(&vcc_table_lock, flags);
/* Threads wanting exclusive access will wait half the time,
* probably giving them higher priority in the case of
* multiple waiters.
*/
udelay(VCC_REF_DELAY/2);
goto try_again;
}
port->refcnt++;
port->excl_locked = true;
spin_unlock_irqrestore(&vcc_table_lock, flags);
return port;
}
/**
* vcc_put() - Returns a reference to VCC port
* @port: pointer to VCC port
* @excl: Indicates if the returned reference is an exclusive reference
*
* Note: It's the caller's responsibility to ensure the correct value
* for the excl flag
*/
static void vcc_put(struct vcc_port *port, bool excl)
{
unsigned long flags;
if (!port)
return;
spin_lock_irqsave(&vcc_table_lock, flags);
/* check if caller attempted to put with the wrong flags */
if (WARN_ON((excl && !port->excl_locked) ||
(!excl && port->excl_locked)))
goto done;
port->refcnt--;
if (excl)
port->excl_locked = false;
done:
spin_unlock_irqrestore(&vcc_table_lock, flags);
}
/**
* vcc_get_ne() - Get a non-exclusive reference to VCC port
* @index: Index into the VCC table
*
* Gets a non-exclusive reference to VCC port, if it's not removed
*
* Return: pointer to the VCC port, if found
* NULL, if port not found
*/
static struct vcc_port *vcc_get_ne(unsigned long index)
{
struct vcc_port *port;
port = vcc_get(index, false);
if (port && port->removed) {
vcc_put(port, false);
return NULL;
}
return port;
}
static void vcc_kick_rx(struct vcc_port *port)
{
struct vio_driver_state *vio = &port->vio;
assert_spin_locked(&port->lock);
if (!timer_pending(&port->rx_timer) && !port->removed) {
disable_irq_nosync(vio->vdev->rx_irq);
port->rx_timer.expires = (jiffies + 1);
add_timer(&port->rx_timer);
}
}
static void vcc_kick_tx(struct vcc_port *port)
{
assert_spin_locked(&port->lock);
if (!timer_pending(&port->tx_timer) && !port->removed) {
port->tx_timer.expires = (jiffies + 1);
add_timer(&port->tx_timer);
}
}
static int vcc_rx_check(struct tty_struct *tty, int size)
{
if (WARN_ON(!tty || !tty->port))
return 1;
/* tty_buffer_request_room won't sleep because it uses
* GFP_ATOMIC flag to allocate buffer
*/
if (test_bit(TTY_THROTTLED, &tty->flags) ||
(tty_buffer_request_room(tty->port, VCC_BUFF_LEN) < VCC_BUFF_LEN))
return 0;
return 1;
}
static int vcc_rx(struct tty_struct *tty, char *buf, int size)
{
int len = 0;
if (WARN_ON(!tty || !tty->port))
return len;
len = tty_insert_flip_string(tty->port, buf, size);
if (len)
tty_flip_buffer_push(tty->port);
return len;
}
static int vcc_ldc_read(struct vcc_port *port)
{
struct vio_driver_state *vio = &port->vio;
struct tty_struct *tty;
struct vio_vcc pkt;
int rv = 0;
tty = port->tty;
if (!tty) {
rv = ldc_rx_reset(vio->lp);
vccdbg("VCC: reset rx q: rv=%d\n", rv);
goto done;
}
/* Read as long as LDC has incoming data. */
while (1) {
if (!vcc_rx_check(tty, VIO_VCC_MTU_SIZE)) {
vcc_kick_rx(port);
break;
}
vccdbgl(vio->lp);
rv = ldc_read(vio->lp, &pkt, sizeof(pkt));
if (rv <= 0)
break;
vccdbg("VCC: ldc_read()=%d\n", rv);
vccdbg("TAG [%02x:%02x:%04x:%08x]\n",
pkt.tag.type, pkt.tag.stype,
pkt.tag.stype_env, pkt.tag.sid);
if (pkt.tag.type == VIO_TYPE_DATA) {
vccdbgp(pkt);
/* vcc_rx_check ensures memory availability */
vcc_rx(tty, pkt.data, pkt.tag.stype);
} else {
pr_err("VCC: unknown msg [%02x:%02x:%04x:%08x]\n",
pkt.tag.type, pkt.tag.stype,
pkt.tag.stype_env, pkt.tag.sid);
rv = -ECONNRESET;
break;
}
WARN_ON(rv != LDC_PACKET_SIZE);
}
done:
return rv;
}
static void vcc_rx_timer(unsigned long index)
{
struct vio_driver_state *vio;
struct vcc_port *port;
unsigned long flags;
int rv;
port = vcc_get_ne(index);
if (!port)
return;
spin_lock_irqsave(&port->lock, flags);
port->rx_timer.expires = 0;
vio = &port->vio;
enable_irq(vio->vdev->rx_irq);
if (!port->tty || port->removed)
goto done;
rv = vcc_ldc_read(port);
if (rv == -ECONNRESET)
vio_conn_reset(vio);
done:
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
}
static void vcc_tx_timer(unsigned long index)
{
struct vcc_port *port;
struct vio_vcc *pkt;
unsigned long flags;
int tosend = 0;
int rv;
port = vcc_get_ne(index);
if (!port)
return;
spin_lock_irqsave(&port->lock, flags);
port->tx_timer.expires = 0;
if (!port->tty || port->removed)
goto done;
tosend = min(VCC_BUFF_LEN, port->chars_in_buffer);
if (!tosend)
goto done;
pkt = &port->buffer;
pkt->tag.type = VIO_TYPE_DATA;
pkt->tag.stype = tosend;
vccdbgl(port->vio.lp);
rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
WARN_ON(!rv);
if (rv < 0) {
vccdbg("VCC: ldc_write()=%d\n", rv);
vcc_kick_tx(port);
} else {
struct tty_struct *tty = port->tty;
port->chars_in_buffer = 0;
if (tty)
tty_wakeup(tty);
}
done:
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
}
/**
* vcc_event() - LDC event processing engine
* @arg: VCC private data
* @event: LDC event
*
* Handles LDC events for VCC
*/
static void vcc_event(void *arg, int event)
{
struct vio_driver_state *vio;
struct vcc_port *port;
unsigned long flags;
int rv;
port = arg;
vio = &port->vio;
spin_lock_irqsave(&port->lock, flags);
switch (event) {
case LDC_EVENT_RESET:
case LDC_EVENT_UP:
vio_link_state_change(vio, event);
break;
case LDC_EVENT_DATA_READY:
rv = vcc_ldc_read(port);
if (rv == -ECONNRESET)
vio_conn_reset(vio);
break;
default:
pr_err("VCC: unexpected LDC event(%d)\n", event);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static struct ldc_channel_config vcc_ldc_cfg = {
.event = vcc_event,
.mtu = VIO_VCC_MTU_SIZE,
.mode = LDC_MODE_RAW,
.debug = 0,
};
/* Ordered from largest major to lowest */
static struct vio_version vcc_versions[] = {
{ .major = 1, .minor = 0 },
};
static struct tty_port_operations vcc_port_ops = { 0 };
static ssize_t vcc_sysfs_domain_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct vcc_port *port;
int rv;
port = dev_get_drvdata(dev);
if (!port)
return -ENODEV;
rv = scnprintf(buf, PAGE_SIZE, "%s\n", port->domain);
return rv;
}
static int vcc_send_ctl(struct vcc_port *port, int ctl)
{
struct vio_vcc pkt;
int rv;
pkt.tag.type = VIO_TYPE_CTRL;
pkt.tag.sid = ctl;
pkt.tag.stype = 0;
rv = ldc_write(port->vio.lp, &pkt, sizeof(pkt.tag));
WARN_ON(!rv);
vccdbg("VCC: ldc_write(%ld)=%d\n", sizeof(pkt.tag), rv);
return rv;
}
static ssize_t vcc_sysfs_break_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vcc_port *port;
unsigned long flags;
int rv = count;
int brk;
port = dev_get_drvdata(dev);
if (!port)
return -ENODEV;
spin_lock_irqsave(&port->lock, flags);
if (sscanf(buf, "%ud", &brk) != 1 || brk != 1)
rv = -EINVAL;
else if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
return rv;
}
static DEVICE_ATTR(domain, 0400, vcc_sysfs_domain_show, NULL);
static DEVICE_ATTR(break, 0200, NULL, vcc_sysfs_break_store);
static struct attribute *vcc_sysfs_entries[] = {
&dev_attr_domain.attr,
&dev_attr_break.attr,
NULL
};
static struct attribute_group vcc_attribute_group = {
.name = NULL,
.attrs = vcc_sysfs_entries,
};
/**
* vcc_probe() - Initialize VCC port
* @vdev: Pointer to VIO device of the new VCC port
* @id: VIO device ID
*
* Initializes a VCC port to receive serial console data from
* the guest domain. Sets up a TTY end point on the control
* domain. Sets up VIO/LDC link between the guest & control
* domain endpoints.
*
* Return: status of the probe
*/
static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vcc_port *port;
struct device *dev;
const char *domain;
char *name;
u64 node;
int rv;
vccdbg("VCC: name=%s\n", dev_name(&vdev->dev));
if (!vcc_tty_driver) {
pr_err("VCC: TTY driver not registered\n");
return -ENODEV;
}
port = kzalloc(sizeof(struct vcc_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
ARRAY_SIZE(vcc_versions), NULL, name);
if (rv)
goto free_port;
port->vio.debug = vcc_dbg_vio;
vcc_ldc_cfg.debug = vcc_dbg_ldc;
rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
if (rv)
goto free_port;
spin_lock_init(&port->lock);
port->index = vcc_table_add(port);
if (port->index == -1) {
pr_err("VCC: no more TTY indices left for allocation\n");
goto free_ldc;
}
/* Register the device using VCC table index as TTY index */
dev = tty_register_device(vcc_tty_driver, port->index, &vdev->dev);
if (IS_ERR(dev)) {
rv = PTR_ERR(dev);
goto free_table;
}
hp = mdesc_grab();
node = vio_vdev_node(hp, vdev);
if (node == MDESC_NODE_NULL) {
rv = -ENXIO;
mdesc_release(hp);
goto unreg_tty;
}
domain = mdesc_get_property(hp, node, "vcc-domain-name", NULL);
if (!domain) {
rv = -ENXIO;
mdesc_release(hp);
goto unreg_tty;
}
port->domain = kstrdup(domain, GFP_KERNEL);
mdesc_release(hp);
rv = sysfs_create_group(&vdev->dev.kobj, &vcc_attribute_group);
if (rv)
goto free_domain;
init_timer(&port->rx_timer);
port->rx_timer.function = vcc_rx_timer;
port->rx_timer.data = port->index;
init_timer(&port->tx_timer);
port->tx_timer.function = vcc_tx_timer;
port->tx_timer.data = port->index;
dev_set_drvdata(&vdev->dev, port);
/* It's possible to receive IRQs in the middle of vio_port_up. Disable
* IRQs until the port is up.
*/
disable_irq_nosync(vdev->rx_irq);
vio_port_up(&port->vio);
enable_irq(vdev->rx_irq);
return 0;
free_domain:
kfree(port->domain);
unreg_tty:
tty_unregister_device(vcc_tty_driver, port->index);
free_table:
vcc_table_remove(port->index);
free_ldc:
vio_ldc_free(&port->vio);
free_port:
kfree(name);
kfree(port);
return rv;
}
/**
* vcc_remove() - Terminate a VCC port
* @vdev: Pointer to VIO device of the VCC port
*
* Terminates a VCC port. Sets up the teardown of TTY and
* VIO/LDC link between guest and primary domains.
*
* Return: status of removal
*/
static int vcc_remove(struct vio_dev *vdev)
{
struct vcc_port *port = dev_get_drvdata(&vdev->dev);
if (!port)
return -ENODEV;
del_timer_sync(&port->rx_timer);
del_timer_sync(&port->tx_timer);
/* If there's a process with the device open, do a synchronous
* hangup of the TTY. This *may* cause the process to call close
* asynchronously, but it's not guaranteed.
*/
if (port->tty)
tty_vhangup(port->tty);
/* Get exclusive reference to VCC, ensures that there are no other
* clients to this port
*/
port = vcc_get(port->index, true);
if (WARN_ON(!port))
return -ENODEV;
tty_unregister_device(vcc_tty_driver, port->index);
del_timer_sync(&port->vio.timer);
vio_ldc_free(&port->vio);
sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group);
dev_set_drvdata(&vdev->dev, NULL);
if (port->tty) {
port->removed = true;
vcc_put(port, true);
} else {
vcc_table_remove(port->index);
kfree(port->vio.name);
kfree(port->domain);
kfree(port);
}
return 0;
}
static const struct vio_device_id vcc_match[] = {
{
.type = "vcc-port",
},
{},
};
MODULE_DEVICE_TABLE(vio, vcc_match);
static struct vio_driver vcc_driver = {
.id_table = vcc_match,
.probe = vcc_probe,
.remove = vcc_remove,
.name = "vcc",
};
static int vcc_open(struct tty_struct *tty, struct file *vcc_file)
{
struct vcc_port *port;
if (unlikely(!tty)) {
pr_err("VCC: open: Invalid TTY handle\n");
return -ENXIO;
}
if (tty->count > 1)
return -EBUSY;
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: open: Failed to find VCC port\n");
return -ENODEV;
}
if (unlikely(!port->vio.lp)) {
pr_err("VCC: open: LDC channel not configured\n");
vcc_put(port, false);
return -EPIPE;
}
vccdbgl(port->vio.lp);
vcc_put(port, false);
if (unlikely(!tty->port)) {
pr_err("VCC: open: TTY port not found\n");
return -ENXIO;
}
if (unlikely(!tty->port->ops)) {
pr_err("VCC: open: TTY ops not defined\n");
return -ENXIO;
}
return tty_port_open(tty->port, tty, vcc_file);
}
static void vcc_close(struct tty_struct *tty, struct file *vcc_file)
{
if (unlikely(!tty)) {
pr_err("VCC: close: Invalid TTY handle\n");
return;
}
if (unlikely(tty->count > 1))
return;
if (unlikely(!tty->port)) {
pr_err("VCC: close: TTY port not found\n");
return;
}
tty_port_close(tty->port, tty, vcc_file);
}
static void vcc_ldc_hup(struct vcc_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (vcc_send_ctl(port, VCC_CTL_HUP) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void vcc_hangup(struct tty_struct *tty)
{
struct vcc_port *port;
if (unlikely(!tty)) {
pr_err("VCC: hangup: Invalid TTY handle\n");
return;
}
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: hangup: Failed to find VCC port\n");
return;
}
if (unlikely(!tty->port)) {
pr_err("VCC: hangup: TTY port not found\n");
vcc_put(port, false);
return;
}
vcc_ldc_hup(port);
vcc_put(port, false);
tty_port_hangup(tty->port);
}
static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct vcc_port *port;
struct vio_vcc *pkt;
unsigned long flags;
int total_sent = 0;
int tosend = 0;
int rv = -EINVAL;
if (unlikely(!tty)) {
pr_err("VCC: write: Invalid TTY handle\n");
return -ENXIO;
}
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: write: Failed to find VCC port");
return -ENODEV;
}
spin_lock_irqsave(&port->lock, flags);
pkt = &port->buffer;
pkt->tag.type = VIO_TYPE_DATA;
while (count > 0) {
/* Minimum of data to write and space available */
tosend = min(count, (VCC_BUFF_LEN - port->chars_in_buffer));
if (!tosend)
break;
memcpy(&pkt->data[port->chars_in_buffer], &buf[total_sent],
tosend);
port->chars_in_buffer += tosend;
pkt->tag.stype = tosend;
vccdbg("TAG [%02x:%02x:%04x:%08x]\n", pkt->tag.type,
pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid);
vccdbg("DATA [%s]\n", pkt->data);
vccdbgl(port->vio.lp);
/* Since we know we have enough room in VCC buffer for tosend
* we record that it was sent regardless of whether the
* hypervisor actually took it because we have it buffered.
*/
rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
vccdbg("VCC: write: ldc_write(%d)=%d\n",
(VIO_TAG_SIZE + tosend), rv);
total_sent += tosend;
count -= tosend;
if (rv < 0) {
vcc_kick_tx(port);
break;
}
port->chars_in_buffer = 0;
}
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
vccdbg("VCC: write: total=%d rv=%d", total_sent, rv);
return total_sent ? total_sent : rv;
}
static int vcc_write_room(struct tty_struct *tty)
{
struct vcc_port *port;
u64 num;
if (unlikely(!tty)) {
pr_err("VCC: write_room: Invalid TTY handle\n");
return -ENXIO;
}
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: write_room: Failed to find VCC port\n");
return -ENODEV;
}
num = VCC_BUFF_LEN - port->chars_in_buffer;
vcc_put(port, false);
return num;
}
static int vcc_chars_in_buffer(struct tty_struct *tty)
{
struct vcc_port *port;
u64 num;
if (unlikely(!tty)) {
pr_err("VCC: chars_in_buffer: Invalid TTY handle\n");
return -ENXIO;
}
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: chars_in_buffer: Failed to find VCC port\n");
return -ENODEV;
}
num = port->chars_in_buffer;
vcc_put(port, false);
return num;
}
static int vcc_break_ctl(struct tty_struct *tty, int state)
{
struct vcc_port *port;
unsigned long flags;
if (unlikely(!tty)) {
pr_err("VCC: break_ctl: Invalid TTY handle\n");
return -ENXIO;
}
port = vcc_get_ne(tty->index);
if (unlikely(!port)) {
pr_err("VCC: break_ctl: Failed to find VCC port\n");
return -ENODEV;
}
/* Turn off break */
if (state == 0) {
vcc_put(port, false);
return 0;
}
spin_lock_irqsave(&port->lock, flags);
if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0)
vcc_kick_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
vcc_put(port, false);
return 0;
}
static int vcc_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct vcc_port *port_vcc;
struct tty_port *port_tty;
int ret;
if (unlikely(!tty)) {
pr_err("VCC: install: Invalid TTY handle\n");
return -ENXIO;
}
if (tty->index >= VCC_MAX_PORTS)
return -EINVAL;
ret = tty_standard_install(driver, tty);
if (ret)
return ret;
port_tty = kzalloc(sizeof(struct tty_port), GFP_KERNEL);
if (!port_tty)
return -ENOMEM;
port_vcc = vcc_get(tty->index, true);
if (!port_vcc) {
pr_err("VCC: install: Failed to find VCC port\n");
tty->port = NULL;
kfree(port_tty);
return -ENODEV;
}
tty_port_init(port_tty);
port_tty->ops = &vcc_port_ops;
tty->port = port_tty;
port_vcc->tty = tty;
vcc_put(port_vcc, true);
return 0;
}
static void vcc_cleanup(struct tty_struct *tty)
{
struct vcc_port *port;
if (unlikely(!tty)) {
pr_err("VCC: cleanup: Invalid TTY handle\n");
return;
}
port = vcc_get(tty->index, true);
if (port) {
port->tty = NULL;
if (port->removed) {
vcc_table_remove(tty->index);
kfree(port->vio.name);
kfree(port->domain);
kfree(port);
} else {
vcc_put(port, true);
}
}
tty_port_destroy(tty->port);
kfree(tty->port);
tty->port = NULL;
}
static const struct tty_operations vcc_ops = {
.open = vcc_open,
.close = vcc_close,
.hangup = vcc_hangup,
.write = vcc_write,
.write_room = vcc_write_room,
.chars_in_buffer = vcc_chars_in_buffer,
.break_ctl = vcc_break_ctl,
.install = vcc_install,
.cleanup = vcc_cleanup,
};
#define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW)
static int vcc_tty_init(void)
{
int rv;
pr_info("VCC: %s\n", version);
vcc_tty_driver = tty_alloc_driver(VCC_MAX_PORTS, VCC_TTY_FLAGS);
if (IS_ERR(vcc_tty_driver)) {
pr_err("VCC: TTY driver alloc failed\n");
return PTR_ERR(vcc_tty_driver);
}
vcc_tty_driver->driver_name = vcc_driver_name;
vcc_tty_driver->name = vcc_device_node;
vcc_tty_driver->minor_start = VCC_MINOR_START;
vcc_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
vcc_tty_driver->init_termios = vcc_tty_termios;
tty_set_operations(vcc_tty_driver, &vcc_ops);
rv = tty_register_driver(vcc_tty_driver);
if (rv) {
pr_err("VCC: TTY driver registration failed\n");
put_tty_driver(vcc_tty_driver);
vcc_tty_driver = NULL;
return rv;
}
vccdbg("VCC: TTY driver registered\n");
return 0;
}
static void vcc_tty_exit(void)
{
tty_unregister_driver(vcc_tty_driver);
put_tty_driver(vcc_tty_driver);
vccdbg("VCC: TTY driver unregistered\n");
vcc_tty_driver = NULL;
}
static int __init vcc_init(void)
{
int rv;
rv = vcc_tty_init();
if (rv) {
pr_err("VCC: TTY init failed\n");
return rv;
}
rv = vio_register_driver(&vcc_driver);
if (rv) {
pr_err("VCC: VIO driver registration failed\n");
vcc_tty_exit();
} else {
vccdbg("VCC: VIO driver registered successfully\n");
}
return rv;
}
static void __exit vcc_exit(void)
{
vio_unregister_driver(&vcc_driver);
vccdbg("VCC: VIO driver unregistered\n");
vcc_tty_exit();
vccdbg("VCC: TTY driver unregistered\n");
}
module_init(vcc_init);
module_exit(vcc_exit);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment