Commit b7323ae6 authored by Will Deacon

Merge branch 'for-next/insn' into for-next/core

* for-next/insn:
  arm64: insn: add encoders for atomic operations
  arm64: move AARCH64_BREAK_FAULT into insn-def.h
  arm64: insn: Generate 64 bit mask immediates correctly
parents cd92fdfc fa1114d9
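Taken together, these patches turn the in-kernel instruction encoder into a
full generator for the LSE atomics (LDADD/LDCLR/LDEOR/LDSET/SWP and CAS) with
explicit acquire/release ordering, plus DMB barriers, replacing the old
single-purpose LDADD/STADD helpers. A minimal caller sketch of the resulting
API (enum constants as declared in asm/insn.h; emit() is a hypothetical
stand-in for whatever a caller such as the eBPF JIT does with the returned
opcode):

	/* "ldaddal w2, w1, [x0]" followed by "dmb ish" */
	emit(aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_1,	/* Rt: result */
					   AARCH64_INSN_REG_0,	/* Rn: address */
					   AARCH64_INSN_REG_2,	/* Rs: value */
					   AARCH64_INSN_SIZE_32,
					   AARCH64_INSN_MEM_ATOMIC_ADD,
					   AARCH64_INSN_MEM_ORDER_ACQREL));
	emit(aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH));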
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -34,18 +34,6 @@
*/
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
-/*
- * BRK instruction encoding
- * The #imm16 value should be placed at bits[20:5] within BRK ins
- */
-#define AARCH64_BREAK_MON	0xd4200000
-
-/*
- * BRK instruction for provoking a fault on purpose
- * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
- */
-#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
#define AARCH64_BREAK_KGDB_DYN_DBG \
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
--- a/arch/arm64/include/asm/insn-def.h
+++ b/arch/arm64/include/asm/insn-def.h
@@ -3,7 +3,21 @@
#ifndef __ASM_INSN_DEF_H
#define __ASM_INSN_DEF_H
+#include <asm/brk-imm.h>
/* A64 instructions are always 32 bits. */
#define AARCH64_INSN_SIZE 4
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within BRK ins
+ */
+#define AARCH64_BREAK_MON	0xd4200000
+
+/*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
#endif /* __ASM_INSN_DEF_H */
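Worked example, assuming FAULT_BRK_IMM is 0x100 (its asm/brk-imm.h value at
the time of this merge):

	/* AARCH64_BREAK_FAULT = 0xd4200000 | (0x100 << 5) = 0xd4202000,
	 * i.e. "brk #0x100", with the 16-bit immediate at bits [20:5]. */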
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -205,7 +205,9 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
AARCH64_INSN_LDST_LOAD_EX,
AARCH64_INSN_LDST_LOAD_ACQ_EX,
AARCH64_INSN_LDST_STORE_EX,
AARCH64_INSN_LDST_STORE_REL_EX,
};
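/*
 * The two new types select the acquire/release forms of the exclusives:
 * as the generator change in insn.c below shows, they merely set bit 15
 * (the o0 field), turning LDXR into LDAXR and STXR into STLXR. A usage
 * sketch, assuming the existing five-argument signature
 * (reg, base, state, size, type):
 *
 *	// encode "ldaxr x1, [x0]"; the status register is ignored on loads
 *	u32 insn = aarch64_insn_gen_load_store_ex(AARCH64_INSN_REG_1,
 *						  AARCH64_INSN_REG_0,
 *						  AARCH64_INSN_REG_ZR,
 *						  AARCH64_INSN_SIZE_64,
 *						  AARCH64_INSN_LDST_LOAD_ACQ_EX);
 */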
enum aarch64_insn_adsb_type {
@@ -280,6 +282,36 @@ enum aarch64_insn_adr_type {
AARCH64_INSN_ADR_TYPE_ADR,
};
enum aarch64_insn_mem_atomic_op {
AARCH64_INSN_MEM_ATOMIC_ADD,
AARCH64_INSN_MEM_ATOMIC_CLR,
AARCH64_INSN_MEM_ATOMIC_EOR,
AARCH64_INSN_MEM_ATOMIC_SET,
AARCH64_INSN_MEM_ATOMIC_SWP,
};
enum aarch64_insn_mem_order_type {
AARCH64_INSN_MEM_ORDER_NONE,
AARCH64_INSN_MEM_ORDER_ACQ,
AARCH64_INSN_MEM_ORDER_REL,
AARCH64_INSN_MEM_ORDER_ACQREL,
};
enum aarch64_insn_mb_type {
AARCH64_INSN_MB_SY,
AARCH64_INSN_MB_ST,
AARCH64_INSN_MB_LD,
AARCH64_INSN_MB_ISH,
AARCH64_INSN_MB_ISHST,
AARCH64_INSN_MB_ISHLD,
AARCH64_INSN_MB_NSH,
AARCH64_INSN_MB_NSHST,
AARCH64_INSN_MB_NSHLD,
AARCH64_INSN_MB_OSH,
AARCH64_INSN_MB_OSHST,
AARCH64_INSN_MB_OSHLD,
};
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ \
@@ -303,6 +335,11 @@ __AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000)
__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000)
__AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
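/*
 * Every FUNCS entry also yields a predicate, aarch64_insn_is_<name>().
 * The masks above deliberately leave the size field (bits 31:30) and the
 * acquire/release bits unchecked, so one predicate covers every width and
 * ordering variant. A hypothetical composite check built on top of them:
 *
 *	static __always_inline bool aarch64_insn_is_lse_atomic(u32 insn)
 *	{
 *		return aarch64_insn_is_ldadd(insn) ||
 *		       aarch64_insn_is_ldclr(insn) ||
 *		       aarch64_insn_is_ldeor(insn) ||
 *		       aarch64_insn_is_ldset(insn) ||
 *		       aarch64_insn_is_swp(insn) ||
 *		       aarch64_insn_is_cas(insn);
 *	}
 */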
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -474,13 +511,6 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
-			   enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size);
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
@@ -541,6 +571,42 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy);
#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order);
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order);
#else
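/*
 * With LSE atomics compiled out, return a guaranteed-to-fault BRK so that
 * any opcode obtained from these stubs traps if it is ever executed.
 */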
static inline
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order)
{
return AARCH64_BREAK_FAULT;
}
static inline
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order)
{
return AARCH64_BREAK_FAULT;
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
switch (type) {
case AARCH64_INSN_LDST_LOAD_EX:
case AARCH64_INSN_LDST_LOAD_ACQ_EX:
insn = aarch64_insn_get_load_ex_value();
if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
insn |= BIT(15);
break;
case AARCH64_INSN_LDST_STORE_EX:
case AARCH64_INSN_LDST_STORE_REL_EX:
insn = aarch64_insn_get_store_ex_value();
if (type == AARCH64_INSN_LDST_STORE_REL_EX)
insn |= BIT(15);
break;
default:
pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
@@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
-			   enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
+					  u32 insn)
{
-	u32 insn = aarch64_insn_get_ldadd_value();
+	u32 order;
switch (type) {
case AARCH64_INSN_MEM_ORDER_NONE:
order = 0;
break;
case AARCH64_INSN_MEM_ORDER_ACQ:
order = 2;
break;
case AARCH64_INSN_MEM_ORDER_REL:
order = 1;
break;
case AARCH64_INSN_MEM_ORDER_ACQREL:
order = 3;
break;
default:
pr_err("%s: unknown mem order %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn &= ~GENMASK(23, 22);
insn |= order << 22;
return insn;
}
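/*
 * Bit 23 is thus the acquire (A) flag and bit 22 the release (R) flag of
 * the LD<op> encodings. For a 32-bit LDADD with Rs=w2, Rn=x0, Rt=w1 the
 * four orderings come out as (opcodes per the Arm ARM, for illustration):
 *
 *	MEM_ORDER_NONE   -> 0xb8220001	ldadd   w2, w1, [x0]
 *	MEM_ORDER_ACQ    -> 0xb8a20001	ldadda  w2, w1, [x0]
 *	MEM_ORDER_REL    -> 0xb8620001	ldaddl  w2, w1, [x0]
 *	MEM_ORDER_ACQREL -> 0xb8e20001	ldaddal w2, w1, [x0]
 */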
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order)
{
u32 insn;
switch (op) {
case AARCH64_INSN_MEM_ATOMIC_ADD:
insn = aarch64_insn_get_ldadd_value();
break;
case AARCH64_INSN_MEM_ATOMIC_CLR:
insn = aarch64_insn_get_ldclr_value();
break;
case AARCH64_INSN_MEM_ATOMIC_EOR:
insn = aarch64_insn_get_ldeor_value();
break;
case AARCH64_INSN_MEM_ATOMIC_SET:
insn = aarch64_insn_get_ldset_value();
break;
case AARCH64_INSN_MEM_ATOMIC_SWP:
insn = aarch64_insn_get_swp_value();
break;
default:
pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
return AARCH64_BREAK_FAULT;
}
switch (size) {
case AARCH64_INSN_SIZE_32:
@@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
insn = aarch64_insn_encode_ldst_size(size, insn);
insn = aarch64_insn_encode_ldst_order(order, insn);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);
@@ -631,17 +692,68 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
value);
}
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
+					 u32 insn)
{
-	/*
-	 * STADD is simply encoded as an alias for LDADD with XZR as
-	 * the destination register.
-	 */
-	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
-				      value, size);
+	u32 order;
switch (type) {
case AARCH64_INSN_MEM_ORDER_NONE:
order = 0;
break;
case AARCH64_INSN_MEM_ORDER_ACQ:
order = BIT(22);
break;
case AARCH64_INSN_MEM_ORDER_REL:
order = BIT(15);
break;
case AARCH64_INSN_MEM_ORDER_ACQREL:
order = BIT(15) | BIT(22);
break;
default:
pr_err("%s: unknown mem order %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn &= ~(BIT(15) | BIT(22));
insn |= order;
return insn;
}
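/*
 * CAS needs its own order helper because, unlike the LD<op> family, its
 * acquire flag is bit 22 (L) and its release flag bit 15 (o0). E.g. for
 * "casal w1, w2, [x0]" (illustrative, opcode per the Arm ARM):
 *
 *	0x88a07c00 | BIT(22) | BIT(15) | (1 << 16) | 2 == 0x88e1fc02
 */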
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order)
{
u32 insn;
switch (size) {
case AARCH64_INSN_SIZE_32:
case AARCH64_INSN_SIZE_64:
break;
default:
pr_err("%s: unimplemented size encoding %d\n", __func__, size);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_cas_value();
insn = aarch64_insn_encode_ldst_size(size, insn);
insn = aarch64_insn_encode_cas_order(order, insn);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
address);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
value);
}
#endif
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
@@ -1379,7 +1491,7 @@ static u32 aarch64_encode_immediate(u64 imm,
* Compute the rotation to get a continuous set of
* ones, with the first bit set at position 0
*/
-		ror = fls(~imm);
+		ror = fls64(~imm);
}
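/*
 * fls() takes an unsigned int, so for a 64-bit element the upper half of
 * ~imm was silently truncated before this fix. Illustration:
 *
 *	imm = 0x00000000ffffffff  =>  ~imm = 0xffffffff00000000
 *	fls(~imm)   = fls(0x00000000) = 0	(bogus rotation)
 *	fls64(~imm) = 64			(correct)
 */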
/*
@@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
u32 opt;
u32 insn;
switch (type) {
case AARCH64_INSN_MB_SY:
opt = 0xf;
break;
case AARCH64_INSN_MB_ST:
opt = 0xe;
break;
case AARCH64_INSN_MB_LD:
opt = 0xd;
break;
case AARCH64_INSN_MB_ISH:
opt = 0xb;
break;
case AARCH64_INSN_MB_ISHST:
opt = 0xa;
break;
case AARCH64_INSN_MB_ISHLD:
opt = 0x9;
break;
case AARCH64_INSN_MB_NSH:
opt = 0x7;
break;
case AARCH64_INSN_MB_NSHST:
opt = 0x6;
break;
case AARCH64_INSN_MB_NSHLD:
opt = 0x5;
break;
default:
pr_err("%s: unknown dmb type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8);
insn |= (opt << 8);
return insn;
}
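/*
 * Usage sketch, assuming aarch64_insn_get_dmb_value() returns 0xd50330bf
 * (the DMB encoding with the CRm field at bits 11:8 zeroed):
 *
 *	aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
 *		== (0xd50330bf & ~GENMASK(11, 8)) | (0xb << 8)
 *		== 0xd5033bbf		("dmb ish")
 */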
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -89,9 +89,16 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
-/* LSE atomics */
+/*
+ * LSE atomics
+ *
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
#define A64_STADD(sf, Rn, Rs) \
-	aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
+				      A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_ADD, \
+				      AARCH64_INSN_MEM_ORDER_NONE)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \