Commit f1470fd7 authored by Alex Elder, committed by David S. Miller

net: ipa: generalize register field functions

Rename functions related to register fields so they don't appear to
be IPA-specific, and move their definitions into "reg.h":
    ipa_reg_fmask()	-> reg_fmask()
    ipa_reg_bit()	-> reg_bit()
    ipa_reg_field_max()	-> reg_field_max()
    ipa_reg_encode()	-> reg_encode()
    ipa_reg_decode()	-> reg_decode()
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fc4cecf7
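
As context (not part of this commit), a minimal sketch of how a caller uses the renamed helpers; it mirrors the ipa_hardware_config_qsb() pattern visible in the diff below, but the wrapper function shown here is hypothetical:

/* Hypothetical caller sketch: look up a register, encode a value into
 * one of its fields with reg_encode(), and write it out.  Register and
 * field IDs (QSB_MAX_WRITES, GEN_QMB_0_MAX_WRITES) are the ones used in
 * the diff below.
 */
static void example_config_qsb_writes(struct ipa *ipa, u32 max_writes)
{
	const struct reg *reg;
	u32 val;

	reg = ipa_reg(ipa, QSB_MAX_WRITES);

	/* reg_encode() shifts max_writes into the GEN_QMB_0_MAX_WRITES field */
	val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, max_writes);

	iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
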
@@ -231,7 +231,7 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
 	val = ioread32(ipa->reg_virt + offset);
 
-	val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+	val &= ~reg_bit(reg, PA_MASK_EN);
 
 	iowrite32(val, ipa->reg_virt + offset);
 }
@@ -252,11 +252,11 @@ static void ipa_hardware_config_clkon(struct ipa *ipa)
 	reg = ipa_reg(ipa, CLKON_CFG);
 	if (version == IPA_VERSION_3_1) {
 		/* Disable MISC clock gating */
-		val = ipa_reg_bit(reg, CLKON_MISC);
+		val = reg_bit(reg, CLKON_MISC);
 	} else { /* IPA v4.0+ */
 		/* Enable open global clocks in the CLKON configuration */
-		val = ipa_reg_bit(reg, CLKON_GLOBAL);
-		val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+		val = reg_bit(reg, CLKON_GLOBAL);
+		val |= reg_bit(reg, GLOBAL_2X_CLK);
 	}
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
@@ -279,17 +279,17 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
 	val = ioread32(ipa->reg_virt + offset);
 
 	if (ipa->version == IPA_VERSION_4_0) {
-		val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
-		val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
-		val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
+		val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+		val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+		val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
 	} else if (ipa->version < IPA_VERSION_4_5) {
-		val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
+		val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
 	} else {
 		/* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
 	}
 
-	val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
-	val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
+	val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+	val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
 
 	iowrite32(val, ipa->reg_virt + offset);
 }
@@ -311,26 +311,24 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
 	/* Max outstanding write accesses for QSB masters */
 	reg = ipa_reg(ipa, QSB_MAX_WRITES);
 
-	val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
+	val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
 	if (data->qsb_count > 1)
-		val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
-				      data1->max_writes);
+		val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 
 	/* Max outstanding read accesses for QSB masters */
 	reg = ipa_reg(ipa, QSB_MAX_READS);
 
-	val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
+	val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
 	if (ipa->version >= IPA_VERSION_4_0)
-		val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
-				      data0->max_reads_beats);
+		val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+				  data0->max_reads_beats);
 	if (data->qsb_count > 1) {
-		val = ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
-				     data1->max_reads);
+		val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
 		if (ipa->version >= IPA_VERSION_4_0)
-			val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
-					      data1->max_reads_beats);
+			val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+					  data1->max_reads_beats);
 	}
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
@@ -379,23 +377,23 @@ static void ipa_qtime_config(struct ipa *ipa)
 	reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
 	/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
-	val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
-	val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
+	val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+	val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
 
 	/* Configure tag and NAT Qtime timestamp resolution as well */
-	val = ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
-	val = ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+	val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+	val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 
 	/* Set granularity of pulse generators used for other timers */
 	reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
-	val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
-	val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+	val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+	val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
 	if (ipa->version >= IPA_VERSION_5_0) {
-		val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
-		val |= ipa_reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
+		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
+		val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
 	} else {
-		val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+		val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
 	}
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
@@ -404,12 +402,12 @@ static void ipa_qtime_config(struct ipa *ipa)
 	reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
 	offset = reg_offset(reg);
 
-	val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+	val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
 
 	iowrite32(val, ipa->reg_virt + offset);
 
 	/* Divider value is set; re-enable the common timer clock divider */
-	val |= ipa_reg_bit(reg, DIV_ENABLE);
+	val |= reg_bit(reg, DIV_ENABLE);
 
 	iowrite32(val, ipa->reg_virt + offset);
 }
@@ -423,7 +421,7 @@ static void ipa_hardware_config_counter(struct ipa *ipa)
 	reg = ipa_reg(ipa, COUNTER_CFG);
 
 	/* If defined, EOT_COAL_GRANULARITY is 0 */
-	val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
+	val = reg_encode(reg, AGGR_GRANULARITY, granularity);
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 }
@@ -467,10 +465,10 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
 		return;
 
 	reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
 
-	val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
-			     enter_idle_debounce_thresh);
+	val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+			 enter_idle_debounce_thresh);
 	if (const_non_idle_enable)
-		val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
+		val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 }
......
@@ -115,7 +115,7 @@ int ipa_mem_setup(struct ipa *ipa)
 	offset = ipa->mem_offset + mem->offset;
 
 	reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
-	val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
+	val = reg_encode(reg, IPA_BASE_ADDR, offset);
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 
 	return 0;
@@ -331,10 +331,10 @@ int ipa_mem_config(struct ipa *ipa)
 	val = ioread32(ipa->reg_virt + reg_offset(reg));
 
 	/* The fields in the register are in 8 byte units */
-	ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+	ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
 
 	/* Make sure the end is within the region's mapped space */
-	mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
+	mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
 
 	/* If the sizes don't match, issue a warning */
 	if (ipa->mem_offset + mem_size < ipa->mem_size) {
......
@@ -45,9 +45,9 @@ struct ipa;
  * an array of field masks, indexed by field ID. Two functions are
  * used to access register fields; both take an ipa_reg structure as
  * argument. To encode a value to be represented in a register field,
- * the value and field ID are passed to ipa_reg_encode(). To extract
+ * the value and field ID are passed to reg_encode(). To extract
  * a value encoded in a register field, the field ID is passed to
- * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * reg_decode(). In addition, for single-bit fields, reg_bit()
  * can be used to either encode the bit value, or to generate a mask
  * used to extract the bit value.
  */
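
To make the comment above concrete, here is a worked, standalone illustration of the field arithmetic these helpers perform (not part of the commit; the field mask value is hypothetical, and __builtin_ctz() stands in for the kernel's __ffs()):

#include <stdio.h>

#define EXAMPLE_FMASK	0x000000f0u	/* hypothetical field occupying bits 4..7 */

/* Same shift-into-field arithmetic as reg_encode(), minus the WARN_ON() checks */
static unsigned int example_encode(unsigned int fmask, unsigned int val)
{
	return (val << __builtin_ctz(fmask)) & fmask;
}

/* Same extract-from-register arithmetic as reg_decode() */
static unsigned int example_decode(unsigned int fmask, unsigned int regval)
{
	return (regval & fmask) >> __builtin_ctz(fmask);
}

int main(void)
{
	unsigned int regval = example_encode(EXAMPLE_FMASK, 0x9);

	/* Prints: encoded 0x00000090 decoded 0x9 field_max 0xf */
	printf("encoded 0x%08x decoded 0x%x field_max 0x%x\n", regval,
	       example_decode(EXAMPLE_FMASK, regval),
	       EXAMPLE_FMASK >> __builtin_ctz(EXAMPLE_FMASK));

	return 0;
}
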
@@ -646,58 +646,6 @@ extern const struct regs ipa_regs_v4_7;
 extern const struct regs ipa_regs_v4_9;
 extern const struct regs ipa_regs_v4_11;
 
-/* Return the field mask for a field in a register */
-static inline u32 ipa_reg_fmask(const struct reg *reg, u32 field_id)
-{
-	if (!reg || WARN_ON(field_id >= reg->fcount))
-		return 0;
-
-	return reg->fmask[field_id];
-}
-
-/* Return the mask for a single-bit field in a register */
-static inline u32 ipa_reg_bit(const struct reg *reg, u32 field_id)
-{
-	u32 fmask = ipa_reg_fmask(reg, field_id);
-
-	WARN_ON(!is_power_of_2(fmask));
-
-	return fmask;
-}
-
-/* Encode a value into the given field of a register */
-static inline u32
-ipa_reg_encode(const struct reg *reg, u32 field_id, u32 val)
-{
-	u32 fmask = ipa_reg_fmask(reg, field_id);
-
-	if (!fmask)
-		return 0;
-
-	val <<= __ffs(fmask);
-	if (WARN_ON(val & ~fmask))
-		return 0;
-
-	return val;
-}
-
-/* Given a register value, decode (extract) the value in the given field */
-static inline u32
-ipa_reg_decode(const struct reg *reg, u32 field_id, u32 val)
-{
-	u32 fmask = ipa_reg_fmask(reg, field_id);
-
-	return fmask ? (val & fmask) >> __ffs(fmask) : 0;
-}
-
-/* Return the maximum value representable by the given field; always 2^n - 1 */
-static inline u32 ipa_reg_field_max(const struct reg *reg, u32 field_id)
-{
-	u32 fmask = ipa_reg_fmask(reg, field_id);
-
-	return fmask ? fmask >> __ffs(fmask) : 0;
-}
-
 const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
 
 int ipa_reg_init(struct ipa *ipa);
......
@@ -76,11 +76,11 @@ ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
 {
 	u32 val;
 
-	val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
-	val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
+	val = reg_encode(reg, X_MIN_LIM, xlimits->min);
+	val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
 	if (ylimits) {
-		val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
-		val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
+		val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
+		val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
 	}
 
 	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
......
@@ -361,16 +361,16 @@ int ipa_table_hash_flush(struct ipa *ipa)
 	if (ipa->version < IPA_VERSION_5_0) {
 		reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
 
-		val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
-		val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
-		val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
-		val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
+		val = reg_bit(reg, IPV6_ROUTER_HASH);
+		val |= reg_bit(reg, IPV6_FILTER_HASH);
+		val |= reg_bit(reg, IPV4_ROUTER_HASH);
+		val |= reg_bit(reg, IPV4_FILTER_HASH);
 	} else {
 		reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
 
 		/* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
-		val = ipa_reg_bit(reg, ROUTER_CACHE);
-		val |= ipa_reg_bit(reg, FILTER_CACHE);
+		val = reg_bit(reg, ROUTER_CACHE);
+		val |= reg_bit(reg, FILTER_CACHE);
 	}
 
 	ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
@@ -503,7 +503,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
 		val = ioread32(endpoint->ipa->reg_virt + offset);
 
 		/* Zero all filter-related fields, preserving the rest */
-		val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
+		val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
 	} else {
 		/* IPA v5.0 separates filter and router cache configuration */
 		reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
@@ -562,7 +562,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
 		val = ioread32(ipa->reg_virt + offset);
 
 		/* Zero all route-related fields, preserving the rest */
-		val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+		val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
 	} else {
 		/* IPA v5.0 separates filter and router cache configuration */
 		reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
......
@@ -243,7 +243,7 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
 	/* Use an interrupt to tell the microcontroller the command is ready */
 	reg = ipa_reg(ipa, IPA_IRQ_UC);
 
-	val = ipa_reg_bit(reg, UC_INTR);
+	val = reg_bit(reg, UC_INTR);
 
 	iowrite32(val, ipa->reg_virt + reg_offset(reg));
 }
......
@@ -67,6 +67,57 @@ static inline const struct reg *reg(const struct regs *regs, u32 reg_id)
 	return regs->reg[reg_id];
 }
 
+/* Return the field mask for a field in a register, or 0 on error */
+static inline u32 reg_fmask(const struct reg *reg, u32 field_id)
+{
+	if (!reg || WARN_ON(field_id >= reg->fcount))
+		return 0;
+
+	return reg->fmask[field_id];
+}
+
+/* Return the mask for a single-bit field in a register, or 0 on error */
+static inline u32 reg_bit(const struct reg *reg, u32 field_id)
+{
+	u32 fmask = reg_fmask(reg, field_id);
+
+	if (WARN_ON(!is_power_of_2(fmask)))
+		return 0;
+
+	return fmask;
+}
+
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 reg_field_max(const struct reg *reg, u32 field_id)
+{
+	u32 fmask = reg_fmask(reg, field_id);
+
+	return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+/* Encode a value into the given field of a register */
+static inline u32 reg_encode(const struct reg *reg, u32 field_id, u32 val)
+{
+	u32 fmask = reg_fmask(reg, field_id);
+
+	if (!fmask)
+		return 0;
+
+	val <<= __ffs(fmask);
+	if (WARN_ON(val & ~fmask))
+		return 0;
+
+	return val;
+}
+
+/* Given a register value, decode (extract) the value in the given field */
+static inline u32 reg_decode(const struct reg *reg, u32 field_id, u32 val)
+{
+	u32 fmask = reg_fmask(reg, field_id);
+
+	return fmask ? (val & fmask) >> __ffs(fmask) : 0;
+}
+
 /* Returns 0 for NULL reg; warning should have already been issued */
 static inline u32 reg_offset(const struct reg *reg)
 {
......