Commit 660c0a86 authored by Dave Jiang

Merge remote-tracking branch 'cxl/for-6.10/dpa-to-hpa' into cxl-for-next

Support for DPA to HPA translation for the CXL events cxl_dram and
cxl_general_media.
parents c26a55e5 6aec0013
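
The annotated cxl_general_media and cxl_dram trace points gain hpa, region, and region_uuid fields. An illustrative fragment of the new record output, with hypothetical values, following the TP_printk format strings in the TRACE_EVENT hunks below:

    ... validity_flags='CHANNEL|RANK' hpa=f0d20400 region=region0 region_uuid=550e8400-e29b-41d4-a716-446655440000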
@@ -27,7 +27,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa);
#else
static inline u64
cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa)
{
return ULLONG_MAX;
}
static inline
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
return NULL;
}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
......
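
With CONFIG_CXL_REGION disabled, the stubs above compile the lookup away: cxl_dpa_to_region() returns NULL and cxl_trace_hpa() returns ULLONG_MAX. A minimal sketch of the intended caller pattern (mirroring cxl_event_trace_record() below):

    u64 hpa = ULLONG_MAX;
    struct cxl_region *cxlr = cxl_dpa_to_region(cxlmd, dpa);

    if (cxlr)
        hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);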
@@ -854,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
{
-if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
-trace_cxl_general_media(cxlmd, type, &evt->gen_media);
-else if (event_type == CXL_CPER_EVENT_DRAM)
-trace_cxl_dram(cxlmd, type, &evt->dram);
-else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
-else
+return;
+}
+if (event_type == CXL_CPER_EVENT_GENERIC) {
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+return;
+}
+if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
+u64 dpa, hpa = ULLONG_MAX;
+struct cxl_region *cxlr;
+/*
+* These trace points are annotated with HPA and region
+* translations. Take topology mutation locks and lookup
+* { HPA, REGION } from { DPA, MEMDEV } in the event record.
+*/
+guard(rwsem_read)(&cxl_region_rwsem);
+guard(rwsem_read)(&cxl_dpa_rwsem);
+dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;
+cxlr = cxl_dpa_to_region(cxlmd, dpa);
+if (cxlr)
+hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);
+if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+&evt->gen_media);
+else if (event_type == CXL_CPER_EVENT_DRAM)
+trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
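
A note on the locking above: guard(rwsem_read)() is the scope-based helper from linux/cleanup.h, which pairs down_read() with an automatic up_read() on every path out of the enclosing scope. A minimal standalone sketch, with illustrative names that are not part of this change:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(demo_rwsem); /* hypothetical lock */

    static int demo_read(int val)
    {
        guard(rwsem_read)(&demo_rwsem); /* down_read() here */

        if (val < 0)
            return -EINVAL; /* up_read() runs on this return... */

        return val; /* ...and on this one, at scope exit */
    }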
......
@@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
-struct cxl_dpa_to_region_context {
-struct cxl_region *cxlr;
-u64 dpa;
-};
-static int __cxl_dpa_to_region(struct device *dev, void *arg)
-{
-struct cxl_dpa_to_region_context *ctx = arg;
-struct cxl_endpoint_decoder *cxled;
-u64 dpa = ctx->dpa;
-if (!is_endpoint_decoder(dev))
-return 0;
-cxled = to_cxl_endpoint_decoder(dev);
-if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
-return 0;
-if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
-return 0;
-dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
-dev_name(&cxled->cxld.region->dev));
-ctx->cxlr = cxled->cxld.region;
-return 1;
-}
-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
-{
-struct cxl_dpa_to_region_context ctx;
-struct cxl_port *port;
-ctx = (struct cxl_dpa_to_region_context) {
-.dpa = dpa,
-};
-port = cxlmd->endpoint;
-if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
-device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
-return ctx.cxlr;
-}
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
......
@@ -2679,6 +2679,141 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
return rc;
}
struct cxl_dpa_to_region_context {
struct cxl_region *cxlr;
u64 dpa;
};
static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
struct cxl_dpa_to_region_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
u64 dpa = ctx->dpa;
if (!is_endpoint_decoder(dev))
return 0;
cxled = to_cxl_endpoint_decoder(dev);
if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
return 0;
dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
dev_name(&cxled->cxld.region->dev));
ctx->cxlr = cxled->cxld.region;
return 1;
}
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dpa_to_region_context ctx;
struct cxl_port *port;
ctx = (struct cxl_dpa_to_region_context) {
.dpa = dpa,
};
port = cxlmd->endpoint;
if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
return ctx.cxlr;
}
static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
{
struct cxl_region_params *p = &cxlr->params;
int gran = p->interleave_granularity;
int ways = p->interleave_ways;
u64 offset;
/* Is the hpa within this region at all */
if (hpa < p->res->start || hpa > p->res->end) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
return false;
}
/* Is the hpa in an expected chunk for its pos(-ition) */
offset = hpa - p->res->start;
offset = do_div(offset, gran * ways);
if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
return true;
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
return false;
}
static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
{
u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
struct cxl_region_params *p = &cxlr->params;
int pos = cxled->pos;
u16 eig = 0;
u8 eiw = 0;
ways_to_eiw(p->interleave_ways, &eiw);
granularity_to_eig(p->interleave_granularity, &eig);
/*
* The device position in the region interleave set was removed
* from the offset at HPA->DPA translation. To reconstruct the
* HPA, place the 'pos' in the offset.
*
* The placement of 'pos' in the HPA is determined by interleave
* ways and granularity and is defined in the CXL Spec 3.0 Section
* 8.2.4.19.13 Implementation Note: Device Decode Logic
*/
/* Remove the dpa base */
dpa_offset = dpa - cxl_dpa_resource_start(cxled);
mask_upper = GENMASK_ULL(51, eig + 8);
if (eiw < 8) {
hpa_offset = (dpa_offset & mask_upper) << eiw;
hpa_offset |= pos << (eig + 8);
} else {
bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
bits_upper = bits_upper * 3;
hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
}
/* The lower bits remain unchanged */
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
/* Apply the hpa_offset to the region base address */
hpa = hpa_offset + p->res->start;
if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
return ULLONG_MAX;
return hpa;
}
u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled = NULL;
for (int i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
if (cxlmd == cxled_to_memdev(cxled))
break;
}
if (!cxled || cxlmd != cxled_to_memdev(cxled))
return ULLONG_MAX;
return cxl_dpa_to_hpa(dpa, cxlr, cxled);
}
static struct lock_class_key cxl_pmem_region_key;
static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
......
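
To make the translation math in cxl_dpa_to_hpa() concrete, a worked example with illustrative parameters: interleave_ways = 2 (eiw = 1), interleave_granularity = 256 bytes (eig = 0), device at pos = 1, dpa_offset = 0x100 (the device's second 256-byte chunk). mask_upper covers offset bits 51:8, so:

    hpa_offset = ((0x100 & mask_upper) << 1) | (1 << 8) = 0x200 | 0x100 = 0x300

cxl_is_hpa_in_range() then validates the result: 0x300 mod (gran * ways = 512) is 256, which lands in the expected chunk [pos * gran, (pos + 1) * gran) = [256, 512) for position 1, so the HPA is accepted.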
@@ -6,94 +6,3 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
-{
-struct cxl_region_params *p = &cxlr->params;
-int gran = p->interleave_granularity;
-int ways = p->interleave_ways;
-u64 offset;
-/* Is the hpa within this region at all */
-if (hpa < p->res->start || hpa > p->res->end) {
-dev_dbg(&cxlr->dev,
-"Addr trans fail: hpa 0x%llx not in region\n", hpa);
-return false;
-}
-/* Is the hpa in an expected chunk for its pos(-ition) */
-offset = hpa - p->res->start;
-offset = do_div(offset, gran * ways);
-if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
-return true;
-dev_dbg(&cxlr->dev,
-"Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
-return false;
-}
-static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
-struct cxl_endpoint_decoder *cxled)
-{
-u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
-struct cxl_region_params *p = &cxlr->params;
-int pos = cxled->pos;
-u16 eig = 0;
-u8 eiw = 0;
-ways_to_eiw(p->interleave_ways, &eiw);
-granularity_to_eig(p->interleave_granularity, &eig);
-/*
-* The device position in the region interleave set was removed
-* from the offset at HPA->DPA translation. To reconstruct the
-* HPA, place the 'pos' in the offset.
-*
-* The placement of 'pos' in the HPA is determined by interleave
-* ways and granularity and is defined in the CXL Spec 3.0 Section
-* 8.2.4.19.13 Implementation Note: Device Decode Logic
-*/
-/* Remove the dpa base */
-dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-mask_upper = GENMASK_ULL(51, eig + 8);
-if (eiw < 8) {
-hpa_offset = (dpa_offset & mask_upper) << eiw;
-hpa_offset |= pos << (eig + 8);
-} else {
-bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
-bits_upper = bits_upper * 3;
-hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
-}
-/* The lower bits remain unchanged */
-hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
-/* Apply the hpa_offset to the region base address */
-hpa = hpa_offset + p->res->start;
-if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
-return ULLONG_MAX;
-return hpa;
-}
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
-u64 dpa)
-{
-struct cxl_region_params *p = &cxlr->params;
-struct cxl_endpoint_decoder *cxled = NULL;
-for (int i = 0; i < p->nr_targets; i++) {
-cxled = p->targets[i];
-if (cxlmd == cxled_to_memdev(cxled))
-break;
-}
-if (!cxled || cxlmd != cxled_to_memdev(cxled))
-return ULLONG_MAX;
-return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
-#define CXL_DPA_FLAGS_MASK 0x3F
-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
+#define CXL_DPA_MASK GENMASK_ULL(63, 6)
#define CXL_DPA_VOLATILE BIT(0)
#define CXL_DPA_NOT_REPAIRABLE BIT(1)
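
The physical address field in General Media and DRAM event records is 64-byte aligned: bits 1:0 carry the two flags above, bits 5:2 are reserved, and bits 63:6 hold the address. A sketch of the decomposition with a hypothetical raw value:

    u64 raw   = 0x1f400043;
    u64 flags = raw & CXL_DPA_FLAGS_MASK; /* 0x3: VOLATILE | NOT_REPAIRABLE */
    u64 dpa   = raw & CXL_DPA_MASK;       /* 0x1f400040 */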
@@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-struct cxl_event_gen_media *rec),
+struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
-TP_ARGS(cxlmd, log, rec),
+TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media,
__field(u8, channel)
__field(u32, device)
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
-__field(u16, validity_flags)
/* Following are out of order to pack trace record */
+__field(u64, hpa)
+__field_struct(uuid_t, region_uuid)
+__field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media,
memcpy(__entry->comp_id, &rec->component_id,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+__entry->hpa = hpa;
+if (cxlr) {
+__assign_str(region_name, dev_name(&cxlr->dev));
+uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+} else {
+__assign_str(region_name, "");
+uuid_copy(&__entry->region_uuid, &uuid_null);
+}
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
"device=%x comp_id=%s validity_flags='%s'",
"device=%x comp_id=%s validity_flags='%s' " \
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
-show_valid_flags(__entry->validity_flags)
+show_valid_flags(__entry->validity_flags),
+__entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
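
The region_name field above uses the tracing core's variable-length string helpers: __string() reserves tail storage for the event (sized here from the ternary default), __assign_str() fills it in TP_fast_assign, and __get_str() reads it back in TP_printk. A minimal sketch of the pairing (illustrative event, not part of this patch):

    TRACE_EVENT(demo_name,
        TP_PROTO(const char *name),
        TP_ARGS(name),
        TP_STRUCT__entry(__string(name, name)),
        TP_fast_assign(__assign_str(name, name);),
        TP_printk("name=%s", __get_str(name))
    );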
@@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-struct cxl_event_dram *rec),
+struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
-TP_ARGS(cxlmd, log, rec),
+TP_ARGS(cxlmd, log, cxlr, hpa, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram,
__field(u32, nibble_mask)
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+__field(u64, hpa)
+__field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
TP_fast_assign(
@@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram,
__entry->column = get_unaligned_le16(rec->column);
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+__entry->hpa = hpa;
+if (cxlr) {
+__assign_str(region_name, dev_name(&cxlr->dev));
+uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+} else {
+__assign_str(region_name, "");
+uuid_copy(&__entry->region_uuid, &uuid_null);
+}
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
"validity_flags='%s'",
"validity_flags='%s' " \
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
@@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
-show_dram_valid_flags(__entry->validity_flags)
+show_dram_valid_flags(__entry->validity_flags),
+__entry->hpa, __get_str(region_name), &__entry->region_uuid
)
);
@@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module,
#define cxl_poison_overflow(flags, time) \
(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
TRACE_EVENT(cxl_poison,
TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
......
@@ -94,11 +94,21 @@ struct cxl_event_mem_module {
u8 reserved[0x3d];
} __packed;
/*
* General Media or DRAM Event Common Fields
* - provides common access to phys_addr
*/
struct cxl_event_common {
struct cxl_event_record_hdr hdr;
__le64 phys_addr;
} __packed;
union cxl_event {
struct cxl_event_generic generic;
struct cxl_event_gen_media gen_media;
struct cxl_event_dram dram;
struct cxl_event_mem_module mem_module;
struct cxl_event_common common;
} __packed;
/*
......
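
Since the gen_media and dram records both place phys_addr immediately after the record header, the common overlay lets cxl_event_trace_record() extract the DPA without switching on record type, as in the hunk above:

    u64 dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;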