Commit 0d21bb85, authored by Maximilian Luz and committed by Hans de Goede

platform/surface: aggregator: Add trace points

Add trace points to the Surface Aggregator subsystem core. These trace
points can be used to track packets, requests, and allocations. They are
further intended for debugging and testing/validation, specifically in
combination with the error injection capabilities introduced in the
subsequent commit.
Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
Reviewed-by: Hans de Goede <hdegoede@redhat.com>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20201221183959.1186143-5-luzmaximilian@gmail.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
parent 3a7081f6
# SPDX-License-Identifier: GPL-2.0+
# Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
# For include/trace/define_trace.h to include trace.h
# (define_trace.h resolves the trace header relative to the source dir,
# so core.o needs the source directory on its include path).
CFLAGS_core.o = -I$(src)
# Build the Surface Aggregator module; currently a single object, core.o.
obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
surface_aggregator-objs := core.o
......
......@@ -32,6 +32,8 @@
#include "ssh_msgb.h"
#include "ssh_request_layer.h"
#include "trace.h"
/* -- Safe counters. -------------------------------------------------------- */
......@@ -568,6 +570,7 @@ static void __ssam_event_item_free_generic(struct ssam_event_item *item)
*/
static void ssam_event_item_free(struct ssam_event_item *item)
{
	/* Trace first, while the item pointer is still valid. */
	trace_ssam_event_item_free(item);
	/* Delegate to the type-specific free callback set at allocation. */
	item->ops.free(item);
}
......@@ -603,6 +606,8 @@ static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
}
item->event.length = len;
trace_ssam_event_item_alloc(item, len);
return item;
}
......
......@@ -24,6 +24,9 @@
#include <linux/surface_aggregator/controller.h>
#include "controller.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
/* -- Static controller reference. ------------------------------------------ */
......
......@@ -26,6 +26,8 @@
#include "ssh_packet_layer.h"
#include "ssh_parser.h"
#include "trace.h"
/*
* To simplify reasoning about the code below, we define a few concepts. The
* system below is similar to a state-machine for packets, however, there are
......@@ -228,6 +230,8 @@ static void __ssh_ptl_packet_release(struct kref *kref)
{
struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
trace_ssam_packet_release(p);
ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
p->ops->release(p);
}
......@@ -356,6 +360,7 @@ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
buffer->ptr = (u8 *)(*packet + 1);
buffer->len = SSH_MSG_LEN_CTRL;
trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
return 0;
}
......@@ -365,6 +370,7 @@ static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
*/
static void ssh_ctrl_packet_free(struct ssh_packet *p)
{
	/* Trace first, while the packet pointer is still valid. */
	trace_ssam_ctrl_packet_free(p);
	/* Control packets come from a dedicated slab cache; return p to it. */
	kmem_cache_free(ssh_ctrl_packet_cache, p);
}
......@@ -398,7 +404,12 @@ static void ssh_packet_next_try(struct ssh_packet *p)
lockdep_assert_held(&p->ptl->queue.lock);
p->priority = __SSH_PACKET_PRIORITY(base, try + 1);
/*
* Ensure that we write the priority in one go via WRITE_ONCE() so we
* can access it via READ_ONCE() for tracing. Note that other access
* is guarded by the queue lock, so no need to use READ_ONCE() there.
*/
WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
}
/* Must be called with queue lock held. */
......@@ -560,6 +571,7 @@ static void __ssh_ptl_complete(struct ssh_packet *p, int status)
{
struct ssh_ptl *ptl = READ_ONCE(p->ptl);
trace_ssam_packet_complete(p, status);
ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
if (p->ops->complete)
......@@ -1014,6 +1026,8 @@ int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
struct ssh_ptl *ptl_old;
int status;
trace_ssam_packet_submit(p);
/* Validate packet fields. */
if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
......@@ -1065,6 +1079,8 @@ static int __ssh_ptl_resubmit(struct ssh_packet *packet)
lockdep_assert_held(&packet->ptl->pending.lock);
trace_ssam_packet_resubmit(packet);
spin_lock(&packet->ptl->queue.lock);
/* Check if the packet is out of tries. */
......@@ -1148,6 +1164,8 @@ void ssh_ptl_cancel(struct ssh_packet *p)
if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
return;
trace_ssam_packet_cancel(p);
/*
* Lock packet and commit with memory barrier. If this packet has
* already been locked, it's going to be removed and completed by
......@@ -1202,6 +1220,8 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
bool resub = false;
int status;
trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
/*
* Mark reaper as "not pending". This is done before checking any
* packets to avoid lost-update type problems.
......@@ -1224,6 +1244,8 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
continue;
}
trace_ssam_packet_timeout(p);
status = __ssh_ptl_resubmit(p);
/*
......@@ -1416,6 +1438,8 @@ static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
if (!frame) /* Not enough data. */
return aligned.ptr - source->ptr;
trace_ssam_rx_frame_received(frame);
switch (frame->type) {
case SSH_FRAME_TYPE_ACK:
ssh_ptl_acknowledge(ptl, frame->seq);
......
......@@ -22,6 +22,8 @@
#include "ssh_packet_layer.h"
#include "ssh_request_layer.h"
#include "trace.h"
/*
* SSH_RTL_REQUEST_TIMEOUT - Request timeout.
*
......@@ -144,6 +146,8 @@ static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
trace_ssam_request_complete(rqst, status);
/* rtl/ptl may not be set if we're canceling before submitting. */
rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
ssh_request_get_rqid_safe(rqst), status);
......@@ -157,6 +161,8 @@ static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
trace_ssam_request_complete(rqst, 0);
rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
ssh_request_get_rqid(rqst));
......@@ -329,6 +335,8 @@ static void ssh_rtl_tx_work_fn(struct work_struct *work)
*/
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
trace_ssam_request_submit(rqst);
/*
* Ensure that requests expecting a response are sequenced. If this
* invariant ever changes, see the comment in ssh_rtl_complete() on what
......@@ -439,6 +447,8 @@ static void ssh_rtl_complete(struct ssh_rtl *rtl,
struct ssh_request *p, *n;
u16 rqid = get_unaligned_le16(&command->rqid);
trace_ssam_rx_response_received(command, command_data->len);
/*
* Get request from pending based on request ID and mark it as response
* received and locked.
......@@ -688,6 +698,8 @@ bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
return true;
trace_ssam_request_cancel(rqst);
if (pending)
canceled = ssh_rtl_cancel_pending(rqst);
else
......@@ -779,6 +791,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
ktime_t timeout = rtl->rtx_timeout.timeout;
ktime_t next = KTIME_MAX;
trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
/*
* Mark reaper as "not pending". This is done before checking any
* requests to avoid lost-update type problems.
......@@ -822,6 +836,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
/* Cancel and complete the request. */
list_for_each_entry_safe(r, n, &claimed, node) {
trace_ssam_request_timeout(r);
/*
* At this point we've removed the packet from pending. This
* means that we've obtained the last (only) reference of the
......@@ -849,6 +865,8 @@ static void ssh_rtl_timeout_reap(struct work_struct *work)
static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
const struct ssam_span *data)
{
trace_ssam_rx_event_received(cmd, data->len);
rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
get_unaligned_le16(&cmd->rqid));
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment