Commit b41a3829 authored by Jonathan Kim, committed by Alex Deucher

drm/amdkfd: fix debug watchpoints for logical devices

The number of watchpoints should be set and constrained per logical
partition device, not by the socket device.
Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
Reviewed-by: Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 67c4ca9f
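
For orientation, here is a minimal standalone sketch of the bitmask scheme the diff below moves from the socket-level struct kfd_dev into each struct kfd_node. The struct and function names in the sketch are hypothetical, the MAX_WATCH_ADDRESSES value of 4 is an assumption, and locking is omitted for brevity; it is not the driver code itself.

/* Hypothetical sketch of per-node watch ID allocation -- not the driver code. */
#include <stdint.h>

#define MAX_WATCH_ADDRESSES 4		/* assumed number of HW watchpoints per node */

struct node_watch_state {		/* stand-in for the per-node fields added below */
	uint32_t alloc_watch_ids;	/* bit i set => watch ID i is in use on this node */
};

/* Scan for a free watch ID on one node, mirroring the loop in the diff. */
static int get_watch_id(struct node_watch_state *node, int *watch_id)
{
	int i;

	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		if ((node->alloc_watch_ids >> i) & 0x1)
			continue;	/* ID already taken on this node */
		node->alloc_watch_ids |= 0x1 << i;
		*watch_id = i;
		return 0;
	}
	return -1;			/* every watchpoint on this node is in use */
}

/* Release a previously allocated watch ID on the same node. */
static void clear_watch_id(struct node_watch_state *node, int watch_id)
{
	node->alloc_watch_ids &= ~(0x1 << watch_id);
}

Because each struct kfd_node now carries its own alloc_watch_ids mask and watch_points_lock, two partitions on the same socket no longer draw from a single shared pool of watch IDs.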
@@ -365,47 +365,47 @@ static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
 	*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
-	spin_lock(&pdd->dev->kfd->watch_points_lock);
+	spin_lock(&pdd->dev->watch_points_lock);
 	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
 		/* device watchpoint in use so skip */
-		if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
+		if ((pdd->dev->alloc_watch_ids >> i) & 0x1)
 			continue;
 		pdd->alloc_watch_ids |= 0x1 << i;
-		pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
+		pdd->dev->alloc_watch_ids |= 0x1 << i;
 		*watch_id = i;
-		spin_unlock(&pdd->dev->kfd->watch_points_lock);
+		spin_unlock(&pdd->dev->watch_points_lock);
 		return 0;
 	}
-	spin_unlock(&pdd->dev->kfd->watch_points_lock);
+	spin_unlock(&pdd->dev->watch_points_lock);
 	return -ENOMEM;
 }
 
 static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
 {
-	spin_lock(&pdd->dev->kfd->watch_points_lock);
+	spin_lock(&pdd->dev->watch_points_lock);
 	/* process owns device watch point so safe to clear */
 	if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
 		pdd->alloc_watch_ids &= ~(0x1 << watch_id);
-		pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
+		pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id);
 	}
-	spin_unlock(&pdd->dev->kfd->watch_points_lock);
+	spin_unlock(&pdd->dev->watch_points_lock);
 }
 
 static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
 {
 	bool owns_watch_id = false;
-	spin_lock(&pdd->dev->kfd->watch_points_lock);
+	spin_lock(&pdd->dev->watch_points_lock);
 	owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
 			((pdd->alloc_watch_ids >> watch_id) & 0x1);
-	spin_unlock(&pdd->dev->kfd->watch_points_lock);
+	spin_unlock(&pdd->dev->watch_points_lock);
 	return owns_watch_id;
 }
@@ -884,13 +884,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			dev_err(kfd_device, "Error initializing KFD node\n");
 			goto node_init_error;
 		}
+		spin_lock_init(&node->watch_points_lock);
 		kfd->nodes[i] = node;
 	}
 	svm_range_set_max_pages(kfd->adev);
-	spin_lock_init(&kfd->watch_points_lock);
 	kfd->init_complete = true;
 	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
 		 kfd->adev->pdev->device);
@@ -310,6 +310,10 @@ struct kfd_node {
 	struct kfd_local_mem_info local_mem_info;
 	struct kfd_dev *kfd;
+
+	/* Track per device allocated watch points */
+	uint32_t alloc_watch_ids;
+	spinlock_t watch_points_lock;
 };
 
 struct kfd_dev {
@@ -362,10 +366,6 @@ struct kfd_dev {
 	struct kfd_node *nodes[MAX_KFD_NODES];
 	unsigned int num_nodes;
-	/* Track per device allocated watch points */
-	uint32_t alloc_watch_ids;
-	spinlock_t watch_points_lock;
-
 	/* Kernel doorbells for KFD device */
 	struct amdgpu_bo *doorbells;