Commit db1a8dd9 authored by Tomer Tayar's avatar Tomer Tayar Committed by Oded Gabbay

habanalabs: add support for dma-buf exporter

Implement the calls to the dma-buf kernel api to create a dma-buf
object backed by FD.

We block the option to mmap the DMA-BUF object because we don't support
DIRECT_IO and implicit P2P. We only implement support for explicit P2P
through importing the FD of the DMA-BUF.

In the export phase, we provide to the DMA-BUF object an array of pages
that represent the device's memory area. During the map callback,
we convert the array of pages into an SGT. We split/merge the pages
according to the dma max segment size of the importer.

To get the DMA address of the PCI bar, we use the dma_map_resource()
kernel API, because our device memory is not backed by page structs
and this API doesn't need a page struct to map the physical address to
a DMA address.

We set the orig_nents member of the SGT to be 0, to indicate to other
drivers that we don't support CPU mappings.

Note that in Habanalabs's ASICs, the device memory is pinned and
immutable. Therefore, there is no need for dynamic mappings and pinning
callbacks.

Also note that in GAUDI we don't have an MMU towards the device memory
and the user works on physical addresses. Therefore, the user doesn't
pass through the kernel driver to allocate memory there. As a result,
only for GAUDI we receive from the user a device memory physical address
(instead of a handle) and a size.

We check the p2p distance using pci_p2pdma_distance_many() and refuse
to map the dmabuf in case the distance doesn't allow p2p.
Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Reviewed-by: Gal Pressman <galpress@amazon.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent a9498ee5
...@@ -8,6 +8,7 @@ config HABANA_AI ...@@ -8,6 +8,7 @@ config HABANA_AI
depends on PCI && HAS_IOMEM depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR select GENERIC_ALLOCATOR
select HWMON select HWMON
select DMA_SHARED_BUFFER
help help
Enables PCIe card driver for Habana's AI Processors (AIP) that are Enables PCIe card driver for Habana's AI Processors (AIP) that are
designed to accelerate Deep Learning inference and training workloads. designed to accelerate Deep Learning inference and training workloads.
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/coresight.h> #include <linux/coresight.h>
#include <linux/dma-buf.h>
#define HL_NAME "habanalabs" #define HL_NAME "habanalabs"
...@@ -1366,6 +1367,23 @@ struct hl_cs_counters_atomic { ...@@ -1366,6 +1367,23 @@ struct hl_cs_counters_atomic {
atomic64_t validation_drop_cnt; atomic64_t validation_drop_cnt;
}; };
/**
 * struct hl_dmabuf_priv - a dma-buf private object.
 * @dmabuf: pointer to the exported dma-buf object.
 * @ctx: pointer to the dma-buf owner's context.
 * @phys_pg_pack: pointer to the physical page pack if the dma-buf was exported
 *                from a memory allocation handle. NULL when the dma-buf was
 *                exported from a raw device address instead.
 * @device_address: physical address of the device's memory. Relevant only
 *                  if phys_pg_pack is NULL (dma-buf was exported from address).
 *                  The total size can be taken from the dmabuf object.
 */
struct hl_dmabuf_priv {
	struct dma_buf *dmabuf;
	struct hl_ctx *ctx;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	/* NOTE(review): kernel style prefers the kernel typedef u64 over
	 * uint64_t for consistency with the surrounding structs (e.g.
	 * hl_vm_phys_pg_pack, hl_device) — confirm before changing, as it
	 * is ABI-equivalent but a separate cleanup patch.
	 */
	uint64_t device_address;
};
/** /**
* struct hl_ctx - user/kernel context. * struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area * @mem_hash: holds mapping from virtual address to virtual memory area
...@@ -1676,6 +1694,7 @@ struct hl_vm_hw_block_list_node { ...@@ -1676,6 +1694,7 @@ struct hl_vm_hw_block_list_node {
* @npages: num physical pages in the pack. * @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list. * @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings. * @mapping_cnt: number of shared mappings.
 * @exporting_cnt: number of dma-buf exports. * @asid: the context related to this list.
* @asid: the context related to this list. * @asid: the context related to this list.
* @page_size: size of each page in the pack. * @page_size: size of each page in the pack.
* @flags: HL_MEM_* flags related to this list. * @flags: HL_MEM_* flags related to this list.
...@@ -1690,6 +1709,7 @@ struct hl_vm_phys_pg_pack { ...@@ -1690,6 +1709,7 @@ struct hl_vm_phys_pg_pack {
u64 npages; u64 npages;
u64 total_size; u64 total_size;
atomic_t mapping_cnt; atomic_t mapping_cnt;
u32 exporting_cnt;
u32 asid; u32 asid;
u32 page_size; u32 page_size;
u32 flags; u32 flags;
...@@ -2410,6 +2430,7 @@ struct multi_cs_data { ...@@ -2410,6 +2430,7 @@ struct multi_cs_data {
* the error will be ignored by the driver during * the error will be ignored by the driver during
* device initialization. Mainly used to debug and * device initialization. Mainly used to debug and
* workaround firmware bugs * workaround firmware bugs
* @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
* @last_successful_open_jif: timestamp (jiffies) of the last successful * @last_successful_open_jif: timestamp (jiffies) of the last successful
* device open. * device open.
* @last_open_session_duration_jif: duration (jiffies) of the last device open * @last_open_session_duration_jif: duration (jiffies) of the last device open
...@@ -2559,6 +2580,7 @@ struct hl_device { ...@@ -2559,6 +2580,7 @@ struct hl_device {
u64 max_power; u64 max_power;
u64 clock_gating_mask; u64 clock_gating_mask;
u64 boot_error_status_mask; u64 boot_error_status_mask;
u64 dram_pci_bar_start;
u64 last_successful_open_jif; u64 last_successful_open_jif;
u64 last_open_session_duration_jif; u64 last_open_session_duration_jif;
u64 open_counter; u64 open_counter;
......
This diff is collapsed.
...@@ -795,6 +795,7 @@ static int gaudi_early_init(struct hl_device *hdev) ...@@ -795,6 +795,7 @@ static int gaudi_early_init(struct hl_device *hdev)
} }
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */ /* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) { if (hdev->asic_prop.fw_security_enabled) {
......
...@@ -622,6 +622,7 @@ static int goya_early_init(struct hl_device *hdev) ...@@ -622,6 +622,7 @@ static int goya_early_init(struct hl_device *hdev)
} }
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */ /* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) { if (hdev->asic_prop.fw_security_enabled) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment