Commit af87a943 authored by Roland Dreier, committed by Linus Torvalds

[PATCH] InfiniBand/mthca: more Arbel Mem-Free support

Continue development of Arbel Mem-Free support: we now compute a valid
profile, allocate context memory, map sufficient aux memory for HCA page
tables, map sufficient context memory to cover all reserved firmware resources
and successfully call the INIT_HCA and QUERY_ADAPTER firmware commands.  Fix a
few error gotos that unwound the wrong things.
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 94d4b2d3
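
For orientation before the diff: the sequence the commit message describes could be wired up roughly as sketched below. This is an illustrative reconstruction only, not code from this commit: the real init path is in a diff that is collapsed further down, the function name and error labels are invented, and the signed check on mthca_make_profile()'s return value and the exact mthca_INIT_HCA() signature are assumptions. Only the mthca_* helpers themselves appear in the hunks that follow.

/*
 * Illustrative sketch of the Arbel mem-free bring-up this patch enables.
 * The function itself and its error labels are hypothetical.
 */
static int init_arbel_icm_sketch(struct mthca_dev *mdev,
				 struct mthca_profile *profile,
				 struct mthca_dev_lim *dev_lim,
				 struct mthca_init_hca_param *init_hca)
{
	u64 icm_size, aux_pages;
	u8 status;
	int err;

	/* mthca_make_profile() now returns the total ICM size in bytes. */
	icm_size = mthca_make_profile(mdev, profile, dev_lim, init_hca);
	if ((long long) icm_size < 0)
		return -EINVAL;

	/* Ask the HCA how many 4 KB aux pages its page tables will need. */
	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err || status)
		return err ? err : -EINVAL;

	/* Allocate that much aux ICM and hand it to the firmware. */
	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int) aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN);
	if (!mdev->fw.arbel.aux_icm)
		return -ENOMEM;

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err || status)
		goto err_free_aux;

	/* Map one page of context memory for the EQ context table. */
	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err)
		goto err_unmap_aux;

	/* With context memory in place, INIT_HCA can finally succeed. */
	err = mthca_INIT_HCA(mdev, init_hca, &status);
	if (err || status)
		goto err_unmap_eq;

	return 0;

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);
err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);
err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
	return err ? err : -EINVAL;
}

Per-resource context tables (QP, CQ, MPT and friends) are then carved out of the mapped ICM with mthca_alloc_icm_table(), added in the mem-free allocator below.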
@@ -509,7 +509,8 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
 	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
 }
 
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
+			 u64 virt, u8 *status)
 {
 	u32 *inbox;
 	dma_addr_t indma;
@@ -518,12 +519,17 @@ int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
 	int nent = 0;
 	int i;
 	int err = 0;
-	int ts = 0;
+	int ts = 0, tc = 0;
 
 	inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma);
+	if (!inbox)
+		return -ENOMEM;
+
 	memset(inbox, 0, PAGE_SIZE);
 
-	for (mthca_icm_first(icm, &iter); !mthca_icm_last(&iter); mthca_icm_next(&iter)) {
+	for (mthca_icm_first(icm, &iter);
+	     !mthca_icm_last(&iter);
+	     mthca_icm_next(&iter)) {
 		/*
 		 * We have to pass pages that are aligned to their
 		 * size, so find the least significant 1 in the
@@ -538,13 +544,20 @@ int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
 			goto out;
 		}
 
 		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
+			if (virt != -1) {
+				*((__be64 *) (inbox + nent * 4)) =
+					cpu_to_be64(virt);
+				virt += 1 << lg;
+			}
+
 			*((__be64 *) (inbox + nent * 4 + 2)) =
 				cpu_to_be64((mthca_icm_addr(&iter) +
-					     (i << lg)) |
-					    (lg - 12));
+					     (i << lg)) | (lg - 12));
 			ts += 1 << (lg - 10);
+			++tc;
+
 			if (nent == PAGE_SIZE / 16) {
-				err = mthca_cmd(dev, indma, nent, 0, CMD_MAP_FA,
+				err = mthca_cmd(dev, indma, nent, 0, op,
 						CMD_TIME_CLASS_B, status);
 				if (err || *status)
 					goto out;
@@ -553,18 +566,33 @@ int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
 		}
 	}
 
-	if (nent) {
-		err = mthca_cmd(dev, indma, nent, 0, CMD_MAP_FA,
+	if (nent)
+		err = mthca_cmd(dev, indma, nent, 0, op,
 				CMD_TIME_CLASS_B, status);
-	}
 
-	mthca_dbg(dev, "Mapped %d KB of host memory for FW.\n", ts);
+	switch (op) {
+	case CMD_MAP_FA:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+		break;
+	case CMD_MAP_ICM_AUX:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+		break;
+	case CMD_MAP_ICM:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+			  tc, ts, (unsigned long long) virt - (ts << 10));
+		break;
+	}
 
 out:
 	pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma);
 	return err;
 }
 
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status);
+}
+
 int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
 {
 	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
@@ -1068,8 +1096,11 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 #define INIT_HCA_MTT_BASE_OFFSET         (INIT_HCA_TPT_OFFSET + 0x10)
 #define INIT_HCA_UAR_OFFSET              0x120
 #define INIT_HCA_UAR_BASE_OFFSET         (INIT_HCA_UAR_OFFSET + 0x00)
+#define INIT_HCA_UARC_SZ_OFFSET          (INIT_HCA_UAR_OFFSET + 0x09)
+#define INIT_HCA_LOG_UAR_SZ_OFFSET       (INIT_HCA_UAR_OFFSET + 0x0a)
 #define INIT_HCA_UAR_PAGE_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0b)
 #define INIT_HCA_UAR_SCATCH_BASE_OFFSET  (INIT_HCA_UAR_OFFSET + 0x10)
+#define INIT_HCA_UAR_CTX_BASE_OFFSET     (INIT_HCA_UAR_OFFSET + 0x18)
 
 	inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma);
 	if (!inbox)
@@ -1117,7 +1148,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 	/* TPT attributes */
 
 	MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
-	MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
+	if (dev->hca_type != ARBEL_NATIVE)
+		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
 	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
 	MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
@@ -1125,7 +1157,14 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
 	{
 		u8 uar_page_sz = PAGE_SHIFT - 12;
 		MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-		MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
 	}
+	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
+
+	if (dev->hca_type == ARBEL_NATIVE) {
+		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
+		MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
+		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
+	}
 
 	err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA,
@@ -1199,6 +1238,68 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
 	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, HZ, status);
 }
 
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
+}
+
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
+{
+	u64 *inbox;
+	dma_addr_t indma;
+	int err;
+
+	inbox = pci_alloc_consistent(dev->pdev, 16, &indma);
+	if (!inbox)
+		return -ENOMEM;
+
+	inbox[0] = cpu_to_be64(virt);
+	inbox[1] = cpu_to_be64(dma_addr | (PAGE_SHIFT - 12));
+
+	err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status);
+
+	pci_free_consistent(dev->pdev, 16, inbox, indma);
+
+	if (!err)
+		mthca_dbg(dev, "Mapped page at %llx for ICM.\n",
+			  (unsigned long long) virt);
+
+	return err;
+}
+
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
+{
+	return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status);
+}
+
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
+		       u8 *status)
+{
+	int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
+				CMD_TIME_CLASS_A, status);
+	if (ret || status)
+		return ret;
+
+	/*
+	 * Arbel page size is always 4 KB; round up number of system
+	 * pages needed.
+	 */
+	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+
+	return 0;
+}
+
 int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
 		    int mpt_index, u8 *status)
 {

...
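
A note on the mailbox layout that mthca_map_cmd() above builds, read directly off the code; the struct below is only an illustration, not a type defined anywhere in the driver:

/* Hypothetical view of one 16-byte mailbox entry written by mthca_map_cmd(). */
struct map_cmd_entry {
	__be64 va;	/* ICM virtual address, filled in only when virt != -1,
			   i.e. for MAP_ICM; MAP_FA and MAP_ICM_AUX leave it zero */
	__be64 pa;	/* chunk physical address | (log2(chunk size) - 12) */
};

Each entry is 16 bytes, so one PAGE_SIZE mailbox carries at most PAGE_SIZE / 16 entries before the command has to be issued and the mailbox refilled, which is exactly the nent == PAGE_SIZE / 16 check in the loop.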
@@ -174,27 +174,30 @@ struct mthca_adapter {
 
 struct mthca_init_hca_param {
 	u64 qpc_base;
-	u8  log_num_qps;
 	u64 eec_base;
-	u8  log_num_eecs;
 	u64 srqc_base;
-	u8  log_num_srqs;
 	u64 cqc_base;
-	u8  log_num_cqs;
 	u64 eqpc_base;
 	u64 eeec_base;
 	u64 eqc_base;
-	u8  log_num_eqs;
 	u64 rdb_base;
 	u64 mc_base;
+	u64 mpt_base;
+	u64 mtt_base;
+	u64 uar_scratch_base;
+	u64 uarc_base;
 	u16 log_mc_entry_sz;
 	u16 mc_hash_sz;
+	u8  log_num_qps;
+	u8  log_num_eecs;
+	u8  log_num_srqs;
+	u8  log_num_cqs;
+	u8  log_num_eqs;
 	u8  log_mc_table_sz;
-	u64 mpt_base;
 	u8  mtt_seg_sz;
 	u8  log_mpt_sz;
-	u64 mtt_base;
-	u64 uar_scratch_base;
+	u8  log_uar_sz;
+	u8  log_uarc_sz;
 };
 
 struct mthca_init_ib_param {
@@ -238,6 +241,13 @@ int mthca_INIT_IB(struct mthca_dev *dev,
 		  int port, u8 *status);
 int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
 int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
+		       u8 *status);
 int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
 		    int mpt_index, u8 *status);
 int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,

...
 /*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -153,10 +153,12 @@ struct mthca_pd_table {
 };
 
 struct mthca_mr_table {
 	struct mthca_alloc mpt_alloc;
 	int max_mtt_order;
 	unsigned long **mtt_buddy;
 	u64 mtt_base;
+	struct mthca_icm_table *mtt_table;
+	struct mthca_icm_table *mpt_table;
 };
 
 struct mthca_eq_table {
@@ -164,23 +166,29 @@ struct mthca_eq_table {
 	void __iomem *clr_int;
 	u32 clr_mask;
 	struct mthca_eq eq[MTHCA_NUM_EQ];
+	u64 icm_virt;
+	struct page *icm_page;
+	dma_addr_t icm_dma;
 	int have_irq;
 	u8 inta_pin;
 };
 
 struct mthca_cq_table {
 	struct mthca_alloc alloc;
 	spinlock_t lock;
 	struct mthca_array cq;
+	struct mthca_icm_table *table;
 };
 
 struct mthca_qp_table {
 	struct mthca_alloc alloc;
 	u32 rdb_base;
 	int rdb_shift;
 	int sqp_start;
 	spinlock_t lock;
 	struct mthca_array qp;
+	struct mthca_icm_table *qp_table;
+	struct mthca_icm_table *eqp_table;
 };
 
 struct mthca_av_table {
@@ -216,7 +224,8 @@ struct mthca_dev {
 			u64 clr_int_base;
 			u64 eq_arm_base;
 			u64 eq_set_ci_base;
-			struct mthca_icm *icm;
+			struct mthca_icm *fw_icm;
+			struct mthca_icm *aux_icm;
 			u16 fw_pages;
 		} arbel;
 	} fw;
@@ -329,6 +338,9 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 			u32 access, struct mthca_mr *mr);
 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
 
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
+void mthca_unmap_eq_icm(struct mthca_dev *dev);
+
 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 		  struct ib_wc *entry);
 void mthca_arm_cq(struct mthca_dev *dev, struct mthca_cq *cq,

...
@@ -574,6 +574,50 @@ static void mthca_free_irqs(struct mthca_dev *dev)
 				 dev->eq_table.eq + i);
 }
 
+int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
+{
+	int ret;
+	u8 status;
+
+	/*
+	 * We assume that mapping one page is enough for the whole EQ
+	 * context table. This is fine with all current HCAs, because
+	 * we only use 32 EQs and each EQ uses 32 bytes of context
+	 * memory, or 1 KB total.
+	 */
+	dev->eq_table.icm_virt = icm_virt;
+	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
+	if (!dev->eq_table.icm_page)
+		return -ENOMEM;
+	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
+					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+		__free_page(dev->eq_table.icm_page);
+		return -ENOMEM;
+	}
+
+	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
+	if (!ret && status)
+		ret = -EINVAL;
+	if (ret) {
+		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(dev->eq_table.icm_page);
+	}
+
+	return ret;
+}
+
+void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
+{
+	u8 status;
+
+	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
+	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_page(dev->eq_table.icm_page);
+}
+
 int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 {
 	int err;

...
(The diff for one more file is collapsed and not shown here.)
 /*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,6 +34,16 @@
 #include "mthca_memfree.h"
 #include "mthca_dev.h"
+#include "mthca_cmd.h"
+
+/*
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk.
+ */
+enum {
+	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
+	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
+};
 
 void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
 {
@@ -71,11 +81,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 	INIT_LIST_HEAD(&icm->chunk_list);
 
-	/*
-	 * We allocate in as big chunks as we can, up to a maximum of
-	 * 256 KB per chunk.
-	 */
-	cur_order = get_order(1 << 18);
+	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);
 
 	while (npages > 0) {
 		if (!chunk) {
@@ -131,3 +137,70 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 	mthca_free_icm(dev, icm);
 	return NULL;
 }
+
+struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
+					      u64 virt, unsigned size,
+					      unsigned reserved,
+					      int use_lowmem)
+{
+	struct mthca_icm_table *table;
+	int num_icm;
+	int i;
+	u8 status;
+
+	num_icm = size / MTHCA_TABLE_CHUNK_SIZE;
+
+	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->virt    = virt;
+	table->num_icm = num_icm;
+	init_MUTEX(&table->sem);
+
+	for (i = 0; i < num_icm; ++i)
+		table->icm[i] = NULL;
+
+	for (i = 0; i < (reserved + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE; ++i) {
+		table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+						__GFP_NOWARN);
+		if (!table->icm[i])
+			goto err;
+		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
+				  &status) || status) {
+			mthca_free_icm(dev, table->icm[i]);
+			table->icm[i] = NULL;
+			goto err;
+		}
+	}
+
+	return table;
+
+err:
+	for (i = 0; i < num_icm; ++i)
+		if (table->icm[i]) {
+			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
+					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+			mthca_free_icm(dev, table->icm[i]);
+		}
+
+	kfree(table);
+
+	return NULL;
+}
+
+void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
+{
+	int i;
+	u8 status;
+
+	for (i = 0; i < table->num_icm; ++i)
+		if (table->icm[i]) {
+			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+					MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+			mthca_free_icm(dev, table->icm[i]);
+		}
+
+	kfree(table);
+}
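
A hypothetical usage example for the table API just added (the function name and all sizes are made up; the real callers live in the collapsed portion of this patch). Note that in this version both size and reserved are byte counts, since reserved is divided by MTHCA_TABLE_CHUNK_SIZE directly:

/* Made-up example: 64K QP contexts of 256 bytes each, first 16 reserved. */
static struct mthca_icm_table *example_alloc_qpc_table(struct mthca_dev *mdev,
							u64 qpc_base)
{
	return mthca_alloc_icm_table(mdev, qpc_base,
				     65536 * 256,	/* size in bytes */
				     16 * 256,		/* reserved bytes, mapped up front */
				     0);		/* use_lowmem: GFP_HIGHUSER is fine */
}

Passing use_lowmem = 1 instead makes the chunks come from GFP_KERNEL, for tables whose entries the driver itself has to touch directly; teardown is a single mthca_free_icm_table() call.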
 /*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -38,8 +38,10 @@
 #include <linux/list.h>
 #include <linux/pci.h>
 
+#include <asm/semaphore.h>
+
 #define MTHCA_ICM_CHUNK_LEN \
-	((512 - sizeof (struct list_head) - 2 * sizeof (int)) / \
+	((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
 	 (sizeof (struct scatterlist)))
 
 struct mthca_icm_chunk {
@@ -53,6 +55,13 @@ struct mthca_icm {
 	struct list_head chunk_list;
 };
 
+struct mthca_icm_table {
+	u64              virt;
+	int              num_icm;
+	struct semaphore sem;
+	struct mthca_icm *icm[0];
+};
+
 struct mthca_icm_iter {
 	struct mthca_icm *icm;
 	struct mthca_icm_chunk *chunk;
@@ -65,6 +74,12 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 				  unsigned int gfp_mask);
 void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
 
+struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
+					      u64 virt, unsigned size,
+					      unsigned reserved,
+					      int use_lowmem);
+void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table);
+
 static inline void mthca_icm_first(struct mthca_icm *icm,
 				   struct mthca_icm_iter *iter)
 {

...
 /*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -60,7 +60,7 @@ enum {
 	MTHCA_NUM_PDS = 1 << 15
 };
 
-int mthca_make_profile(struct mthca_dev *dev,
+u64 mthca_make_profile(struct mthca_dev *dev,
 		       struct mthca_profile *request,
 		       struct mthca_dev_lim *dev_lim,
 		       struct mthca_init_hca_param *init_hca)
@@ -116,6 +116,8 @@ int mthca_make_profile(struct mthca_dev *dev,
 		profile[i].type = i;
 		profile[i].log_num = max(ffs(profile[i].num) - 1, 0);
 		profile[i].size *= profile[i].num;
+		if (dev->hca_type == ARBEL_NATIVE)
+			profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
 	}
 
 	if (dev->hca_type == ARBEL_NATIVE) {
@@ -239,6 +241,10 @@ int mthca_make_profile(struct mthca_dev *dev,
 		case MTHCA_RES_UDAV:
 			dev->av_table.ddr_av_base = profile[i].start;
 			dev->av_table.num_ddr_avs = profile[i].num;
+		case MTHCA_RES_UARC:
+			init_hca->uarc_base   = profile[i].start;
+			init_hca->log_uarc_sz = ffs(request->uarc_size) - 13;
+			init_hca->log_uar_sz  = ffs(request->num_uar) - 1;
 		default:
 			break;
 		}
@@ -251,5 +257,5 @@ int mthca_make_profile(struct mthca_dev *dev,
 	dev->limits.num_pds = MTHCA_NUM_PDS;
 
 	kfree(profile);
-	return 0;
+	return total_size;
 }
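
To make the ffs() arithmetic above concrete: a requested uarc_size of 1 MB gives ffs(1 << 20) - 13 = 21 - 13 = 8, and 2^8 pages of 4 KB is again 1 MB, which is consistent with log_uarc_sz expressing the UAR context size in 4 KB pages; likewise log_uar_sz = ffs(num_uar) - 1 is simply log2 of the number of UARs, assuming num_uar is a power of two.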
 /*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -50,7 +50,7 @@ struct mthca_profile {
 	int uarc_size;
 };
 
-int mthca_make_profile(struct mthca_dev *mdev,
+u64 mthca_make_profile(struct mthca_dev *mdev,
 		       struct mthca_profile *request,
 		       struct mthca_dev_lim *dev_lim,
 		       struct mthca_init_hca_param *init_hca);

...