Commit 32f7ad0f authored by Linus Torvalds

Merge tag 'drm-next-2023-04-27' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A bit out of routine fixes pull for rc1.

  There's a build breakage on some platforms due to ttm, this has that
  fix + qaic uapi removal + minor panel fixes.

  ttm:
   - Fix TTM build on archs where PMD_SHIFT is not constant

  qaic:
   - Revert uAPI from accel/qaic

  panel:
   - Improve error handling in nt35950
   - Fix double unregister in otm8009a when removing the driver"

* tag 'drm-next-2023-04-27' of git://anongit.freedesktop.org/drm/drm:
  drm/panel: novatek-nt35950: Only unregister DSI1 if it exists
  drm/panel: otm8009a: Set backlight parent to panel device
  drm/panel: novatek-nt35950: Improve error handling
  drm/ttm: revert "Reduce the number of used allocation orders for TTM pages"
  Revert "accel/qaic: Add mhi_qaic_cntl"
parents 7e775787 cf03e295
@@ -7,7 +7,6 @@ obj-$(CONFIG_DRM_ACCEL_QAIC) := qaic.o
 qaic-y := \
 	mhi_controller.o \
-	mhi_qaic_ctrl.o \
 	qaic_control.o \
 	qaic_data.o \
 	qaic_drv.o
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/kernel.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/xarray.h>
#include <uapi/linux/eventpoll.h>
#include "mhi_qaic_ctrl.h"
#include "qaic.h"
#define MHI_QAIC_CTRL_DRIVER_NAME "mhi_qaic_ctrl"
#define MHI_QAIC_CTRL_MAX_MINORS 128
#define MHI_MAX_MTU 0xffff
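/* Maps character device minor numbers to their mqc_dev instances. */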
static DEFINE_XARRAY_ALLOC(mqc_xa);
static struct class *mqc_dev_class;
static int mqc_dev_major;
/**
* struct mqc_buf - Buffer structure used to receive data from device
* @data: Address of data to read from
* @odata: Original address returned from *alloc() API. Used to free this buf.
* @len: Length of data in bytes
* @node: This buffer will be part of list managed in struct mqc_dev
*/
struct mqc_buf {
void *data;
void *odata;
size_t len;
struct list_head node;
};
/**
* struct mqc_dev - MHI QAIC Control Device
* @minor: MQC device node minor number
* @mhi_dev: Associated mhi device object
* @mtu: Max TRE buffer length
* @enabled: Flag to track the state of the MQC device
* @lock: Mutex lock to serialize access to open_count
* @read_lock: Mutex lock to serialize readers
* @write_lock: Mutex lock to serialize writers
* @ul_wq: Wait queue for writers
* @dl_wq: Wait queue for readers
* @dl_queue_lock: Spin lock to serialize access to download queue
* @dl_queue: Queue of downloaded buffers
* @open_count: Track open counts
* @ref_count: Reference count for this structure
*/
struct mqc_dev {
u32 minor;
struct mhi_device *mhi_dev;
size_t mtu;
bool enabled;
struct mutex lock;
struct mutex read_lock;
struct mutex write_lock;
wait_queue_head_t ul_wq;
wait_queue_head_t dl_wq;
spinlock_t dl_queue_lock;
struct list_head dl_queue;
unsigned int open_count;
struct kref ref_count;
};
static void mqc_dev_release(struct kref *ref)
{
struct mqc_dev *mqcdev = container_of(ref, struct mqc_dev, ref_count);
mutex_destroy(&mqcdev->read_lock);
mutex_destroy(&mqcdev->write_lock);
mutex_destroy(&mqcdev->lock);
kfree(mqcdev);
}
static int mhi_qaic_ctrl_fill_dl_queue(struct mqc_dev *mqcdev)
{
struct mhi_device *mhi_dev = mqcdev->mhi_dev;
struct mqc_buf *ctrlbuf;
int rx_budget;
int ret = 0;
void *data;
rx_budget = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
if (rx_budget < 0)
return -EIO;
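/*
 * Each receive buffer is a single allocation: an mtu-sized data area
 * handed to MHI, with the struct mqc_buf bookkeeping entry placed
 * immediately after it (ctrlbuf = data + mqcdev->mtu below).
 */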
while (rx_budget--) {
data = kzalloc(mqcdev->mtu + sizeof(*ctrlbuf), GFP_KERNEL);
if (!data)
return -ENOMEM;
ctrlbuf = data + mqcdev->mtu;
ctrlbuf->odata = data;
ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, data, mqcdev->mtu, MHI_EOT);
if (ret) {
kfree(data);
dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
return ret;
}
}
return ret;
}
static int mhi_qaic_ctrl_dev_start_chan(struct mqc_dev *mqcdev)
{
struct device *dev = &mqcdev->mhi_dev->dev;
int ret = 0;
ret = mutex_lock_interruptible(&mqcdev->lock);
if (ret)
return ret;
if (!mqcdev->enabled) {
ret = -ENODEV;
goto release_dev_lock;
}
if (!mqcdev->open_count) {
ret = mhi_prepare_for_transfer(mqcdev->mhi_dev);
if (ret) {
dev_err(dev, "Error starting transfer channels\n");
goto release_dev_lock;
}
ret = mhi_qaic_ctrl_fill_dl_queue(mqcdev);
if (ret) {
dev_err(dev, "Error filling download queue.\n");
goto mhi_unprepare;
}
}
mqcdev->open_count++;
mutex_unlock(&mqcdev->lock);
return 0;
mhi_unprepare:
mhi_unprepare_from_transfer(mqcdev->mhi_dev);
release_dev_lock:
mutex_unlock(&mqcdev->lock);
return ret;
}
static struct mqc_dev *mqc_dev_get_by_minor(unsigned int minor)
{
struct mqc_dev *mqcdev;
xa_lock(&mqc_xa);
mqcdev = xa_load(&mqc_xa, minor);
if (mqcdev)
kref_get(&mqcdev->ref_count);
xa_unlock(&mqc_xa);
return mqcdev;
}
static int mhi_qaic_ctrl_open(struct inode *inode, struct file *filp)
{
struct mqc_dev *mqcdev;
int ret;
mqcdev = mqc_dev_get_by_minor(iminor(inode));
if (!mqcdev) {
pr_debug("mqc: minor %d not found\n", iminor(inode));
return -EINVAL;
}
ret = mhi_qaic_ctrl_dev_start_chan(mqcdev);
if (ret) {
kref_put(&mqcdev->ref_count, mqc_dev_release);
return ret;
}
filp->private_data = mqcdev;
return 0;
}
static void mhi_qaic_ctrl_buf_free(struct mqc_buf *ctrlbuf)
{
list_del(&ctrlbuf->node);
kfree(ctrlbuf->odata);
}
static void __mhi_qaic_ctrl_release(struct mqc_dev *mqcdev)
{
struct mqc_buf *ctrlbuf, *tmp;
mhi_unprepare_from_transfer(mqcdev->mhi_dev);
wake_up_interruptible(&mqcdev->ul_wq);
wake_up_interruptible(&mqcdev->dl_wq);
/*
* Free the dl_queue. As we have already unprepared mhi transfers, we
* do not expect any callback functions that update dl_queue hence no need
* to grab dl_queue lock.
*/
mutex_lock(&mqcdev->read_lock);
list_for_each_entry_safe(ctrlbuf, tmp, &mqcdev->dl_queue, node)
mhi_qaic_ctrl_buf_free(ctrlbuf);
mutex_unlock(&mqcdev->read_lock);
}
static int mhi_qaic_ctrl_release(struct inode *inode, struct file *file)
{
struct mqc_dev *mqcdev = file->private_data;
mutex_lock(&mqcdev->lock);
mqcdev->open_count--;
if (!mqcdev->open_count && mqcdev->enabled)
__mhi_qaic_ctrl_release(mqcdev);
mutex_unlock(&mqcdev->lock);
kref_put(&mqcdev->ref_count, mqc_dev_release);
return 0;
}
static __poll_t mhi_qaic_ctrl_poll(struct file *file, poll_table *wait)
{
struct mqc_dev *mqcdev = file->private_data;
struct mhi_device *mhi_dev;
__poll_t mask = 0;
mhi_dev = mqcdev->mhi_dev;
poll_wait(file, &mqcdev->ul_wq, wait);
poll_wait(file, &mqcdev->dl_wq, wait);
mutex_lock(&mqcdev->lock);
if (!mqcdev->enabled) {
mutex_unlock(&mqcdev->lock);
return EPOLLERR;
}
spin_lock_bh(&mqcdev->dl_queue_lock);
if (!list_empty(&mqcdev->dl_queue))
mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock_bh(&mqcdev->dl_queue_lock);
if (mutex_lock_interruptible(&mqcdev->write_lock)) {
mutex_unlock(&mqcdev->lock);
return EPOLLERR;
}
if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
mutex_unlock(&mqcdev->write_lock);
mutex_unlock(&mqcdev->lock);
dev_dbg(&mhi_dev->dev, "Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
static int mhi_qaic_ctrl_tx(struct mqc_dev *mqcdev)
{
int ret;
ret = wait_event_interruptible(mqcdev->ul_wq, !mqcdev->enabled ||
mhi_get_free_desc_count(mqcdev->mhi_dev, DMA_TO_DEVICE) > 0);
if (!mqcdev->enabled)
return -ENODEV;
return ret;
}
static ssize_t mhi_qaic_ctrl_write(struct file *file, const char __user *buf, size_t count,
loff_t *offp)
{
struct mqc_dev *mqcdev = file->private_data;
struct mhi_device *mhi_dev;
size_t bytes_xfered = 0;
struct device *dev;
int ret, nr_desc;
mhi_dev = mqcdev->mhi_dev;
dev = &mhi_dev->dev;
if (!mhi_dev->ul_chan)
return -EOPNOTSUPP;
if (!buf || !count)
return -EINVAL;
dev_dbg(dev, "Request to transfer %zu bytes\n", count);
ret = mhi_qaic_ctrl_tx(mqcdev);
if (ret)
return ret;
if (mutex_lock_interruptible(&mqcdev->write_lock))
return -EINTR;
nr_desc = mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE);
if (nr_desc * mqcdev->mtu < count) {
ret = -EMSGSIZE;
dev_dbg(dev, "Buffer too big to transfer\n");
goto unlock_mutex;
}
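/*
 * Copy the user buffer out in mtu-sized chunks. Every chunk except the
 * last is queued with MHI_CHAIN so the device sees one logical transfer
 * terminated by MHI_EOT.
 */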
while (count != bytes_xfered) {
enum mhi_flags flags;
size_t to_copy;
void *kbuf;
to_copy = min_t(size_t, count - bytes_xfered, mqcdev->mtu);
kbuf = kmalloc(to_copy, GFP_KERNEL);
if (!kbuf) {
ret = -ENOMEM;
goto unlock_mutex;
}
ret = copy_from_user(kbuf, buf + bytes_xfered, to_copy);
if (ret) {
kfree(kbuf);
ret = -EFAULT;
goto unlock_mutex;
}
if (bytes_xfered + to_copy == count)
flags = MHI_EOT;
else
flags = MHI_CHAIN;
ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, kbuf, to_copy, flags);
if (ret) {
kfree(kbuf);
dev_err(dev, "Failed to queue buf of size %zu\n", to_copy);
goto unlock_mutex;
}
bytes_xfered += to_copy;
}
mutex_unlock(&mqcdev->write_lock);
dev_dbg(dev, "bytes xferred: %zu\n", bytes_xfered);
return bytes_xfered;
unlock_mutex:
mutex_unlock(&mqcdev->write_lock);
return ret;
}
static int mhi_qaic_ctrl_rx(struct mqc_dev *mqcdev)
{
int ret;
ret = wait_event_interruptible(mqcdev->dl_wq,
!mqcdev->enabled || !list_empty(&mqcdev->dl_queue));
if (!mqcdev->enabled)
return -ENODEV;
return ret;
}
static ssize_t mhi_qaic_ctrl_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct mqc_dev *mqcdev = file->private_data;
struct mqc_buf *ctrlbuf;
size_t to_copy;
int ret;
if (!mqcdev->mhi_dev->dl_chan)
return -EOPNOTSUPP;
ret = mhi_qaic_ctrl_rx(mqcdev);
if (ret)
return ret;
if (mutex_lock_interruptible(&mqcdev->read_lock))
return -EINTR;
ctrlbuf = list_first_entry_or_null(&mqcdev->dl_queue, struct mqc_buf, node);
if (!ctrlbuf) {
mutex_unlock(&mqcdev->read_lock);
ret = -ENODEV;
goto error_out;
}
to_copy = min_t(size_t, count, ctrlbuf->len);
if (copy_to_user(buf, ctrlbuf->data, to_copy)) {
mutex_unlock(&mqcdev->read_lock);
dev_dbg(&mqcdev->mhi_dev->dev, "Failed to copy data to user buffer\n");
ret = -EFAULT;
goto error_out;
}
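/*
 * Consume only what the caller asked for; a partially read buffer stays
 * at the head of dl_queue, and a fully drained one is freed and replaced
 * by refilling the download queue.
 */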
ctrlbuf->len -= to_copy;
ctrlbuf->data += to_copy;
if (!ctrlbuf->len) {
spin_lock_bh(&mqcdev->dl_queue_lock);
mhi_qaic_ctrl_buf_free(ctrlbuf);
spin_unlock_bh(&mqcdev->dl_queue_lock);
mhi_qaic_ctrl_fill_dl_queue(mqcdev);
dev_dbg(&mqcdev->mhi_dev->dev, "Read buf freed\n");
}
mutex_unlock(&mqcdev->read_lock);
return to_copy;
error_out:
mutex_unlock(&mqcdev->read_lock);
return ret;
}
static const struct file_operations mhidev_fops = {
.owner = THIS_MODULE,
.open = mhi_qaic_ctrl_open,
.release = mhi_qaic_ctrl_release,
.read = mhi_qaic_ctrl_read,
.write = mhi_qaic_ctrl_write,
.poll = mhi_qaic_ctrl_poll,
};
static void mhi_qaic_ctrl_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
mhi_result->transaction_status, mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up_interruptible(&mqcdev->ul_wq);
}
static void mhi_qaic_ctrl_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
struct mqc_buf *ctrlbuf;
dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
mhi_result->transaction_status, mhi_result->bytes_xferd);
if (mhi_result->transaction_status &&
mhi_result->transaction_status != -EOVERFLOW) {
kfree(mhi_result->buf_addr);
return;
}
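/*
 * Recover the mqc_buf entry stored at the tail of the receive allocation
 * (laid out in mhi_qaic_ctrl_fill_dl_queue) and queue the completed
 * payload for readers.
 */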
ctrlbuf = mhi_result->buf_addr + mqcdev->mtu;
ctrlbuf->data = mhi_result->buf_addr;
ctrlbuf->len = mhi_result->bytes_xferd;
spin_lock_bh(&mqcdev->dl_queue_lock);
list_add_tail(&ctrlbuf->node, &mqcdev->dl_queue);
spin_unlock_bh(&mqcdev->dl_queue_lock);
wake_up_interruptible(&mqcdev->dl_wq);
}
static int mhi_qaic_ctrl_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct mqc_dev *mqcdev;
struct device *dev;
int ret;
mqcdev = kzalloc(sizeof(*mqcdev), GFP_KERNEL);
if (!mqcdev)
return -ENOMEM;
kref_init(&mqcdev->ref_count);
mutex_init(&mqcdev->lock);
mqcdev->mhi_dev = mhi_dev;
ret = xa_alloc(&mqc_xa, &mqcdev->minor, mqcdev, XA_LIMIT(0, MHI_QAIC_CTRL_MAX_MINORS),
GFP_KERNEL);
if (ret) {
kfree(mqcdev);
return ret;
}
init_waitqueue_head(&mqcdev->ul_wq);
init_waitqueue_head(&mqcdev->dl_wq);
mutex_init(&mqcdev->read_lock);
mutex_init(&mqcdev->write_lock);
spin_lock_init(&mqcdev->dl_queue_lock);
INIT_LIST_HEAD(&mqcdev->dl_queue);
mqcdev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU);
mqcdev->enabled = true;
mqcdev->open_count = 0;
dev_set_drvdata(&mhi_dev->dev, mqcdev);
dev = device_create(mqc_dev_class, &mhi_dev->dev, MKDEV(mqc_dev_major, mqcdev->minor),
mqcdev, "%s", dev_name(&mhi_dev->dev));
if (IS_ERR(dev)) {
xa_erase(&mqc_xa, mqcdev->minor);
dev_set_drvdata(&mhi_dev->dev, NULL);
kfree(mqcdev);
return PTR_ERR(dev);
}
return 0;
};
static void mhi_qaic_ctrl_remove(struct mhi_device *mhi_dev)
{
struct mqc_dev *mqcdev = dev_get_drvdata(&mhi_dev->dev);
device_destroy(mqc_dev_class, MKDEV(mqc_dev_major, mqcdev->minor));
mutex_lock(&mqcdev->lock);
mqcdev->enabled = false;
if (mqcdev->open_count)
__mhi_qaic_ctrl_release(mqcdev);
mutex_unlock(&mqcdev->lock);
xa_erase(&mqc_xa, mqcdev->minor);
kref_put(&mqcdev->ref_count, mqc_dev_release);
}
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_qaic_ctrl_match_table[] = {
{ .chan = "QAIC_SAHARA", .driver_data = SZ_32K},
{},
};
MODULE_DEVICE_TABLE(mhi, mhi_qaic_ctrl_match_table);
static struct mhi_driver mhi_qaic_ctrl_driver = {
.id_table = mhi_qaic_ctrl_match_table,
.remove = mhi_qaic_ctrl_remove,
.probe = mhi_qaic_ctrl_probe,
.ul_xfer_cb = mhi_qaic_ctrl_ul_xfer_cb,
.dl_xfer_cb = mhi_qaic_ctrl_dl_xfer_cb,
.driver = {
.name = MHI_QAIC_CTRL_DRIVER_NAME,
},
};
int mhi_qaic_ctrl_init(void)
{
int ret;
ret = register_chrdev(0, MHI_QAIC_CTRL_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mqc_dev_major = ret;
mqc_dev_class = class_create(THIS_MODULE, MHI_QAIC_CTRL_DRIVER_NAME);
if (IS_ERR(mqc_dev_class)) {
ret = PTR_ERR(mqc_dev_class);
goto unregister_chrdev;
}
ret = mhi_driver_register(&mhi_qaic_ctrl_driver);
if (ret)
goto destroy_class;
return 0;
destroy_class:
class_destroy(mqc_dev_class);
unregister_chrdev:
unregister_chrdev(mqc_dev_major, MHI_QAIC_CTRL_DRIVER_NAME);
return ret;
}
void mhi_qaic_ctrl_deinit(void)
{
mhi_driver_unregister(&mhi_qaic_ctrl_driver);
class_destroy(mqc_dev_class);
unregister_chrdev(mqc_dev_major, MHI_QAIC_CTRL_DRIVER_NAME);
xa_destroy(&mqc_xa);
}
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __MHI_QAIC_CTRL_H__
#define __MHI_QAIC_CTRL_H__
int mhi_qaic_ctrl_init(void);
void mhi_qaic_ctrl_deinit(void);
#endif /* __MHI_QAIC_CTRL_H__ */
@@ -25,7 +25,6 @@
 #include <uapi/drm/qaic_accel.h>
 #include "mhi_controller.h"
-#include "mhi_qaic_ctrl.h"
 #include "qaic.h"
 MODULE_IMPORT_NS(DMA_BUF);
@@ -601,16 +600,8 @@ static int __init qaic_init(void)
 		goto free_mhi;
 	}
-	ret = mhi_qaic_ctrl_init();
-	if (ret) {
-		pr_debug("qaic: mhi_qaic_ctrl_init failed %d\n", ret);
-		goto free_pci;
-	}
 	return 0;
-free_pci:
-	pci_unregister_driver(&qaic_pci_driver);
 free_mhi:
 	mhi_driver_unregister(&qaic_mhi_driver);
 	return ret;
@@ -634,7 +625,6 @@ static void __exit qaic_exit(void)
 	 * reinitializing the link_up state after the cleanup is done.
 	 */
 	link_up = true;
-	mhi_qaic_ctrl_deinit();
 	pci_unregister_driver(&qaic_pci_driver);
 	mhi_driver_unregister(&qaic_mhi_driver);
 }
@@ -585,8 +585,12 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
 		       DRM_MODE_CONNECTOR_DSI);
 	ret = drm_panel_of_backlight(&nt->panel);
-	if (ret)
+	if (ret) {
+		if (num_dsis == 2)
+			mipi_dsi_device_unregister(nt->dsi[1]);
 		return dev_err_probe(dev, ret, "Failed to get backlight\n");
+	}
 	drm_panel_add(&nt->panel);
@@ -602,6 +606,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
 		ret = mipi_dsi_attach(nt->dsi[i]);
 		if (ret < 0) {
+			/* If we fail to attach to either host, we're done */
+			if (num_dsis == 2)
+				mipi_dsi_device_unregister(nt->dsi[1]);
 			return dev_err_probe(dev, ret,
 					     "Cannot attach to DSI%d host.\n", i);
 		}
@@ -471,7 +471,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
 		       DRM_MODE_CONNECTOR_DSI);
 	ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
-						     dsi->host->dev, ctx,
+						     dev, ctx,
 						     &otm8009a_backlight_ops,
 						     NULL);
 	if (IS_ERR(ctx->bl_dev)) {
@@ -47,11 +47,6 @@
 #include "ttm_module.h"
-#define TTM_MAX_ORDER (PMD_SHIFT - PAGE_SHIFT)
-#define __TTM_DIM_ORDER (TTM_MAX_ORDER + 1)
-/* Some architectures have a weird PMD_SHIFT */
-#define TTM_DIM_ORDER (__TTM_DIM_ORDER <= MAX_ORDER ? __TTM_DIM_ORDER : MAX_ORDER)
 /**
  * struct ttm_pool_dma - Helper object for coherent DMA mappings
  *
@@ -70,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[TTM_DIM_ORDER];
-static struct ttm_pool_type global_uncached[TTM_DIM_ORDER];
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
-static struct ttm_pool_type global_dma32_write_combined[TTM_DIM_ORDER];
-static struct ttm_pool_type global_dma32_uncached[TTM_DIM_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -449,7 +444,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	else
 		gfp_flags |= GFP_HIGHUSER;
-	for (order = min_t(unsigned int, TTM_MAX_ORDER, __fls(num_pages));
+	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
 	     num_pages;
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
 		struct ttm_pool_type *pt;
@@ -568,7 +563,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 	if (use_dma_alloc) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j < TTM_DIM_ORDER; ++j)
+			for (j = 0; j < MAX_ORDER; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -588,7 +583,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 	if (pool->use_dma_alloc) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j < TTM_DIM_ORDER; ++j)
+			for (j = 0; j < MAX_ORDER; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
@@ -642,7 +637,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 	seq_puts(m, "\t ");
-	for (i = 0; i < TTM_DIM_ORDER; ++i)
+	for (i = 0; i < MAX_ORDER; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -653,7 +648,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
-	for (i = 0; i < TTM_DIM_ORDER; ++i)
+	for (i = 0; i < MAX_ORDER; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -756,16 +751,13 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 {
 	unsigned int i;
-	BUILD_BUG_ON(TTM_DIM_ORDER > MAX_ORDER);
-	BUILD_BUG_ON(TTM_DIM_ORDER < 1);
 	if (!page_pool_size)
 		page_pool_size = num_pages;
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
-	for (i = 0; i < TTM_DIM_ORDER; ++i) {
+	for (i = 0; i < MAX_ORDER; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -798,7 +790,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
-	for (i = 0; i < TTM_DIM_ORDER; ++i) {
+	for (i = 0; i < MAX_ORDER; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);