Commit 99db9494 authored by Bart Van Assche, committed by Doug Ledford

IB/core: Remove ib_device.dma_device

Add code in ib_register_device() that copies the DMA masks from the
parent device. Use &ib_device.dev instead of dma_device in the DMA
mapping operations. Remove ib_device.dma_device since, after this and
the previous patches, it is no longer used.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 92f4ae35
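
For context, here is a minimal provider-side sketch of what this change means. The driver shown (example_hca, its fields, and example_hca_register()) is hypothetical and not part of this patch: after this commit, setting ib_device.dev.parent before calling ib_register_device() is sufficient, because the DMA mask, coherent DMA mask and dma_ops are copied from the parent when the driver leaves them unset.

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

/* Hypothetical provider driver state; not part of this patch. */
struct example_hca {
	struct ib_device ib_dev;
	struct pci_dev *pdev;
};

static int example_hca_register(struct example_hca *hca)
{
	struct ib_device *ibdev = &hca->ib_dev;

	/* The PCI parent supplies the DMA configuration. */
	ibdev->dev.parent = &hca->pdev->dev;

	/*
	 * ib_register_device() now copies dma_ops, dma_mask and
	 * coherent_dma_mask from ibdev->dev.parent; there is no
	 * ibdev->dma_device left to assign.
	 */
	return ib_register_device(ibdev, NULL);
}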
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -333,14 +333,15 @@ int ib_register_device(struct ib_device *device,
 	int ret;
 	struct ib_client *client;
 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+	struct device *parent = device->dev.parent;
 
-	WARN_ON_ONCE(!device->dev.parent && !device->dma_device);
-	WARN_ON_ONCE(device->dev.parent && device->dma_device
-		     && device->dev.parent != device->dma_device);
-
-	if (!device->dev.parent)
-		device->dev.parent = device->dma_device;
-	if (!device->dma_device)
-		device->dma_device = device->dev.parent;
+	WARN_ON_ONCE(!parent);
+	if (!device->dev.dma_ops)
+		device->dev.dma_ops = parent->dma_ops;
+	if (!device->dev.dma_mask)
+		device->dev.dma_mask = parent->dma_mask;
+	if (!device->dev.coherent_dma_mask)
+		device->dev.coherent_dma_mask = parent->coherent_dma_mask;
 
 	mutex_lock(&device_mutex);
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1841,8 +1841,6 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
-	struct device                *dma_device;
-
 	char                          name[IB_DEVICE_NAME_MAX];
 
 	struct list_head              event_handler_list;
@@ -2969,7 +2967,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->mapping_error(dev, dma_addr);
-	return dma_mapping_error(dev->dma_device, dma_addr);
+	return dma_mapping_error(&dev->dev, dma_addr);
 }
 
 /**
@@ -2985,7 +2983,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
-	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+	return dma_map_single(&dev->dev, cpu_addr, size, direction);
 }
 
 /**
@@ -3002,7 +3000,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_single(dev, addr, size, direction);
 	else
-		dma_unmap_single(dev->dma_device, addr, size, direction);
+		dma_unmap_single(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3021,7 +3019,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
-	return dma_map_page(dev->dma_device, page, offset, size, direction);
+	return dma_map_page(&dev->dev, page, offset, size, direction);
 }
 
 /**
@@ -3038,7 +3036,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_page(dev, addr, size, direction);
 	else
-		dma_unmap_page(dev->dma_device, addr, size, direction);
+		dma_unmap_page(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3054,7 +3052,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
-	return dma_map_sg(dev->dma_device, sg, nents, direction);
+	return dma_map_sg(&dev->dev, sg, nents, direction);
 }
 
 /**
@@ -3071,7 +3069,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 	else
-		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+		dma_unmap_sg(&dev->dev, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3082,9 +3080,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
 						  dma_attrs);
-	else
-		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-					dma_attrs);
+	return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3096,8 +3092,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
 						    dma_attrs);
 	else
-		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
-				   dma_attrs);
+		dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3142,7 +3137,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 	else
-		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3160,7 +3155,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 	else
-		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_device(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3183,7 +3178,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 		*dma_handle = handle;
 		return ret;
 	}
-	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
 }
 
 /**
@@ -3200,7 +3195,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 	else
-		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+		dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
 }
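
For illustration, a short consumer-side sketch (hypothetical ULP code, not from this patch): the ib_dma_* wrappers keep their signatures, but when dev->dma_ops is NULL they now dispatch to &dev->dev rather than to the removed dma_device pointer.

#include <rdma/ib_verbs.h>

/* Hypothetical ULP helper; the ib_dma_* wrapper API is unchanged. */
static int example_map_buf(struct ib_device *ibdev, void *buf, size_t len,
			   u64 *dma)
{
	/* With dev->dma_ops == NULL, this now maps against &ibdev->dev. */
	*dma = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma))
		return -ENOMEM;
	return 0;
}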