Commit 7bced397 authored by Dan Williams

net_dma: simple removal

Per commit "77873803 net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.

This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.

Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():

    https://lkml.org/lkml/2014/9/3/177

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: David Whipple <whipple@securedatainnovations.ch>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: <stable@vger.kernel.org>
Reported-by: Roman Gushchin <klamm@yandex-team.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 08223d80
What: tcp_dma_copybreak sysctl
Date: Removed in kernel v3.13
Contact: Dan Williams <dan.j.williams@intel.com>
Description:
Formerly the lower limit, in bytes, of the size of socket reads
that will be offloaded to a DMA copy engine. Removed due to
coherency issues of the cpu potentially touching the buffers
while dma is in flight.
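On kernels that still had CONFIG_NET_DMA, this threshold was exposed through the ordinary sysctl machinery (the ipv4_table entry removed later in this diff), i.e. as /proc/sys/net/ipv4/tcp_dma_copybreak. A minimal userspace sketch that reads it, purely for illustration; on any kernel carrying this patch the file simply does not exist:

/* Illustrative only: reads the (now removed) tcp_dma_copybreak sysctl on an
 * old CONFIG_NET_DMA kernel.  On current kernels fopen() just fails. */
#include <stdio.h>

int main(void)
{
	long copybreak;
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_dma_copybreak", "r");

	if (!f) {
		perror("tcp_dma_copybreak (removed on this kernel?)");
		return 1;
	}
	if (fscanf(f, "%ld", &copybreak) == 1)
		printf("offload threshold: %ld bytes\n", copybreak);
	fclose(f);
	return 0;
}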
@@ -582,12 +582,6 @@ tcp_workaround_signed_windows - BOOLEAN
not receive a window scaling option from them.
Default: 0
tcp_dma_copybreak - INTEGER
Lower limit, in bytes, of the size of socket reads that will be
offloaded to a DMA copy engine, if one is present in the system
and CONFIG_NET_DMA is enabled.
Default: 4096
tcp_thin_linear_timeouts - BOOLEAN
Enable dynamic triggering of linear timeouts for thin streams.
If set, a check is performed upon retransmission by timeout to
...
@@ -368,18 +368,6 @@ config DMA_OF
comment "DMA Clients"
depends on DMA_ENGINE
config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
say N.
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE
...
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
...
@@ -1084,110 +1084,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
* @chan: DMA channel to offload copy to
* @dest_pg: destination page
* @dest_off: offset in page to copy to
* @src_pg: source page
* @src_off: offset in page to copy from
* @len: length
*
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
* address according to the DMA mapping API rules for streaming mappings.
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
* (kernel memory or locked user space pages).
*/
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
unsigned int dest_off, struct page *src_pg, unsigned int src_off,
size_t len)
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
unsigned long flags;
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
unmap->to_cnt = 1;
unmap->from_cnt = 1;
unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
DMA_TO_DEVICE);
unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
DMA_FROM_DEVICE);
unmap->len = len;
flags = DMA_CTRL_ACK;
tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
len, flags);
if (!tx) {
dmaengine_unmap_put(unmap);
return -ENOMEM;
}
dma_set_unmap(tx, unmap);
cookie = tx->tx_submit(tx);
dmaengine_unmap_put(unmap);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
__this_cpu_inc(chan->local->memcpy_count);
preempt_enable();
return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
/**
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
* @chan: DMA channel to offload copy to
* @dest: destination address (virtual)
* @src: source address (virtual)
* @len: length
*
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
* user space pages).
*/
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
void *src, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
(unsigned long) dest & ~PAGE_MASK,
virt_to_page(src),
(unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
/**
* dma_async_memcpy_buf_to_pg - offloaded copy from address to page
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
* @kdata: source address (virtual)
* @len: length
*
* Both @page/@offset and @kdata must be mappable to a bus address according
* to the DMA mapping API rules for streaming mappings.
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
* locked user space pages)
*/
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
unsigned int offset, void *kdata, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, page, offset,
virt_to_page(kdata),
(unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
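The three helpers removed above were the generic memcpy-offload entry points built on top of device_prep_dma_memcpy(). As a hedged illustration only (this is not code from the patch, and the function name is made up for the sketch), a kernel-side caller would have driven them roughly like this, assuming a channel had already been made available via dmaengine_get()/net_dmaengine_get():

#include <linux/dmaengine.h>
#include <linux/string.h>

/* Hypothetical caller sketch: submit an offloaded copy, flush it, wait,
 * and fall back to a CPU memcpy() whenever the engine cannot be used. */
static void example_offload_copy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	dma_cookie_t cookie;

	if (!chan)
		goto cpu_copy;			/* no memcpy-capable engine */

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		goto cpu_copy;			/* no descriptor slot */

	dma_async_issue_pending(chan);		/* push the work to hardware */
	if (dma_sync_wait(chan, cookie) == DMA_COMPLETE)
		return;
cpu_copy:
	memcpy(dst, src, len);			/* CPU fallback */
}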
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
...
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(4096);
err = ioat_register(device);
if (err)
return err;
...
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
#define dump_desc_dbg(c, d) \
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
#ifdef CONFIG_NET_DMA
sysctl_tcp_dma_copybreak = copybreak;
#endif
}
static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
...
@@ -900,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(2048);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
...
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(262144);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
...
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
* Portions based on net/core/datagram.c and copyrighted by their authors.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code allows the net stack to make use of a DMA engine for
* skb to iovec copies.
*/
#include <linux/dmaengine.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <net/tcp.h> /* for memcpy_toiovec */
#include <asm/io.h>
#include <asm/uaccess.h>
static int num_pages_spanned(struct iovec *iov)
{
return
((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
}
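The expression above simply rounds the end of the iovec up to a page boundary and its start down, then counts pages. A standalone userspace check of the same arithmetic (the 4 KiB page size and the helper name are assumptions of the sketch):

/* Standalone check of the num_pages_spanned() arithmetic above. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (PAGE_ALIGN(base + len) - (base & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes entirely inside one page */
	printf("%lu\n", pages_spanned(0x1000, 100));	/* prints 1 */
	/* 100 bytes straddling a page boundary */
	printf("%lu\n", pages_spanned(0x1fd0, 100));	/* prints 2 */
	return 0;
}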
/*
* Pin down all the iovec pages needed for len bytes.
* Return a struct dma_pinned_list to keep track of pages pinned down.
*
* We are allocating a single chunk of memory, and then carving it up into
* 3 sections, the latter 2 whose size depends on the number of iovecs and the
* total number of pages, respectively.
*/
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
{
struct dma_pinned_list *local_list;
struct page **pages;
int i;
int ret;
int nr_iovecs = 0;
int iovec_len_used = 0;
int iovec_pages_used = 0;
/* don't pin down non-user-based iovecs */
if (segment_eq(get_fs(), KERNEL_DS))
return NULL;
/* determine how many iovecs/pages there are, up front */
do {
iovec_len_used += iov[nr_iovecs].iov_len;
iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
nr_iovecs++;
} while (iovec_len_used < len);
/* single kmalloc for pinned list, page_list[], and the page arrays */
local_list = kmalloc(sizeof(*local_list)
+ (nr_iovecs * sizeof (struct dma_page_list))
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
if (!local_list)
goto out;
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
local_list->nr_iovecs = 0;
for (i = 0; i < nr_iovecs; i++) {
struct dma_page_list *page_list = &local_list->page_list[i];
len -= iov[i].iov_len;
if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
goto unpin;
page_list->nr_pages = num_pages_spanned(&iov[i]);
page_list->base_address = iov[i].iov_base;
page_list->pages = pages;
pages += page_list->nr_pages;
/* pin pages down */
down_read(&current->mm->mmap_sem);
ret = get_user_pages(
current,
current->mm,
(unsigned long) iov[i].iov_base,
page_list->nr_pages,
1, /* write */
0, /* force */
page_list->pages,
NULL);
up_read(&current->mm->mmap_sem);
if (ret != page_list->nr_pages)
goto unpin;
local_list->nr_iovecs = i + 1;
}
return local_list;
unpin:
dma_unpin_iovec_pages(local_list);
out:
return NULL;
}
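As the comment above notes, the pinned list is a single allocation carved into three sections: the header, nr_iovecs dma_page_list descriptors, and one shared array of page pointers that each descriptor's ->pages field slices into. A small userspace analogue of that layout (the struct names mirror the kernel ones but are local to this sketch):

/* Userspace analogue of the single-allocation carve-up used above. */
#include <stdio.h>
#include <stdlib.h>

struct page;				/* opaque stand-in for struct page */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;		/* points into the shared tail array */
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[];	/* flexible array member */
};

int main(void)
{
	int nr_iovecs = 2, total_pages = 5;
	struct dma_pinned_list *list;
	struct page **pages;

	list = malloc(sizeof(*list)
		      + nr_iovecs * sizeof(struct dma_page_list)
		      + total_pages * sizeof(struct page *));
	if (!list)
		return 1;

	/* page-pointer slots start right after page_list[nr_iovecs] */
	pages = (struct page **)&list->page_list[nr_iovecs];

	list->nr_iovecs = nr_iovecs;
	list->page_list[0].pages = pages;	/* first iovec: 3 pages */
	list->page_list[0].nr_pages = 3;
	list->page_list[1].pages = pages + 3;	/* second iovec: 2 pages */
	list->page_list[1].nr_pages = 2;

	printf("one allocation, %d iovecs, %d page slots\n",
	       list->nr_iovecs, total_pages);
	free(list);
	return 0;
}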
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
{
int i, j;
if (!pinned_list)
return;
for (i = 0; i < pinned_list->nr_iovecs; i++) {
struct dma_page_list *page_list = &pinned_list->page_list[i];
for (j = 0; j < page_list->nr_pages; j++) {
set_page_dirty_lock(page_list->pages[j]);
page_cache_release(page_list->pages[j]);
}
}
kfree(pinned_list);
}
/*
* We have already pinned down the pages we will be using in the iovecs.
* Each entry in iov array has corresponding entry in pinned_list->page_list.
* Using array indexing to keep iov[] and page_list[] in sync.
* Initial elements in iov array's iov->iov_len will be 0 if already copied into
* by another call.
* iov array length remaining guaranteed to be bigger than len.
*/
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
{
int iov_byte_offset;
int copy;
dma_cookie_t dma_cookie = 0;
int iovec_idx;
int page_idx;
if (!chan)
return memcpy_toiovec(iov, kdata, len);
iovec_idx = 0;
while (iovec_idx < pinned_list->nr_iovecs) {
struct dma_page_list *page_list;
/* skip already used-up iovecs */
while (!iov[iovec_idx].iov_len)
iovec_idx++;
page_list = &pinned_list->page_list[iovec_idx];
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
/* break up copies to not cross page boundary */
while (iov[iovec_idx].iov_len) {
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
copy = min_t(int, copy, iov[iovec_idx].iov_len);
dma_cookie = dma_async_memcpy_buf_to_pg(chan,
page_list->pages[page_idx],
iov_byte_offset,
kdata,
copy);
/* poll for a descriptor slot */
if (unlikely(dma_cookie < 0)) {
dma_async_issue_pending(chan);
continue;
}
len -= copy;
iov[iovec_idx].iov_len -= copy;
iov[iovec_idx].iov_base += copy;
if (!len)
return dma_cookie;
kdata += copy;
iov_byte_offset = 0;
page_idx++;
}
iovec_idx++;
}
/* really bad if we ever run out of iovecs */
BUG();
return -EFAULT;
}
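The loop above walks the pinned iovec exactly as the comment before dma_memcpy_to_iovec() describes: skip spent entries, then clamp each chunk to whatever is left in the current page, the current iovec, and the overall length. A CPU-only userspace sketch of that splitting logic (memcpy() stands in for the offloaded copy; the 4 KiB page size and the names are assumptions of the sketch):

/* CPU-only sketch of the copy-splitting loop above. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define FAKE_PAGE_SIZE 4096UL

static size_t copy_to_iovec(struct iovec *iov, int nr_iov,
			    const char *kdata, size_t len)
{
	size_t done = 0;
	int i = 0;

	while (len && i < nr_iov) {
		char *dst = iov[i].iov_base;
		size_t off = (unsigned long)dst & (FAKE_PAGE_SIZE - 1);
		size_t copy = FAKE_PAGE_SIZE - off;	/* stay inside the page */

		if (copy > iov[i].iov_len)
			copy = iov[i].iov_len;		/* stay inside the iovec */
		if (copy > len)
			copy = len;

		memcpy(dst, kdata + done, copy);
		done += copy;
		len -= copy;
		iov[i].iov_base = dst + copy;		/* consume the iovec, */
		iov[i].iov_len -= copy;			/* just like the original */
		if (!iov[i].iov_len)
			i++;
	}
	return done;
}

int main(void)
{
	char src[100], a[40], b[60];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	memset(src, 'x', sizeof(src));
	printf("copied %zu bytes\n", copy_to_iovec(iov, 2, src, sizeof(src)));
	return 0;
}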
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, struct page *page,
unsigned int offset, size_t len)
{
int iov_byte_offset;
int copy;
dma_cookie_t dma_cookie = 0;
int iovec_idx;
int page_idx;
int err;
/* this needs as-yet-unimplemented buf-to-buff, so punt. */
/* TODO: use dma for this */
if (!chan || !pinned_list) {
u8 *vaddr = kmap(page);
err = memcpy_toiovec(iov, vaddr + offset, len);
kunmap(page);
return err;
}
iovec_idx = 0;
while (iovec_idx < pinned_list->nr_iovecs) {
struct dma_page_list *page_list;
/* skip already used-up iovecs */
while (!iov[iovec_idx].iov_len)
iovec_idx++;
page_list = &pinned_list->page_list[iovec_idx];
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
/* break up copies to not cross page boundary */
while (iov[iovec_idx].iov_len) {
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
copy = min_t(int, copy, iov[iovec_idx].iov_len);
dma_cookie = dma_async_memcpy_pg_to_pg(chan,
page_list->pages[page_idx],
iov_byte_offset,
page,
offset,
copy);
/* poll for a descriptor slot */
if (unlikely(dma_cookie < 0)) {
dma_async_issue_pending(chan);
continue;
}
len -= copy;
iov[iovec_idx].iov_len -= copy;
iov[iovec_idx].iov_base += copy;
if (!len)
return dma_cookie;
offset += copy;
iov_byte_offset = 0;
page_idx++;
}
iovec_idx++;
}
/* really bad if we ever run out of iovecs */
BUG();
return -EFAULT;
}
@@ -903,18 +903,6 @@ static inline void dmaengine_put(void)
}
#endif
#ifdef CONFIG_NET_DMA
#define net_dmaengine_get() dmaengine_get()
#define net_dmaengine_put() dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
@@ -936,16 +924,8 @@ async_dma_find_channel(enum dma_transaction_type type)
return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan);
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
...
@@ -28,7 +28,6 @@
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
@@ -515,11 +514,8 @@ struct sk_buff {
/* 6/8 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
-    union {
-        unsigned int napi_id;
-        dma_cookie_t dma_cookie;
-    };
+#ifdef CONFIG_NET_RX_BUSY_POLL
+    unsigned int napi_id;
 #endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
...
@@ -19,7 +19,6 @@
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -169,13 +168,6 @@ struct tcp_sock {
struct iovec *iov;
int memory;
int len;
#ifdef CONFIG_NET_DMA
/* members for async copy */
struct dma_chan *dma_chan;
int wakeup;
struct dma_pinned_list *pinned_list;
dma_cookie_t dma_cookie;
#endif
} ucopy;
u32 snd_wl1; /* Sequence for window update */
...
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef NETDMA_H
#define NETDMA_H
#ifdef CONFIG_NET_DMA
#include <linux/dmaengine.h>
#include <linux/skbuff.h>
int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list);
#endif /* CONFIG_NET_DMA */
#endif /* NETDMA_H */
@@ -231,7 +231,6 @@ struct cg_proto;
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_write_queue: Packet sending queue
* @sk_async_wait_queue: DMA copied packets
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
@@ -354,10 +353,6 @@ struct sock {
struct sk_filter __rcu *sk_filter;
struct socket_wq __rcu *sk_wq;
#ifdef CONFIG_NET_DMA
struct sk_buff_head sk_async_wait_queue;
#endif
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
#endif
@@ -2214,27 +2209,15 @@ void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
* @skb: socket buffer to eat
* @copied_early: flag indicating whether DMA operations copied this data early
*
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
*/
-#ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
-{
-    __skb_unlink(skb, &sk->sk_receive_queue);
-    if (!copied_early)
-        __kfree_skb(skb);
-    else
-        __skb_queue_tail(&sk->sk_async_wait_queue, skb);
-}
-#else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
     __skb_unlink(skb, &sk->sk_receive_queue);
     __kfree_skb(skb);
 }
-#endif
static inline
struct net *sock_net(const struct sock *sk)
...
@@ -27,7 +27,6 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
@@ -267,7 +266,6 @@ extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
@@ -1023,12 +1021,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
tp->ucopy.len = 0;
tp->ucopy.memory = 0;
skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
tp->ucopy.dma_chan = NULL;
tp->ucopy.wakeup = 0;
tp->ucopy.pinned_list = NULL;
tp->ucopy.dma_cookie = 0;
#endif
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
...
@@ -390,7 +390,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
{ CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
{ CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
{ CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
{ CTL_INT, NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
{ CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
{ CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
{ CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
...
@@ -16,7 +16,6 @@ obj-y += net-sysfs.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_NETPOLL) += netpoll.o
obj-$(CONFIG_NET_DMA) += user_dma.o
obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
...
@@ -1266,7 +1266,6 @@ static int __dev_open(struct net_device *dev)
clear_bit(__LINK_STATE_START, &dev->state);
else {
dev->flags |= IFF_UP;
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -1342,7 +1341,6 @@ static int __dev_close_many(struct list_head *head)
ops->ndo_stop(dev);
dev->flags &= ~IFF_UP;
net_dmaengine_put();
}
return 0;
@@ -4405,14 +4403,6 @@ static void net_rx_action(struct softirq_action *h)
out:
net_rps_action_and_irq_enable(sd);
#ifdef CONFIG_NET_DMA
/*
* There may not be any more sk_buffs coming right now, so push
* any pending DMA copies to hardware
*/
dma_issue_pending_all();
#endif
return;
softnet_break:
...
@@ -1452,9 +1452,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
atomic_set(&newsk->sk_omem_alloc, 0);
skb_queue_head_init(&newsk->sk_receive_queue);
skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif
spin_lock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
@@ -2265,9 +2262,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
skb_queue_head_init(&sk->sk_receive_queue);
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
skb_queue_head_init(&sk->sk_async_wait_queue);
#endif
sk->sk_send_head = NULL;
...
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
* Portions based on net/core/datagram.c and copyrighted by their authors.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code allows the net stack to make use of a DMA engine for
* skb to iovec copies.
*/
#include <linux/dmaengine.h>
#include <linux/socket.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/netdma.h>
#define NET_DMA_DEFAULT_COPYBREAK 4096
int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
/**
* dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
* @skb - buffer to copy
* @offset - offset in the buffer to start copying from
* @iovec - io vector to copy to
* @len - amount of data to copy from buffer to iovec
* @pinned_list - locked iovec buffer data
*
* Note: the iovec is modified during the copy.
*/
int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
dma_cookie_t cookie = 0;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
skb->data + offset, copy);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
end = start + skb_frag_size(frag);
copy = end - offset;
if (copy > 0) {
struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
frag->page_offset + offset - start, copy);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
start = end;
}
skb_walk_frags(skb, frag_iter) {
int end;
WARN_ON(start > offset + len);
end = start + frag_iter->len;
copy = end - offset;
if (copy > 0) {
if (copy > len)
copy = len;
cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
offset - start,
to, copy,
pinned_list);
if (cookie < 0)
goto fault;
len -= copy;
if (len == 0)
goto end;
offset += copy;
}
start = end;
}
end:
if (!len) {
skb->dma_cookie = cookie;
return cookie;
}
fault:
return -EFAULT;
}
@@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
default:
dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type));
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
}
verify_sock_status:
if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
break;
} while (1);
out:
...
@@ -635,15 +635,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
#ifdef CONFIG_NET_DMA
{
.procname = "tcp_dma_copybreak",
.data = &sysctl_tcp_dma_copybreak,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
#endif
{
.procname = "tcp_slow_start_after_idle",
.data = &sysctl_tcp_slow_start_after_idle,
...
@@ -274,7 +274,6 @@
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>
#include <asm/uaccess.h>
@@ -1454,39 +1453,6 @@ static void tcp_prequeue_process(struct sock *sk)
tp->ucopy.memory = 0;
}
#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
dma_cookie_t done, used;
dma_cookie_t last_issued;
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan)
return;
last_issued = tp->ucopy.dma_cookie;
dma_async_issue_pending(tp->ucopy.dma_chan);
do {
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
&used) == DMA_COMPLETE) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
} else {
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
used) == DMA_COMPLETE)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
}
} while (wait);
}
#endif
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
@@ -1504,7 +1470,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
* splitted a fat GRO packet, while we released socket lock
* in skb_splice_bits()
*/
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
}
return NULL;
}
@@ -1570,11 +1536,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
continue;
}
if (tcp_hdr(skb)->fin) {
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
++seq;
break;
}
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
if (!desc->count)
break;
tp->copied_seq = seq;
@@ -1612,7 +1578,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
bool copied_early = false;
struct sk_buff *skb;
u32 urg_hole = 0;
@@ -1655,28 +1620,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
#ifdef CONFIG_NET_DMA
tp->ucopy.dma_chan = NULL;
preempt_disable();
skb = skb_peek_tail(&sk->sk_receive_queue);
{
int available = 0;
if (skb)
available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
if ((available < target) &&
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
!sysctl_tcp_low_latency &&
net_dma_find_channel()) {
preempt_enable();
tp->ucopy.pinned_list =
dma_pin_iovec_pages(msg->msg_iov, len);
} else {
preempt_enable();
}
}
#endif
do {
u32 offset;
@@ -1807,16 +1750,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* __ Set realtime policy in scheduler __ */
}
#ifdef CONFIG_NET_DMA
if (tp->ucopy.dma_chan) {
if (tp->rcv_wnd == 0 &&
!skb_queue_empty(&sk->sk_async_wait_queue)) {
tcp_service_net_dma(sk, true);
tcp_cleanup_rbuf(sk, copied);
} else
dma_async_issue_pending(tp->ucopy.dma_chan);
}
#endif
if (copied >= target) {
/* Do not sleep, just process backlog. */
release_sock(sk);
@@ -1824,11 +1757,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
} else
sk_wait_data(sk, &timeo);
#ifdef CONFIG_NET_DMA
tcp_service_net_dma(sk, false); /* Don't block */
tp->ucopy.wakeup = 0;
#endif
if (user_recv) {
int chunk;
@@ -1886,43 +1814,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
if (!(flags & MSG_TRUNC)) {
-#ifdef CONFIG_NET_DMA
-        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-            tp->ucopy.dma_chan = net_dma_find_channel();
-        if (tp->ucopy.dma_chan) {
-            tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
-                tp->ucopy.dma_chan, skb, offset,
-                msg->msg_iov, used,
-                tp->ucopy.pinned_list);
-            if (tp->ucopy.dma_cookie < 0) {
-                pr_alert("%s: dma_cookie < 0\n",
-                     __func__);
-                /* Exception. Bailout! */
-                if (!copied)
-                    copied = -EFAULT;
-                break;
-            }
-            dma_async_issue_pending(tp->ucopy.dma_chan);
-            if ((offset + used) == skb->len)
-                copied_early = true;
-        } else
-#endif
-        {
-            err = skb_copy_datagram_iovec(skb, offset,
-                    msg->msg_iov, used);
-            if (err) {
-                /* Exception. Bailout! */
-                if (!copied)
-                    copied = -EFAULT;
-                break;
-            }
+        err = skb_copy_datagram_iovec(skb, offset,
+                    msg->msg_iov, used);
+        if (err) {
+            /* Exception. Bailout! */
+            if (!copied)
+                copied = -EFAULT;
+            break;
         }
     }
@@ -1942,19 +1840,15 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
-    if (!(flags & MSG_PEEK)) {
-        sk_eat_skb(sk, skb, copied_early);
-        copied_early = false;
-    }
+    if (!(flags & MSG_PEEK))
+        sk_eat_skb(sk, skb);
     continue;
 found_fin_ok:
     /* Process the FIN. */
     ++*seq;
-    if (!(flags & MSG_PEEK)) {
-        sk_eat_skb(sk, skb, copied_early);
-        copied_early = false;
-    }
+    if (!(flags & MSG_PEEK))
+        sk_eat_skb(sk, skb);
     break;
 } while (len > 0);
@@ -1977,16 +1871,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tp->ucopy.len = 0;
}
#ifdef CONFIG_NET_DMA
tcp_service_net_dma(sk, true); /* Wait for queue to drain */
tp->ucopy.dma_chan = NULL;
if (tp->ucopy.pinned_list) {
dma_unpin_iovec_pages(tp->ucopy.pinned_list);
tp->ucopy.pinned_list = NULL;
}
#endif
/* According to UNIX98, msg_name/msg_namelen are ignored
* on connected socket. I was just happy when found this 8) --ANK
*/
@@ -2330,9 +2214,6 @@ int tcp_disconnect(struct sock *sk, int flags)
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
__skb_queue_purge(&sk->sk_async_wait_queue);
#endif
inet->inet_dport = 0;
...
@@ -73,7 +73,6 @@
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>
int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
@@ -4970,53 +4969,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
__tcp_checksum_complete_user(sk, skb);
}
#ifdef CONFIG_NET_DMA
static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
int dma_cookie;
bool copied_early = false;
if (tp->ucopy.wakeup)
return false;
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
skb, hlen,
tp->ucopy.iov, chunk,
tp->ucopy.pinned_list);
if (dma_cookie < 0)
goto out;
tp->ucopy.dma_cookie = dma_cookie;
copied_early = true;
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
tcp_rcv_space_adjust(sk);
if ((tp->ucopy.len == 0) ||
(tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
(atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
tp->ucopy.wakeup = 1;
sk->sk_data_ready(sk, 0);
}
} else if (chunk > 0) {
tp->ucopy.wakeup = 1;
sk->sk_data_ready(sk, 0);
}
out:
return copied_early;
}
#endif /* CONFIG_NET_DMA */
/* Does PAWS and seqno based validation of an incoming segment, flags will
* play significant role here.
*/
@@ -5201,14 +5153,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
if (tp->ucopy.task == current &&
sock_owned_by_user(sk) &&
tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
eaten = 1;
}
#endif
if (tp->ucopy.task == current &&
sock_owned_by_user(sk) && !copied_early) {
__set_current_state(TASK_RUNNING);
@@ -5274,11 +5218,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
__tcp_ack_snd_check(sk, 0);
no_ack:
#ifdef CONFIG_NET_DMA
if (copied_early)
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
else
#endif
if (eaten)
kfree_skb_partial(skb, fragstolen);
sk->sk_data_ready(sk, 0);
...
@@ -72,7 +72,6 @@
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>
@@ -1999,18 +1998,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-        struct tcp_sock *tp = tcp_sk(sk);
-        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-            tp->ucopy.dma_chan = net_dma_find_channel();
-        if (tp->ucopy.dma_chan)
+        if (!tcp_prequeue(sk, skb))
             ret = tcp_v4_do_rcv(sk, skb);
-        else
-#endif
-        {
-            if (!tcp_prequeue(sk, skb))
-                ret = tcp_v4_do_rcv(sk, skb);
-        }
} else if (unlikely(sk_add_backlog(sk, skb,
sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
@@ -2169,11 +2158,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
}
#endif
#ifdef CONFIG_NET_DMA
/* Cleans up our sk_async_wait_queue */
__skb_queue_purge(&sk->sk_async_wait_queue);
#endif
/* Clean prequeue, it must be empty really */
__skb_queue_purge(&tp->ucopy.prequeue);
...
@@ -59,7 +59,6 @@
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
@@ -1520,18 +1519,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-        struct tcp_sock *tp = tcp_sk(sk);
-        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-            tp->ucopy.dma_chan = net_dma_find_channel();
-        if (tp->ucopy.dma_chan)
+        if (!tcp_prequeue(sk, skb))
             ret = tcp_v6_do_rcv(sk, skb);
-        else
-#endif
-        {
-            if (!tcp_prequeue(sk, skb))
-                ret = tcp_v6_do_rcv(sk, skb);
-        }
} else if (unlikely(sk_add_backlog(sk, skb,
sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
...
@@ -839,7 +839,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
@@ -861,10 +861,10 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-sk_eat_skb(sk, skb, false);
+sk_eat_skb(sk, skb);
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
*seq = 0;
}
goto out;
...