Commit a6ca1094 authored by Ying Xue's avatar Ying Xue Committed by David S. Miller

tipc: use generic SKB list APIs to manage TIPC outgoing packet chains

Use standard SKB list APIs associated with struct sk_buff_head to
manage socket outgoing packet chain and name table outgoing packet
chain, making the relevant code simpler and more readable.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f03273f1
...@@ -398,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg) ...@@ -398,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
* and to identified node local sockets * and to identified node local sockets
* @buf: chain of buffers containing message * @list: chain of buffers containing message
* Consumes the buffer chain, except when returning -ELINKCONG * Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/ */
int tipc_bclink_xmit(struct sk_buff *buf) int tipc_bclink_xmit(struct sk_buff_head *list)
{ {
int rc = 0; int rc = 0;
int bc = 0; int bc = 0;
struct sk_buff *clbuf; struct sk_buff *skb;
/* Prepare clone of message for local node */ /* Prepare clone of message for local node */
clbuf = tipc_msg_reassemble(buf); skb = tipc_msg_reassemble(list);
if (unlikely(!clbuf)) { if (unlikely(!skb)) {
kfree_skb_list(buf); __skb_queue_purge(list);
return -EHOSTUNREACH; return -EHOSTUNREACH;
} }
...@@ -419,7 +419,7 @@ int tipc_bclink_xmit(struct sk_buff *buf) ...@@ -419,7 +419,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
if (likely(bclink)) { if (likely(bclink)) {
tipc_bclink_lock(); tipc_bclink_lock();
if (likely(bclink->bcast_nodes.count)) { if (likely(bclink->bcast_nodes.count)) {
rc = __tipc_link_xmit(bcl, buf); rc = __tipc_link_xmit(bcl, list);
if (likely(!rc)) { if (likely(!rc)) {
u32 len = skb_queue_len(&bcl->outqueue); u32 len = skb_queue_len(&bcl->outqueue);
...@@ -433,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf) ...@@ -433,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
} }
if (unlikely(!bc)) if (unlikely(!bc))
kfree_skb_list(buf); __skb_queue_purge(list);
/* Deliver message clone */ /* Deliver message clone */
if (likely(!rc)) if (likely(!rc))
tipc_sk_mcast_rcv(clbuf); tipc_sk_mcast_rcv(skb);
else else
kfree_skb(clbuf); kfree_skb(skb);
return rc; return rc;
} }
......
...@@ -100,7 +100,7 @@ int tipc_bclink_reset_stats(void); ...@@ -100,7 +100,7 @@ int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit); int tipc_bclink_set_queue_limits(u32 limit);
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
uint tipc_bclink_get_mtu(void); uint tipc_bclink_get_mtu(void);
int tipc_bclink_xmit(struct sk_buff *buf); int tipc_bclink_xmit(struct sk_buff_head *list);
void tipc_bclink_wakeup_users(void); void tipc_bclink_wakeup_users(void);
int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
......
...@@ -664,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) ...@@ -664,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
* - For all other messages we discard the buffer and return -EHOSTUNREACH * - For all other messages we discard the buffer and return -EHOSTUNREACH
* - For TIPC internal messages we also reset the link * - For TIPC internal messages we also reset the link
*/ */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{ {
struct tipc_msg *msg = buf_msg(buf); struct sk_buff *skb = skb_peek(list);
struct tipc_msg *msg = buf_msg(skb);
uint imp = tipc_msg_tot_importance(msg); uint imp = tipc_msg_tot_importance(msg);
u32 oport = msg_tot_origport(msg); u32 oport = msg_tot_origport(msg);
...@@ -679,28 +680,29 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) ...@@ -679,28 +680,29 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
goto drop; goto drop;
if (unlikely(msg_reroute_cnt(msg))) if (unlikely(msg_reroute_cnt(msg)))
goto drop; goto drop;
if (TIPC_SKB_CB(buf)->wakeup_pending) if (TIPC_SKB_CB(skb)->wakeup_pending)
return -ELINKCONG; return -ELINKCONG;
if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp)) if (link_schedule_user(link, oport, skb_queue_len(list), imp))
return -ELINKCONG; return -ELINKCONG;
drop: drop:
kfree_skb_list(buf); __skb_queue_purge(list);
return -EHOSTUNREACH; return -EHOSTUNREACH;
} }
/** /**
* __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
* @link: link to use * @link: link to use
* @skb: chain of buffers containing message * @list: chain of buffers containing message
*
* Consumes the buffer chain, except when returning -ELINKCONG * Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
* user data messages) or -EHOSTUNREACH (all other messages/senders) * user data messages) or -EHOSTUNREACH (all other messages/senders)
* Only the socket functions tipc_send_stream() and tipc_send_packet() need * Only the socket functions tipc_send_stream() and tipc_send_packet() need
* to act on the return value, since they may need to do more send attempts. * to act on the return value, since they may need to do more send attempts.
*/ */
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb) int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
{ {
struct tipc_msg *msg = buf_msg(skb); struct tipc_msg *msg = buf_msg(skb_peek(list));
uint psz = msg_size(msg); uint psz = msg_size(msg);
uint sndlim = link->queue_limit[0]; uint sndlim = link->queue_limit[0];
uint imp = tipc_msg_tot_importance(msg); uint imp = tipc_msg_tot_importance(msg);
...@@ -710,21 +712,21 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb) ...@@ -710,21 +712,21 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
uint bc_last_in = link->owner->bclink.last_in; uint bc_last_in = link->owner->bclink.last_in;
struct tipc_media_addr *addr = &link->media_addr; struct tipc_media_addr *addr = &link->media_addr;
struct sk_buff_head *outqueue = &link->outqueue; struct sk_buff_head *outqueue = &link->outqueue;
struct sk_buff *next; struct sk_buff *skb, *tmp;
/* Match queue limits against msg importance: */ /* Match queue limits against msg importance: */
if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
return tipc_link_cong(link, skb); return tipc_link_cong(link, list);
/* Has valid packet limit been used ? */ /* Has valid packet limit been used ? */
if (unlikely(psz > mtu)) { if (unlikely(psz > mtu)) {
kfree_skb_list(skb); __skb_queue_purge(list);
return -EMSGSIZE; return -EMSGSIZE;
} }
/* Prepare each packet for sending, and add to outqueue: */ /* Prepare each packet for sending, and add to outqueue: */
while (skb) { skb_queue_walk_safe(list, skb, tmp) {
next = skb->next; __skb_unlink(skb, list);
msg = buf_msg(skb); msg = buf_msg(skb);
msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
msg_set_bcast_ack(msg, bc_last_in); msg_set_bcast_ack(msg, bc_last_in);
...@@ -736,7 +738,6 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb) ...@@ -736,7 +738,6 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
link->unacked_window = 0; link->unacked_window = 0;
} else if (tipc_msg_bundle(outqueue, skb, mtu)) { } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
link->stats.sent_bundled++; link->stats.sent_bundled++;
skb = next;
continue; continue;
} else if (tipc_msg_make_bundle(outqueue, skb, mtu, } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
link->addr)) { link->addr)) {
...@@ -750,22 +751,43 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb) ...@@ -750,22 +751,43 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
link->next_out = skb; link->next_out = skb;
} }
seqno++; seqno++;
skb = next;
} }
link->next_out_no = seqno; link->next_out_no = seqno;
return 0; return 0;
} }
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
__skb_queue_head_init(list);
__skb_queue_tail(list, skb);
}
static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
struct sk_buff_head head;
skb2list(skb, &head);
return __tipc_link_xmit(link, &head);
}
int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
{
struct sk_buff_head head;
skb2list(skb, &head);
return tipc_link_xmit(&head, dnode, selector);
}
/** /**
* tipc_link_xmit() is the general link level function for message sending * tipc_link_xmit() is the general link level function for message sending
* @buf: chain of buffers containing message * @list: chain of buffers containing message
* @dsz: amount of user data to be sent * @dsz: amount of user data to be sent
* @dnode: address of destination node * @dnode: address of destination node
* @selector: a number used for deterministic link selection * @selector: a number used for deterministic link selection
* Consumes the buffer chain, except when returning -ELINKCONG * Consumes the buffer chain, except when returning -ELINKCONG
* Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
*/ */
int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
{ {
struct tipc_link *link = NULL; struct tipc_link *link = NULL;
struct tipc_node *node; struct tipc_node *node;
...@@ -776,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) ...@@ -776,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
tipc_node_lock(node); tipc_node_lock(node);
link = node->active_links[selector & 1]; link = node->active_links[selector & 1];
if (link) if (link)
rc = __tipc_link_xmit(link, buf); rc = __tipc_link_xmit(link, list);
tipc_node_unlock(node); tipc_node_unlock(node);
} }
if (link) if (link)
return rc; return rc;
if (likely(in_own_node(dnode))) if (likely(in_own_node(dnode))) {
return tipc_sk_rcv(buf); /* As a node local message chain never contains more than one
* buffer, we just need to dequeue one SKB buffer from the
* head list.
*/
return tipc_sk_rcv(__skb_dequeue(list));
}
__skb_queue_purge(list);
kfree_skb_list(buf);
return rc; return rc;
} }
...@@ -800,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) ...@@ -800,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
*/ */
static void tipc_link_sync_xmit(struct tipc_link *link) static void tipc_link_sync_xmit(struct tipc_link *link)
{ {
struct sk_buff *buf; struct sk_buff *skb;
struct tipc_msg *msg; struct tipc_msg *msg;
buf = tipc_buf_acquire(INT_H_SIZE); skb = tipc_buf_acquire(INT_H_SIZE);
if (!buf) if (!skb)
return; return;
msg = buf_msg(buf); msg = buf_msg(skb);
tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
msg_set_last_bcast(msg, link->owner->bclink.acked); msg_set_last_bcast(msg, link->owner->bclink.acked);
__tipc_link_xmit(link, buf); __tipc_link_xmit_skb(link, skb);
} }
/* /*
...@@ -1053,8 +1080,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) ...@@ -1053,8 +1080,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
u32 ackd; u32 ackd;
u32 released; u32 released;
__skb_queue_head_init(&head); skb2list(skb, &head);
__skb_queue_tail(&head, skb);
while ((skb = __skb_dequeue(&head))) { while ((skb = __skb_dequeue(&head))) {
/* Ensure message is well-formed */ /* Ensure message is well-formed */
...@@ -1573,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, ...@@ -1573,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
u32 selector) u32 selector)
{ {
struct tipc_link *tunnel; struct tipc_link *tunnel;
struct sk_buff *buf; struct sk_buff *skb;
u32 length = msg_size(msg); u32 length = msg_size(msg);
tunnel = l_ptr->owner->active_links[selector & 1]; tunnel = l_ptr->owner->active_links[selector & 1];
...@@ -1582,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, ...@@ -1582,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
return; return;
} }
msg_set_size(tunnel_hdr, length + INT_H_SIZE); msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = tipc_buf_acquire(length + INT_H_SIZE); skb = tipc_buf_acquire(length + INT_H_SIZE);
if (!buf) { if (!skb) {
pr_warn("%sunable to send tunnel msg\n", link_co_err); pr_warn("%sunable to send tunnel msg\n", link_co_err);
return; return;
} }
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
__tipc_link_xmit(tunnel, buf); __tipc_link_xmit_skb(tunnel, skb);
} }
...@@ -1620,7 +1646,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) ...@@ -1620,7 +1646,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
if (skb) { if (skb) {
skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE); msg_set_size(&tunnel_hdr, INT_H_SIZE);
__tipc_link_xmit(tunnel, skb); __tipc_link_xmit_skb(tunnel, skb);
} else { } else {
pr_warn("%sunable to send changeover msg\n", pr_warn("%sunable to send changeover msg\n",
link_co_err); link_co_err);
...@@ -1691,7 +1717,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, ...@@ -1691,7 +1717,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
length); length);
__tipc_link_xmit(tunnel, outskb); __tipc_link_xmit_skb(tunnel, outskb);
if (!tipc_link_is_up(l_ptr)) if (!tipc_link_is_up(l_ptr))
return; return;
} }
......
...@@ -213,8 +213,9 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, ...@@ -213,8 +213,9 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
void tipc_link_reset_all(struct tipc_node *node); void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr); void tipc_link_reset(struct tipc_link *l_ptr);
void tipc_link_reset_list(unsigned int bearer_id); void tipc_link_reset_list(unsigned int bearer_id);
int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf); int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector); u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
void tipc_link_bundle_rcv(struct sk_buff *buf); void tipc_link_bundle_rcv(struct sk_buff *buf);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
......
...@@ -166,11 +166,12 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) ...@@ -166,11 +166,12 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
* @offset: Position in iov to start copying from * @offset: Position in iov to start copying from
* @dsz: Total length of user data * @dsz: Total length of user data
* @pktmax: Max packet size that can be used * @pktmax: Max packet size that can be used
* @chain: Buffer or chain of buffers to be returned to caller * @list: Buffer or chain of buffers to be returned to caller
*
* Returns message data size or errno: -ENOMEM, -EFAULT * Returns message data size or errno: -ENOMEM, -EFAULT
*/ */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
int offset, int dsz, int pktmax , struct sk_buff **chain) int dsz, int pktmax, struct sk_buff_head *list)
{ {
int mhsz = msg_hdr_sz(mhdr); int mhsz = msg_hdr_sz(mhdr);
int msz = mhsz + dsz; int msz = mhsz + dsz;
...@@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, ...@@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int pktrem = pktmax; int pktrem = pktmax;
int drem = dsz; int drem = dsz;
struct tipc_msg pkthdr; struct tipc_msg pkthdr;
struct sk_buff *buf, *prev; struct sk_buff *skb;
char *pktpos; char *pktpos;
int rc; int rc;
uint chain_sz = 0;
msg_set_size(mhdr, msz); msg_set_size(mhdr, msz);
/* No fragmentation needed? */ /* No fragmentation needed? */
if (likely(msz <= pktmax)) { if (likely(msz <= pktmax)) {
buf = tipc_buf_acquire(msz); skb = tipc_buf_acquire(msz);
*chain = buf; if (unlikely(!skb))
if (unlikely(!buf))
return -ENOMEM; return -ENOMEM;
skb_copy_to_linear_data(buf, mhdr, mhsz); __skb_queue_tail(list, skb);
pktpos = buf->data + mhsz; skb_copy_to_linear_data(skb, mhdr, mhsz);
TIPC_SKB_CB(buf)->chain_sz = 1; pktpos = skb->data + mhsz;
if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset, dsz)) if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset,
dsz))
return dsz; return dsz;
rc = -EFAULT; rc = -EFAULT;
goto error; goto error;
...@@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, ...@@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
msg_set_fragm_no(&pkthdr, pktno); msg_set_fragm_no(&pkthdr, pktno);
/* Prepare first fragment */ /* Prepare first fragment */
*chain = buf = tipc_buf_acquire(pktmax); skb = tipc_buf_acquire(pktmax);
if (!buf) if (!skb)
return -ENOMEM; return -ENOMEM;
chain_sz = 1; __skb_queue_tail(list, skb);
pktpos = buf->data; pktpos = skb->data;
skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
pktpos += INT_H_SIZE; pktpos += INT_H_SIZE;
pktrem -= INT_H_SIZE; pktrem -= INT_H_SIZE;
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
pktpos += mhsz; pktpos += mhsz;
pktrem -= mhsz; pktrem -= mhsz;
...@@ -238,28 +239,25 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, ...@@ -238,28 +239,25 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
pktsz = drem + INT_H_SIZE; pktsz = drem + INT_H_SIZE;
else else
pktsz = pktmax; pktsz = pktmax;
prev = buf; skb = tipc_buf_acquire(pktsz);
buf = tipc_buf_acquire(pktsz); if (!skb) {
if (!buf) {
rc = -ENOMEM; rc = -ENOMEM;
goto error; goto error;
} }
chain_sz++; __skb_queue_tail(list, skb);
prev->next = buf;
msg_set_type(&pkthdr, FRAGMENT); msg_set_type(&pkthdr, FRAGMENT);
msg_set_size(&pkthdr, pktsz); msg_set_size(&pkthdr, pktsz);
msg_set_fragm_no(&pkthdr, ++pktno); msg_set_fragm_no(&pkthdr, ++pktno);
skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
pktpos = buf->data + INT_H_SIZE; pktpos = skb->data + INT_H_SIZE;
pktrem = pktsz - INT_H_SIZE; pktrem = pktsz - INT_H_SIZE;
} while (1); } while (1);
TIPC_SKB_CB(*chain)->chain_sz = chain_sz; msg_set_type(buf_msg(skb), LAST_FRAGMENT);
msg_set_type(buf_msg(buf), LAST_FRAGMENT);
return dsz; return dsz;
error: error:
kfree_skb_list(*chain); __skb_queue_purge(list);
*chain = NULL; __skb_queue_head_init(list);
return rc; return rc;
} }
...@@ -430,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) ...@@ -430,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
/* tipc_msg_reassemble() - clone a buffer chain of fragments and /* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message * reassemble the clones into one message
*/ */
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{ {
struct sk_buff *buf = chain; struct sk_buff *skb;
struct sk_buff *frag = buf; struct sk_buff *frag = NULL;
struct sk_buff *head = NULL; struct sk_buff *head = NULL;
int hdr_sz; int hdr_sz;
/* Copy header if single buffer */ /* Copy header if single buffer */
if (!buf->next) { if (skb_queue_len(list) == 1) {
hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); skb = skb_peek(list);
return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
} }
/* Clone all fragments and reassemble */ /* Clone all fragments and reassemble */
while (buf) { skb_queue_walk(list, skb) {
frag = skb_clone(buf, GFP_ATOMIC); frag = skb_clone(skb, GFP_ATOMIC);
if (!frag) if (!frag)
goto error; goto error;
frag->next = NULL; frag->next = NULL;
...@@ -453,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) ...@@ -453,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
break; break;
if (!head) if (!head)
goto error; goto error;
buf = buf->next;
} }
return frag; return frag;
error: error:
......
...@@ -739,9 +739,9 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); ...@@ -739,9 +739,9 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
u32 mtu, u32 dnode); u32 mtu, u32 dnode);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
int offset, int dsz, int mtu , struct sk_buff **chain); int dsz, int mtu, struct sk_buff_head *list);
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain); struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
#endif #endif
...@@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) ...@@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
return buf; return buf;
} }
void named_cluster_distribute(struct sk_buff *buf) void named_cluster_distribute(struct sk_buff *skb)
{ {
struct sk_buff *obuf; struct sk_buff *oskb;
struct tipc_node *node; struct tipc_node *node;
u32 dnode; u32 dnode;
...@@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf) ...@@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf)
continue; continue;
if (!tipc_node_active_links(node)) if (!tipc_node_active_links(node))
continue; continue;
obuf = skb_copy(buf, GFP_ATOMIC); oskb = skb_copy(skb, GFP_ATOMIC);
if (!obuf) if (!oskb)
break; break;
msg_set_destnode(buf_msg(obuf), dnode); msg_set_destnode(buf_msg(oskb), dnode);
tipc_link_xmit(obuf, dnode, dnode); tipc_link_xmit_skb(oskb, dnode, dnode);
} }
rcu_read_unlock(); rcu_read_unlock();
kfree_skb(buf); kfree_skb(skb);
} }
/** /**
...@@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ) ...@@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
/** /**
* named_distribute - prepare name info for bulk distribution to another node * named_distribute - prepare name info for bulk distribution to another node
* @msg_list: list of messages (buffers) to be returned from this function * @list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated * @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain * @pls: linked list of publication items to be packed into buffer chain
*/ */
static void named_distribute(struct list_head *msg_list, u32 dnode, static void named_distribute(struct sk_buff_head *list, u32 dnode,
struct publ_list *pls) struct publ_list *pls)
{ {
struct publication *publ; struct publication *publ;
struct sk_buff *buf = NULL; struct sk_buff *skb = NULL;
struct distr_item *item = NULL; struct distr_item *item = NULL;
uint dsz = pls->size * ITEM_SIZE; uint dsz = pls->size * ITEM_SIZE;
uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
...@@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, ...@@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
list_for_each_entry(publ, &pls->list, local_list) { list_for_each_entry(publ, &pls->list, local_list) {
/* Prepare next buffer: */ /* Prepare next buffer: */
if (!buf) { if (!skb) {
msg_rem = min_t(uint, rem, msg_dsz); msg_rem = min_t(uint, rem, msg_dsz);
rem -= msg_rem; rem -= msg_rem;
buf = named_prepare_buf(PUBLICATION, msg_rem, dnode); skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
if (!buf) { if (!skb) {
pr_warn("Bulk publication failure\n"); pr_warn("Bulk publication failure\n");
return; return;
} }
item = (struct distr_item *)msg_data(buf_msg(buf)); item = (struct distr_item *)msg_data(buf_msg(skb));
} }
/* Pack publication into message: */ /* Pack publication into message: */
...@@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, ...@@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
/* Append full buffer to list: */ /* Append full buffer to list: */
if (!msg_rem) { if (!msg_rem) {
list_add_tail((struct list_head *)buf, msg_list); __skb_queue_tail(list, skb);
buf = NULL; skb = NULL;
} }
} }
} }
...@@ -236,18 +236,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode, ...@@ -236,18 +236,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
*/ */
void tipc_named_node_up(u32 dnode) void tipc_named_node_up(u32 dnode)
{ {
LIST_HEAD(msg_list); struct sk_buff_head head;
struct sk_buff *buf_chain;
__skb_queue_head_init(&head);
read_lock_bh(&tipc_nametbl_lock); read_lock_bh(&tipc_nametbl_lock);
named_distribute(&msg_list, dnode, &publ_cluster); named_distribute(&head, dnode, &publ_cluster);
named_distribute(&msg_list, dnode, &publ_zone); named_distribute(&head, dnode, &publ_zone);
read_unlock_bh(&tipc_nametbl_lock); read_unlock_bh(&tipc_nametbl_lock);
/* Convert circular list to linear list and send: */ tipc_link_xmit(&head, dnode, dnode);
buf_chain = (struct sk_buff *)msg_list.next;
((struct sk_buff *)msg_list.prev)->next = NULL;
tipc_link_xmit(buf_chain, dnode, dnode);
} }
static void tipc_publ_subscribe(struct publication *publ, u32 addr) static void tipc_publ_subscribe(struct publication *publ, u32 addr)
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment