Commit f541a945 authored by Andrew Morton, committed by David S. Miller

[SPARSE]: Fix warnings in net/sctp/

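The changes below are behavior-preserving cleanups: while-loops that test
the value of an assignment gain an explicit != NULL comparison, a plain
integer 0 used where a pointer is expected becomes NULL, and a few static
helpers move above their first use so their forward declarations can be
dropped. As an illustration of the two recurring patterns (a hypothetical
queue, not net/sctp code):

    #include <stddef.h>

    struct item {
    	struct item *next;
    };

    /* Pop the head of a singly linked list, or return NULL when empty. */
    static struct item *dequeue(struct item **head)
    {
    	struct item *it = *head;

    	if (it != NULL)
    		*head = it->next;
    	return it;
    }

    static void drain(struct item **head)
    {
    	struct item *it;

    	/* Before: while ((it = dequeue(head)))
    	 * After: the pointer test is spelled out; identical semantics.
    	 */
    	while ((it = dequeue(head)) != NULL)
    		;	/* the real code frees each chunk here */

    	/* Before: *head = 0;  (sparse: "using plain integer as NULL pointer") */
    	*head = NULL;
    }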
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@redhat.com>
parent ed67d80f
@@ -78,7 +78,7 @@ void sctp_inq_free(struct sctp_inq *queue)
struct sctp_chunk *chunk;
/* Empty the queue. */
-	while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)))
+	while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL)
sctp_chunk_free(chunk);
/* If there is a packet which is currently being worked on,
......
@@ -133,7 +133,7 @@ void sctp_packet_free(struct sctp_packet *packet)
SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
sctp_chunk_free(chunk);
if (packet->malloced)
@@ -370,7 +370,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* [This whole comment explains WORD_ROUND() below.]
*/
SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
if (sctp_chunk_is_data(chunk)) {
if (!chunk->has_tsn) {
@@ -511,7 +511,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* will get resent or dropped later.
*/
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
......
@@ -245,7 +245,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
/* Throw away unacknowledged chunks. */
list_for_each(pos, &q->asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
-		while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
+		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
/* Mark as part of a failed message. */
@@ -282,7 +282,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
}
/* Throw away any leftover data chunks. */
-	while ((chunk = sctp_outq_dequeue_data(q))) {
+	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* Mark as send failure. */
sctp_chunk_fail(chunk, q->error);
@@ -292,7 +292,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
q->error = 0;
/* Throw away any leftover control chunks. */
-	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
+	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
sctp_chunk_free(chunk);
}
@@ -681,7 +681,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
queue = &q->control;
-	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
+	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
/* Pick the right transport to use. */
new_transport = chunk->transport;
@@ -812,7 +812,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
start_timer = 0;
queue = &q->out;
-	while ((chunk = sctp_outq_dequeue_data(q))) {
+	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
@@ -866,7 +866,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
"%p skb->users %d.\n",
ntohl(chunk->subh.data_hdr->tsn),
-				  chunk->skb ?chunk->skb->head : 0,
+				  chunk->skb ?chunk->skb->head : NULL,
chunk->skb ?
atomic_read(&chunk->skb->users) : -1);
......
@@ -101,7 +101,7 @@ __init int sctp_proc_init(void)
{
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
ent = proc_mkdir("net/sctp", 0);
ent = proc_mkdir("net/sctp", NULL);
if (ent) {
ent->owner = THIS_MODULE;
proc_net_sctp = ent;
......
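(The proc.c hunk above passes NULL rather than 0 as proc_mkdir()'s second
argument; that parameter is a struct proc_dir_entry * naming the parent
directory, and a NULL parent resolves "net/sctp" from the /proc root. For
context, the 2.6-era declaration from <linux/proc_fs.h>:)

    struct proc_dir_entry *proc_mkdir(const char *name,
    				      struct proc_dir_entry *parent);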
@@ -995,7 +995,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
-	new_addr = 0;
+	new_addr = NULL;
found = 0;
list_for_each(pos, &new_asoc->peer.transport_addr_list) {
......
@@ -86,8 +86,6 @@
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static inline int sctp_wspace(struct sctp_association *asoc);
-static inline void sctp_set_owner_w(struct sctp_chunk *chunk);
-static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
@@ -95,7 +93,8 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
-static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
+static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
+					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
@@ -111,6 +110,64 @@ static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern kmem_cache_t *sctp_bucket_cachep;
extern int sctp_assoc_valid(struct sock *sk, struct sctp_association *asoc);
+/* Get the sndbuf space available at the time on the association. */
+static inline int sctp_wspace(struct sctp_association *asoc)
+{
+	struct sock *sk = asoc->base.sk;
+	int amt = 0;
+
+	amt = sk->sk_sndbuf - asoc->sndbuf_used;
+	if (amt < 0)
+		amt = 0;
+	return amt;
+}
+
+/* Increment the used sndbuf space count of the corresponding association by
+ * the size of the outgoing data chunk.
+ * Also, set the skb destructor for sndbuf accounting later.
+ *
+ * Since it is always 1-1 between chunk and skb, and also a new skb is always
+ * allocated for chunk bundling in sctp_packet_transmit(), we can use the
+ * destructor in the data chunk skb for the purpose of the sndbuf space
+ * tracking.
+ */
+static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+{
+	struct sctp_association *asoc = chunk->asoc;
+	struct sock *sk = asoc->base.sk;
+
+	/* The sndbuf space is tracked per association. */
+	sctp_association_hold(asoc);
+
+	chunk->skb->destructor = sctp_wfree;
+	/* Save the chunk pointer in skb for sctp_wfree to use later. */
+	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
+
+	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
+	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+}
+
+/* Verify that this is a valid address. */
+static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+				   int len)
+{
+	struct sctp_af *af;
+
+	/* Verify basic sockaddr. */
+	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
+	if (!af)
+		return -EINVAL;
+
+	/* Is this a valid SCTP address? */
+	if (!af->addr_valid(addr, sctp_sk(sk)))
+		return -EINVAL;
+
+	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
+		return -EINVAL;
+
+	return 0;
+}
/* Look up the association by its id. If this is not a UDP-style
* socket, the ID field is always ignored.
*/
@@ -1008,7 +1065,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct sctp_sndrcvinfo *sinfo;
struct sctp_initmsg *sinit;
sctp_assoc_t associd = NULL;
-	sctp_cmsgs_t cmsgs = { 0 };
+	sctp_cmsgs_t cmsgs = { NULL };
int err;
sctp_scope_t scope;
long timeo;
@@ -4144,64 +4201,6 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
return NULL;
}
-/* Verify that this is a valid address. */
-static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
-				   int len)
-{
-	struct sctp_af *af;
-
-	/* Verify basic sockaddr. */
-	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
-	if (!af)
-		return -EINVAL;
-
-	/* Is this a valid SCTP address? */
-	if (!af->addr_valid(addr, sctp_sk(sk)))
-		return -EINVAL;
-
-	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
-		return -EINVAL;
-
-	return 0;
-}
-
-/* Get the sndbuf space available at the time on the association. */
-static inline int sctp_wspace(struct sctp_association *asoc)
-{
-	struct sock *sk = asoc->base.sk;
-	int amt = 0;
-
-	amt = sk->sk_sndbuf - asoc->sndbuf_used;
-	if (amt < 0)
-		amt = 0;
-	return amt;
-}
-
-/* Increment the used sndbuf space count of the corresponding association by
- * the size of the outgoing data chunk.
- * Also, set the skb destructor for sndbuf accounting later.
- *
- * Since it is always 1-1 between chunk and skb, and also a new skb is always
- * allocated for chunk bundling in sctp_packet_transmit(), we can use the
- * destructor in the data chunk skb for the purpose of the sndbuf space
- * tracking.
- */
-static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
-{
-	struct sctp_association *asoc = chunk->asoc;
-	struct sock *sk = asoc->base.sk;
-
-	/* The sndbuf space is tracked per association. */
-	sctp_association_hold(asoc);
-
-	chunk->skb->destructor = sctp_wfree;
-	/* Save the chunk pointer in skb for sctp_wfree to use later. */
-	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
-
-	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
-	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
-}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
......
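(Context for the socket.c hunks above: sctp_set_owner_w() saves the chunk
pointer in skb->cb "for sctp_wfree to use later", but sctp_wfree() itself
never appears in this diff. A hedged sketch of what that destructor has to
undo, built only from the names visible above; the actual body may differ:)

    /* Reverse of sctp_set_owner_w(): recover the chunk pointer stashed in
     * skb->cb, return the charged sndbuf space, wake any sndbuf waiters,
     * and drop the association hold taken when the chunk was queued.
     */
    static void sctp_wfree(struct sk_buff *skb)
    {
    	struct sctp_chunk *chunk = *((struct sctp_chunk **)(skb->cb));
    	struct sctp_association *asoc = chunk->asoc;
    	struct sock *sk = asoc->base.sk;

    	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
    	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);

    	__sctp_write_space(asoc);	/* wake per-association sndbuf waiters */
    	sctp_association_put(asoc);
    }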
@@ -48,13 +48,23 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
-					   const struct sctp_association *asoc);
-static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event);
static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
struct sctp_association *asoc);
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
+/* Stub skb destructor. */
+static void sctp_stub_rfree(struct sk_buff *skb)
+{
+/* WARNING:  This function is just a warning not to use the
+ * skb destructor.  If the skb is shared, we may get the destructor
+ * callback on some processor that does not own the sock_lock.  This
+ * was occurring with PACKET socket applications that were monitoring
+ * our skbs.  We can't take the sock_lock, because we can't risk
+ * recursing if we do really own the sock lock.  Instead, do all
+ * of our rwnd manipulation while we own the sock_lock outright.
+ */
+}
/* Create a new sctp_ulpevent. */
struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int gfp)
{
@@ -87,6 +97,30 @@ int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
}
+/* Hold the association in case the msg_name needs read out of
+ * the association.
+ */
+static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
+					   const struct sctp_association *asoc)
+{
+	struct sk_buff *skb;
+
+	/* Cast away the const, as we are just wanting to
+	 * bump the reference count.
+	 */
+	sctp_association_hold((struct sctp_association *)asoc);
+	skb = sctp_event2skb(event);
+	skb->sk = asoc->base.sk;
+	event->asoc = (struct sctp_association *)asoc;
+	skb->destructor = sctp_stub_rfree;
+}
+
+/* A simple destructor to give up the reference to the association. */
+static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
+{
+	sctp_association_put(event->asoc);
+}
/* Create and initialize an SCTP_ASSOC_CHANGE event.
*
* 5.3.1.1 SCTP_ASSOC_CHANGE
@@ -789,43 +823,6 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
}
-/* Stub skb destructor. */
-static void sctp_stub_rfree(struct sk_buff *skb)
-{
-/* WARNING:  This function is just a warning not to use the
- * skb destructor.  If the skb is shared, we may get the destructor
- * callback on some processor that does not own the sock_lock.  This
- * was occurring with PACKET socket applications that were monitoring
- * our skbs.  We can't take the sock_lock, because we can't risk
- * recursing if we do really own the sock lock.  Instead, do all
- * of our rwnd manipulation while we own the sock_lock outright.
- */
-}
-
-/* Hold the association in case the msg_name needs read out of
- * the association.
- */
-static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
-					   const struct sctp_association *asoc)
-{
-	struct sk_buff *skb;
-
-	/* Cast away the const, as we are just wanting to
-	 * bump the reference count.
-	 */
-	sctp_association_hold((struct sctp_association *)asoc);
-	skb = sctp_event2skb(event);
-	skb->sk = asoc->base.sk;
-	event->asoc = (struct sctp_association *)asoc;
-	skb->destructor = sctp_stub_rfree;
-}
-
-/* A simple destructor to give up the reference to the association. */
-static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
-{
-	sctp_association_put(event->asoc);
-}
/* Do accounting for bytes received and hold a reference to the association
* for each skb.
*/
......
@@ -49,10 +49,10 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						      struct sctp_ulpevent *);
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
-						    struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					      struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
+					      struct sctp_ulpevent *);
/* 1st Level Abstractions */
@@ -97,12 +97,12 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
struct sk_buff *skb;
struct sctp_ulpevent *event;
-	while ((skb = __skb_dequeue(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
-	while ((skb = __skb_dequeue(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
@@ -466,8 +466,8 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
-static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						    struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
@@ -645,8 +645,8 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
}
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
-						    struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
@@ -756,7 +756,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
tsnmap = &ulpq->asoc->peer.tsn_map;
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->tsn;
@@ -782,7 +782,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
tsnmap = &ulpq->asoc->peer.tsn_map;
/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->tsn;
......
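(Both renege helpers above share one pattern: pop skbs from the tail of a
queue, newest TSNs first, and stop once enough receive-buffer space has
been recovered. A condensed sketch using only calls visible in the hunks;
the helper name is ours, and the real functions also update the
association's tsn_map, which is elided here:)

    static __u16 sctp_renege_queue(struct sk_buff_head *list, __u16 needed)
    {
    	__u16 freed = 0;
    	struct sk_buff *skb;

    	/* Walk backwards: the newest events are the cheapest to give up. */
    	while ((skb = __skb_dequeue_tail(list)) != NULL) {
    		freed += skb_headlen(skb);
    		sctp_ulpevent_free(sctp_skb2event(skb));
    		if (freed >= needed)
    			break;
    	}
    	return freed;
    }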