Commit 97bf1aa1 authored by Dean Nelson, committed by Linus Torvalds

sgi-xp: move xpc_allocate() into xpc_send()/xpc_send_notify()

Move xpc_allocate() functionality into xpc_send()/xpc_send_notify() so
xpc_allocate() no longer needs to be called by XPNET.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aaa3cd69
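
For illustration only (not part of the commit), the caller-visible effect can be sketched as follows. MY_CHANNEL, struct my_payload, and build_payload() are placeholder names, and error handling is trimmed:

	/* Old API: allocate a message slot, build the payload in place, send. */
	void *payload;

	ret = xpc_allocate(partid, MY_CHANNEL, XPC_NOWAIT, &payload);
	if (ret != xpSuccess)
		return ret;
	build_payload(payload);		/* construct the message in the slot */
	ret = xpc_send(partid, MY_CHANNEL, payload);

	/* New API: build the payload in a caller-owned buffer and pass a
	 * pointer plus its size; slot allocation and the copy into the
	 * message queue now happen inside xpc_send()/xpc_send_notify(). */
	struct my_payload buf;

	build_payload(&buf);
	ret = xpc_send(partid, MY_CHANNEL, XPC_NOWAIT, &buf, sizeof(buf));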
@@ -116,12 +116,6 @@
  * The size of the payload is defined by the user via xpc_connect(). A user-
  * defined message resides in the payload area.
  *
- * The user should have no dealings with the message header, but only the
- * message's payload. When a message entry is allocated (via xpc_allocate())
- * a pointer to the payload area is returned and not the actual beginning of
- * the XPC message. The user then constructs a message in the payload area
- * and passes that pointer as an argument on xpc_send() or xpc_send_notify().
- *
  * The size of a message entry (within a message queue) must be a cacheline
  * sized multiple in order to facilitate the BTE transfer of messages from one
  * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user
@@ -221,9 +215,10 @@ enum xp_retval {
 	xpBteCopyError,		/* 52: bte_copy() returned error */
 	xpSalError,		/* 53: sn SAL error */
 	xpRsvdPageNotSet,	/* 54: the reserved page is not set up */
+	xpPayloadTooBig,	/* 55: payload too large for message slot */
 
-	xpUnsupported,		/* 55: unsupported functionality or resource */
-	xpUnknownReason		/* 56: unknown reason - must be last in enum */
+	xpUnsupported,		/* 56: unsupported functionality or resource */
+	xpUnknownReason		/* 57: unknown reason - must be last in enum */
 };
 
 /*
@@ -304,16 +299,15 @@ struct xpc_registration {
 
 #define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
 
-/* the following are valid xpc_allocate() flags */
+/* the following are valid xpc_send() or xpc_send_notify() flags */
 #define XPC_WAIT	0	/* wait flag */
 #define XPC_NOWAIT	1	/* no wait flag */
 
 struct xpc_interface {
 	void (*connect) (int);
 	void (*disconnect) (int);
-	enum xp_retval (*allocate) (short, int, u32, void **);
-	enum xp_retval (*send) (short, int, void *);
-	enum xp_retval (*send_notify) (short, int, void *,
+	enum xp_retval (*send) (short, int, u32, void *, u16);
+	enum xp_retval (*send_notify) (short, int, u32, void *, u16,
				       xpc_notify_func, void *);
 	void (*received) (short, int, void *);
 	enum xp_retval (*partid_to_nasids) (short, void *);
@@ -323,10 +317,9 @@ extern struct xpc_interface xpc_interface;
 
 extern void xpc_set_interface(void (*)(int),
			      void (*)(int),
-			      enum xp_retval (*)(short, int, u32, void **),
-			      enum xp_retval (*)(short, int, void *),
-			      enum xp_retval (*)(short, int, void *,
-						 xpc_notify_func, void *),
+			      enum xp_retval (*)(short, int, u32, void *, u16),
+			      enum xp_retval (*)(short, int, u32, void *, u16,
+						 xpc_notify_func, void *),
			      void (*)(short, int, void *),
			      enum xp_retval (*)(short, void *));
 extern void xpc_clear_interface(void);
@@ -336,22 +329,19 @@ extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
 extern void xpc_disconnect(int);
 
 static inline enum xp_retval
-xpc_allocate(short partid, int ch_number, u32 flags, void **payload)
-{
-	return xpc_interface.allocate(partid, ch_number, flags, payload);
-}
-
-static inline enum xp_retval
-xpc_send(short partid, int ch_number, void *payload)
+xpc_send(short partid, int ch_number, u32 flags, void *payload,
+	 u16 payload_size)
 {
-	return xpc_interface.send(partid, ch_number, payload);
+	return xpc_interface.send(partid, ch_number, flags, payload,
+				  payload_size);
 }
 
 static inline enum xp_retval
-xpc_send_notify(short partid, int ch_number, void *payload,
-		xpc_notify_func func, void *key)
+xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
+		u16 payload_size, xpc_notify_func func, void *key)
 {
-	return xpc_interface.send_notify(partid, ch_number, payload, func, key);
+	return xpc_interface.send_notify(partid, ch_number, flags, payload,
+					 payload_size, func, key);
 }
 
 static inline void
......
@@ -58,10 +58,9 @@ xpc_notloaded(void)
 struct xpc_interface xpc_interface = {
 	(void (*)(int))xpc_notloaded,
 	(void (*)(int))xpc_notloaded,
-	(enum xp_retval(*)(short, int, u32, void **))xpc_notloaded,
-	(enum xp_retval(*)(short, int, void *))xpc_notloaded,
-	(enum xp_retval(*)(short, int, void *, xpc_notify_func, void *))
-	    xpc_notloaded,
+	(enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+	(enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+			   void *))xpc_notloaded,
 	(void (*)(short, int, void *))xpc_notloaded,
 	(enum xp_retval(*)(short, void *))xpc_notloaded
 };
@@ -73,16 +72,14 @@ EXPORT_SYMBOL_GPL(xpc_interface);
 void
 xpc_set_interface(void (*connect) (int),
		  void (*disconnect) (int),
-		  enum xp_retval (*allocate) (short, int, u32, void **),
-		  enum xp_retval (*send) (short, int, void *),
-		  enum xp_retval (*send_notify) (short, int, void *,
+		  enum xp_retval (*send) (short, int, u32, void *, u16),
+		  enum xp_retval (*send_notify) (short, int, u32, void *, u16,
						 xpc_notify_func, void *),
		  void (*received) (short, int, void *),
		  enum xp_retval (*partid_to_nasids) (short, void *))
 {
	xpc_interface.connect = connect;
	xpc_interface.disconnect = disconnect;
-	xpc_interface.allocate = allocate;
	xpc_interface.send = send;
	xpc_interface.send_notify = send_notify;
	xpc_interface.received = received;
@@ -98,13 +95,11 @@ xpc_clear_interface(void)
 {
	xpc_interface.connect = (void (*)(int))xpc_notloaded;
	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-	xpc_interface.allocate = (enum xp_retval(*)(short, int, u32,
-						    void **))xpc_notloaded;
-	xpc_interface.send = (enum xp_retval(*)(short, int, void *))
+	xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
	    xpc_notloaded;
-	xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *,
-							xpc_notify_func,
+	xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
+							u16, xpc_notify_func,
							void *))xpc_notloaded;
	xpc_interface.received = (void (*)(short, int, void *))
	    xpc_notloaded;
	xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
......
@@ -624,9 +624,7 @@ extern void (*xpc_IPI_send_closereply) (struct xpc_channel *, unsigned long *);
 extern void (*xpc_IPI_send_openrequest) (struct xpc_channel *, unsigned long *);
 extern void (*xpc_IPI_send_openreply) (struct xpc_channel *, unsigned long *);
 
-extern enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *, u32,
-					   struct xpc_msg **);
-extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, struct xpc_msg *,
+extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16,
				       u8, xpc_notify_func, void *);
 extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *);
@@ -664,9 +662,8 @@ extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
 extern void xpc_initiate_connect(int);
 extern void xpc_initiate_disconnect(int);
 extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *);
-extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
-extern enum xp_retval xpc_initiate_send(short, int, void *);
-extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
+extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16);
+extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16,
					       xpc_notify_func, void *);
 extern void xpc_initiate_received(short, int, void *);
 extern void xpc_process_channel_activity(struct xpc_partition *);
......
@@ -1192,87 +1192,54 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 }
 
 /*
- * Allocate an entry for a message from the message queue associated with the
- * specified channel. NOTE that this routine can sleep waiting for a message
- * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
- *
- * Arguments:
- *
- *	partid - ID of partition to which the channel is connected.
- *	ch_number - channel #.
- *	flags - see xpc.h for valid flags.
- *	payload - address of the allocated payload area pointer (filled in on
- *		  return) in which the user-defined message is constructed.
- */
-enum xp_retval
-xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
-{
-	struct xpc_partition *part = &xpc_partitions[partid];
-	enum xp_retval ret = xpUnknownReason;
-	struct xpc_msg *msg = NULL;
-
-	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
-	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
-
-	*payload = NULL;
-
-	if (xpc_part_ref(part)) {
-		ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
-		xpc_part_deref(part);
-
-		if (msg != NULL)
-			*payload = &msg->payload;
-	}
-
-	return ret;
-}
-
-/*
- * Send a message previously allocated using xpc_initiate_allocate() on the
- * specified channel connected to the specified partition.
+ * Send a message that contains the user's payload on the specified channel
+ * connected to the specified partition.
  *
- * This routine will not wait for the message to be received, nor will
- * notification be given when it does happen. Once this routine has returned
- * the message entry allocated via xpc_initiate_allocate() is no longer
- * accessable to the caller.
+ * NOTE that this routine can sleep waiting for a message entry to become
+ * available. To not sleep, pass in the XPC_NOWAIT flag.
  *
- * This routine, although called by users, does not call xpc_part_ref() to
- * ensure that the partition infrastructure is in place. It relies on the
- * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
+ * Once sent, this routine will not wait for the message to be received, nor
+ * will notification be given when it does happen.
  *
  * Arguments:
  *
  *	partid - ID of partition to which the channel is connected.
  *	ch_number - channel # to send message on.
- *	payload - pointer to the payload area allocated via
- *			xpc_initiate_allocate().
+ *	flags - see xp.h for valid flags.
+ *	payload - pointer to the payload which is to be sent.
+ *	payload_size - size of the payload in bytes.
  */
 enum xp_retval
-xpc_initiate_send(short partid, int ch_number, void *payload)
+xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
+		  u16 payload_size)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
-	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
-	enum xp_retval ret;
+	enum xp_retval ret = xpUnknownReason;
 
-	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
+	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
 		partid, ch_number);
 
 	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
-	DBUG_ON(msg == NULL);
+	DBUG_ON(payload == NULL);
 
-	ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
+	if (xpc_part_ref(part)) {
+		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
+				   payload_size, 0, NULL, NULL);
+		xpc_part_deref(part);
+	}
 
 	return ret;
 }
 
 /*
- * Send a message previously allocated using xpc_initiate_allocate on the
- * specified channel connected to the specified partition.
+ * Send a message that contains the user's payload on the specified channel
+ * connected to the specified partition.
  *
- * This routine will not wait for the message to be sent. Once this routine
- * has returned the message entry allocated via xpc_initiate_allocate() is no
- * longer accessable to the caller.
+ * NOTE that this routine can sleep waiting for a message entry to become
+ * available. To not sleep, pass in the XPC_NOWAIT flag.
+ *
+ * This routine will not wait for the message to be sent or received.
  *
  * Once the remote end of the channel has received the message, the function
  * passed as an argument to xpc_initiate_send_notify() will be called. This
@@ -1282,38 +1249,37 @@ xpc_initiate_send(short partid, int ch_number, void *payload)
  *
  * If this routine returns an error, the caller's function will NOT be called.
  *
- * This routine, although called by users, does not call xpc_part_ref() to
- * ensure that the partition infrastructure is in place. It relies on the
- * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
- *
  * Arguments:
  *
  *	partid - ID of partition to which the channel is connected.
  *	ch_number - channel # to send message on.
- *	payload - pointer to the payload area allocated via
- *			xpc_initiate_allocate().
+ *	flags - see xp.h for valid flags.
+ *	payload - pointer to the payload which is to be sent.
+ *	payload_size - size of the payload in bytes.
  *	func - function to call with asynchronous notification of message
  *		  receipt. THIS FUNCTION MUST BE NON-BLOCKING.
  *	key - user-defined key to be passed to the function when it's called.
  */
 enum xp_retval
-xpc_initiate_send_notify(short partid, int ch_number, void *payload,
-			 xpc_notify_func func, void *key)
+xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
+			 u16 payload_size, xpc_notify_func func, void *key)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
-	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
-	enum xp_retval ret;
+	enum xp_retval ret = xpUnknownReason;
 
-	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
+	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
 		partid, ch_number);
 
 	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
 	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
-	DBUG_ON(msg == NULL);
+	DBUG_ON(payload == NULL);
 	DBUG_ON(func == NULL);
 
-	ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
-			   func, key);
+	if (xpc_part_ref(part)) {
+		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
+				   payload_size, XPC_N_CALL, func, key);
+		xpc_part_deref(part);
+	}
 	return ret;
 }
@@ -1372,7 +1338,7 @@ xpc_deliver_msg(struct xpc_channel *ch)
  *	partid - ID of partition to which the channel is connected.
  *	ch_number - channel # message received on.
  *	payload - pointer to the payload area allocated via
- *			xpc_initiate_allocate().
+ *			xpc_initiate_send() or xpc_initiate_send_notify().
  */
 void
 xpc_initiate_received(short partid, int ch_number, void *payload)
......
@@ -217,12 +217,9 @@ void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
 void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
				unsigned long *irq_flags);
 
-enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *ch, u32 flags,
-				    struct xpc_msg **address_of_msg);
-
-enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, struct xpc_msg *msg,
-				u8 notify_type, xpc_notify_func func,
-				void *key);
+enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
+				void *payload, u16 payload_size, u8 notify_type,
+				xpc_notify_func func, void *key);
 void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);
 
 /*
@@ -1286,9 +1283,8 @@ xpc_init(void)
 	/* set the interface to point at XPC's functions */
 	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
-			  xpc_initiate_allocate, xpc_initiate_send,
-			  xpc_initiate_send_notify, xpc_initiate_received,
-			  xpc_initiate_partid_to_nasids);
+			  xpc_initiate_send, xpc_initiate_send_notify,
+			  xpc_initiate_received, xpc_initiate_partid_to_nasids);
 
 	return 0;
......
@@ -1532,18 +1532,6 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
 	enum xp_retval ret;
 	s64 put;
 
-	/* this reference will be dropped in xpc_send_msg_sn2() */
-	xpc_msgqueue_ref(ch);
-
-	if (ch->flags & XPC_C_DISCONNECTING) {
-		xpc_msgqueue_deref(ch);
-		return ch->reason;
-	}
-	if (!(ch->flags & XPC_C_CONNECTED)) {
-		xpc_msgqueue_deref(ch);
-		return xpNotConnected;
-	}
-
 	/*
 	 * Get the next available message entry from the local message queue.
 	 * If none are available, we'll make sure that we grab the latest
@@ -1582,16 +1570,12 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
 		if (ret == xpTimeout)
 			xpc_IPI_send_local_msgrequest_sn2(ch);
 
-		if (flags & XPC_NOWAIT) {
-			xpc_msgqueue_deref(ch);
+		if (flags & XPC_NOWAIT)
 			return xpNoWait;
-		}
 
 		ret = xpc_allocate_msg_wait(ch);
-		if (ret != xpInterrupted && ret != xpTimeout) {
-			xpc_msgqueue_deref(ch);
+		if (ret != xpInterrupted && ret != xpTimeout)
 			return ret;
-		}
 	}
 
 	/* get the message's address and initialize it */
@@ -1606,7 +1590,6 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
 		 (void *)msg, msg->number, ch->partid, ch->number);
 
 	*address_of_msg = msg;
-
 	return xpSuccess;
 }
@@ -1616,24 +1599,38 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
  * message is being sent to.
  */
 static enum xp_retval
-xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
-		 xpc_notify_func func, void *key)
+xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
+		 u16 payload_size, u8 notify_type, xpc_notify_func func,
+		 void *key)
 {
 	enum xp_retval ret = xpSuccess;
+	struct xpc_msg *msg = msg;
 	struct xpc_notify *notify = notify;
-	s64 put, msg_number = msg->number;
+	s64 msg_number;
+	s64 put;
 
 	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
-	DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) !=
-		msg_number % ch->local_nentries);
-	DBUG_ON(msg->flags & XPC_M_READY);
+
+	if (XPC_MSG_SIZE(payload_size) > ch->msg_size)
+		return xpPayloadTooBig;
+
+	xpc_msgqueue_ref(ch);
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
-		/* drop the reference grabbed in xpc_allocate_msg_sn2() */
-		xpc_msgqueue_deref(ch);
-		return ch->reason;
+		ret = ch->reason;
+		goto out_1;
+	}
+	if (!(ch->flags & XPC_C_CONNECTED)) {
+		ret = xpNotConnected;
+		goto out_1;
 	}
 
+	ret = xpc_allocate_msg_sn2(ch, flags, &msg);
+	if (ret != xpSuccess)
+		goto out_1;
+
+	msg_number = msg->number;
+
 	if (notify_type != 0) {
 		/*
 		 * Tell the remote side to send an ACK interrupt when the
@@ -1663,13 +1660,12 @@ xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 			atomic_dec(&ch->n_to_notify);
 
 			ret = ch->reason;
-
-			/* drop reference grabbed in xpc_allocate_msg_sn2() */
-			xpc_msgqueue_deref(ch);
-			return ret;
+			goto out_1;
 		}
 	}
 
+	memcpy(&msg->payload, payload, payload_size);
+
 	msg->flags |= XPC_M_READY;
 
 	/*
@@ -1684,7 +1680,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 	if (put == msg_number)
 		xpc_send_msgs_sn2(ch, put);
 
-	/* drop the reference grabbed in xpc_allocate_msg_sn2() */
+out_1:
 	xpc_msgqueue_deref(ch);
 	return ret;
 }
@@ -1821,8 +1817,6 @@ xpc_init_sn2(void)
 	xpc_IPI_send_openrequest = xpc_IPI_send_openrequest_sn2;
 	xpc_IPI_send_openreply = xpc_IPI_send_openreply_sn2;
 
-	xpc_allocate_msg = xpc_allocate_msg_sn2;
-
 	xpc_send_msg = xpc_send_msg_sn2;
 	xpc_received_msg = xpc_received_msg_sn2;
 }
......
@@ -438,7 +438,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xpnet_pending_msg *queued_msg;
 	enum xp_retval ret;
-	struct xpnet_message *msg;
+	u8 msg_buffer[XPNET_MSG_SIZE];
+	struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer[0];
 	u64 start_addr, end_addr;
 	long dp;
 	u8 second_mac_octet;
@@ -524,11 +525,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* found a partition to send to */
 
-	ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
-			   XPC_NOWAIT, (void **)&msg);
-	if (unlikely(ret != xpSuccess))
-		continue;
-
 	msg->embedded_bytes = embedded_bytes;
 	if (unlikely(embedded_bytes != 0)) {
 		msg->version = XPNET_VERSION_EMBED;
@@ -553,7 +549,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	atomic_inc(&queued_msg->use_count);
 
-	ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
+	ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT,
+			      &msg, sizeof(msg) + embedded_bytes - 1,
			      xpnet_send_completed, queued_msg);
 	if (unlikely(ret != xpSuccess)) {
 		atomic_dec(&queued_msg->use_count);
......
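
For reference, the consolidated send path in the sn2 backend after this change can be outlined as below. This is a simplified sketch, not the committed code: the notify bookkeeping, the PUT advance, and the interrupt to the remote partition are trimmed, and the _outline suffix marks the name as hypothetical.

	static enum xp_retval
	xpc_send_msg_sn2_outline(struct xpc_channel *ch, u32 flags,
				 void *payload, u16 payload_size)
	{
		struct xpc_msg *msg;
		enum xp_retval ret;

		/* reject payloads that cannot fit in one message slot */
		if (XPC_MSG_SIZE(payload_size) > ch->msg_size)
			return xpPayloadTooBig;

		/* the msgqueue reference is now taken (and dropped) here */
		xpc_msgqueue_ref(ch);

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_1;
		}
		if (!(ch->flags & XPC_C_CONNECTED)) {
			ret = xpNotConnected;
			goto out_1;
		}

		/* xpc_allocate_msg_sn2() is now an internal helper */
		ret = xpc_allocate_msg_sn2(ch, flags, &msg);
		if (ret != xpSuccess)
			goto out_1;

		/* the user's payload is copied into the message slot */
		memcpy(&msg->payload, payload, payload_size);
		msg->flags |= XPC_M_READY;

		/* ... advance the local PUT and notify the remote side ... */

	out_1:
		xpc_msgqueue_deref(ch);
		return ret;
	}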