Commit 026321c6 authored by Jon Maloy, committed by David S. Miller

tipc: rename tipc_server to tipc_topsrv

We rename struct tipc_server to struct tipc_topsrv. This reflects its now
specialized role as topology server. Accordingly, we change or add function
prefixes to make it clearer which functionality they belong to.

There are no functional changes in this commit.
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0ef897be
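For quick orientation, here is a condensed sketch of the topology server interface as it reads after this rename, assembled from the hunks below (declarations only; struct fields, locking and most internal helpers are abbreviated, and the usual kernel headers are assumed):

/* net/tipc/server.[ch] become net/tipc/topsrv.[ch]; the renamed external
 * interface, as visible in the hunks of this commit:
 */
struct tipc_topsrv;                                  /* was: struct tipc_server */

static inline struct tipc_topsrv *tipc_topsrv(struct net *net)
{
	return tipc_net(net)->topsrv;                /* per-netns topology server */
}

int tipc_topsrv_start(struct net *net);
void tipc_topsrv_stop(struct net *net);
void tipc_topsrv_queue_evt(struct net *net, int conid,
			   u32 event, struct tipc_event *evt);   /* was tipc_conn_queue_evt() */
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
			     u32 upper, u32 filter, int *conid);
void tipc_topsrv_kern_unsubscr(struct net *net, int conid);

/* Internal helpers follow the same pattern, e.g. tipc_alloc_conn() ->
 * tipc_conn_alloc(), tipc_close_conn() -> tipc_conn_close(),
 * tipc_recv_work()/tipc_send_work() -> tipc_conn_recv_work()/tipc_conn_send_work(),
 * tipc_accept_from_sock() -> tipc_topsrv_accept().
 */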
@@ -9,7 +9,7 @@ tipc-y += addr.o bcast.o bearer.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o monitor.o name_table.o net.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
server.o socket.o group.o
topsrv.o socket.o group.o
tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
......
@@ -64,7 +64,7 @@ struct tipc_bearer;
struct tipc_bc_base;
struct tipc_link;
struct tipc_name_table;
struct tipc_server;
struct tipc_topsrv;
struct tipc_monitor;
#define TIPC_MOD_VER "2.0.0"
@@ -112,7 +112,7 @@ struct tipc_net {
struct list_head dist_queue;
/* Topology subscription server */
struct tipc_server *topsrv;
struct tipc_topsrv *topsrv;
atomic_t subscription_count;
};
@@ -131,7 +131,7 @@ static inline struct list_head *tipc_nodes(struct net *net)
return &tipc_net(net)->node_list;
}
static inline struct tipc_server *tipc_topsrv(struct net *net)
static inline struct tipc_topsrv *tipc_topsrv(struct net *net)
{
return tipc_net(net)->topsrv;
}
......
@@ -37,7 +37,7 @@
#include "addr.h"
#include "group.h"
#include "bcast.h"
#include "server.h"
#include "topsrv.h"
#include "msg.h"
#include "socket.h"
#include "node.h"
......
@@ -51,7 +51,7 @@ static void tipc_sub_send_event(struct tipc_subscription *sub,
tipc_evt_write(evt, found_upper, found_upper);
tipc_evt_write(evt, port.ref, port);
tipc_evt_write(evt, port.node, node);
tipc_conn_queue_evt(sub->net, sub->conid, event, evt);
tipc_topsrv_queue_evt(sub->net, sub->conid, event, evt);
}
/**
......
@@ -37,7 +37,7 @@
#ifndef _TIPC_SUBSCR_H
#define _TIPC_SUBSCR_H
#include "server.h"
#include "topsrv.h"
#define TIPC_MAX_SUBSCR 65535
#define TIPC_MAX_PUBLICATIONS 65535
......
@@ -2,7 +2,7 @@
* net/tipc/server.c: TIPC server infrastructure
*
* Copyright (c) 2012-2013, Wind River Systems
* Copyright (c) 2017, Ericsson AB
* Copyright (c) 2017-2018, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@
*/
#include "subscr.h"
#include "server.h"
#include "topsrv.h"
#include "core.h"
#include "socket.h"
#include "addr.h"
@@ -52,7 +52,7 @@
#define TIPC_SERVER_NAME_LEN 32
/**
* struct tipc_server - TIPC server structure
* struct tipc_topsrv - TIPC server structure
* @conn_idr: identifier set of connection
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
@@ -68,7 +68,7 @@
* @imp: message importance
* @type: socket type
*/
struct tipc_server {
struct tipc_topsrv {
struct idr conn_idr;
spinlock_t idr_lock; /* for idr list */
int idr_in_use;
@@ -102,12 +102,12 @@ struct tipc_conn {
int conid;
struct socket *sock;
unsigned long flags;
struct tipc_server *server;
struct tipc_topsrv *server;
struct list_head sub_list;
spinlock_t sub_lock; /* for subscription list */
struct work_struct rwork;
struct list_head outqueue;
spinlock_t outqueue_lock;
spinlock_t outqueue_lock; /* for outqueue */
struct work_struct swork;
};
@@ -118,8 +118,10 @@ struct outqueue_entry {
struct list_head list;
};
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_conn_recv_work(struct work_struct *work);
static void tipc_conn_send_work(struct work_struct *work);
static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt);
static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s);
static bool connected(struct tipc_conn *con)
{
@@ -129,7 +131,7 @@ static bool connected(struct tipc_conn *con)
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
struct tipc_server *s = con->server;
struct tipc_topsrv *s = con->server;
struct outqueue_entry *e, *safe;
spin_lock_bh(&s->idr_lock);
@@ -158,75 +160,7 @@ static void conn_get(struct tipc_conn *con)
kref_get(&con->kref);
}
static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
{
struct tipc_conn *con;
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
if (!connected(con) || !kref_get_unless_zero(&con->kref))
con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
/* sock_data_ready - interrupt callback indicating the socket has data to read
* The queued work is launched into tipc_recv_work()->tipc_recv_from_sock()
*/
static void sock_data_ready(struct sock *sk)
{
struct tipc_conn *con;
read_lock_bh(&sk->sk_callback_lock);
con = sk->sk_user_data;
if (connected(con)) {
conn_get(con);
if (!queue_work(con->server->rcv_wq, &con->rwork))
conn_put(con);
}
read_unlock_bh(&sk->sk_callback_lock);
}
/* sock_write_space - interrupt callback after a sendmsg EAGAIN
* Indicates that there now is more space in the send buffer
* The queued work is launched into tipc_send_work()->tipc_send_to_sock()
*/
static void sock_write_space(struct sock *sk)
{
struct tipc_conn *con;
read_lock_bh(&sk->sk_callback_lock);
con = sk->sk_user_data;
if (connected(con)) {
conn_get(con);
if (!queue_work(con->server->send_wq, &con->swork))
conn_put(con);
}
read_unlock_bh(&sk->sk_callback_lock);
}
/* tipc_con_delete_sub - delete a specific or all subscriptions
* for a given subscriber
*/
static void tipc_con_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
{
struct list_head *sub_list = &con->sub_list;
struct tipc_net *tn = tipc_net(con->server->net);
struct tipc_subscription *sub, *tmp;
spin_lock_bh(&con->sub_lock);
list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
tipc_sub_unsubscribe(sub);
atomic_dec(&tn->subscription_count);
} else if (s) {
break;
}
}
spin_unlock_bh(&con->sub_lock);
}
static void tipc_close_conn(struct tipc_conn *con)
static void tipc_conn_close(struct tipc_conn *con)
{
struct sock *sk = con->sock->sk;
bool disconnect = false;
@@ -236,7 +170,7 @@ static void tipc_close_conn(struct tipc_conn *con)
if (disconnect) {
sk->sk_user_data = NULL;
tipc_con_delete_sub(con, NULL);
tipc_conn_delete_sub(con, NULL);
}
write_unlock_bh(&sk->sk_callback_lock);
@@ -250,12 +184,12 @@ static void tipc_close_conn(struct tipc_conn *con)
conn_put(con);
}
static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
{
struct tipc_conn *con;
int ret;
con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
con = kzalloc(sizeof(*con), GFP_ATOMIC);
if (!con)
return ERR_PTR(-ENOMEM);
@@ -264,8 +198,8 @@ static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
INIT_LIST_HEAD(&con->sub_list);
spin_lock_init(&con->outqueue_lock);
spin_lock_init(&con->sub_lock);
INIT_WORK(&con->swork, tipc_send_work);
INIT_WORK(&con->rwork, tipc_recv_work);
INIT_WORK(&con->swork, tipc_conn_send_work);
INIT_WORK(&con->rwork, tipc_conn_recv_work);
spin_lock_bh(&s->idr_lock);
ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
@@ -284,65 +218,108 @@ static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
return con;
}
static int tipc_con_rcv_sub(struct tipc_server *srv,
struct tipc_conn *con,
struct tipc_subscr *s)
static struct tipc_conn *tipc_conn_lookup(struct tipc_topsrv *s, int conid)
{
struct tipc_net *tn = tipc_net(srv->net);
struct tipc_subscription *sub;
struct tipc_conn *con;
if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
tipc_con_delete_sub(con, s);
return 0;
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
if (!connected(con) || !kref_get_unless_zero(&con->kref))
con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
/* tipc_conn_delete_sub - delete a specific or all subscriptions
* for a given subscriber
*/
static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
{
struct tipc_net *tn = tipc_net(con->server->net);
struct list_head *sub_list = &con->sub_list;
struct tipc_subscription *sub, *tmp;
spin_lock_bh(&con->sub_lock);
list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
tipc_sub_unsubscribe(sub);
atomic_dec(&tn->subscription_count);
} else if (s) {
break;
}
if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
return -1;
}
sub = tipc_sub_subscribe(srv->net, s, con->conid);
if (!sub)
return -1;
atomic_inc(&tn->subscription_count);
spin_lock_bh(&con->sub_lock);
list_add(&sub->sub_list, &con->sub_list);
spin_unlock_bh(&con->sub_lock);
return 0;
}
static int tipc_receive_from_sock(struct tipc_conn *con)
static void tipc_conn_send_to_sock(struct tipc_conn *con)
{
struct tipc_server *srv = con->server;
struct sock *sk = con->sock->sk;
struct msghdr msg = {};
struct tipc_subscr s;
struct list_head *queue = &con->outqueue;
struct tipc_topsrv *srv = con->server;
struct outqueue_entry *e;
struct tipc_event *evt;
struct msghdr msg;
struct kvec iov;
int count = 0;
int ret;
iov.iov_base = &s;
iov.iov_len = sizeof(s);
spin_lock_bh(&con->outqueue_lock);
while (!list_empty(queue)) {
e = list_first_entry(queue, struct outqueue_entry, list);
evt = &e->evt;
spin_unlock_bh(&con->outqueue_lock);
if (e->inactive)
tipc_conn_delete_sub(con, &evt->s);
memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT;
iov.iov_base = evt;
iov.iov_len = sizeof(*evt);
msg.msg_name = NULL;
iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
if (ret > 0) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_con_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
if (con->sock) {
ret = kernel_sendmsg(con->sock, &msg, &iov,
1, sizeof(*evt));
if (ret == -EWOULDBLOCK || ret == 0) {
cond_resched();
return;
} else if (ret < 0) {
return tipc_conn_close(con);
}
} else {
tipc_topsrv_kern_evt(srv->net, evt);
}
if (ret < 0)
tipc_close_conn(con);
return ret;
/* Don't starve users filling buffers */
if (++count >= MAX_SEND_MSG_COUNT) {
cond_resched();
count = 0;
}
spin_lock_bh(&con->outqueue_lock);
list_del(&e->list);
kfree(e);
}
spin_unlock_bh(&con->outqueue_lock);
}
static void tipc_conn_send_work(struct work_struct *work)
{
struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
if (connected(con))
tipc_conn_send_to_sock(con);
conn_put(con);
}
/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
* The queued work is launched into tipc_send_work()->tipc_send_to_sock()
*/
void tipc_conn_queue_evt(struct net *net, int conid,
void tipc_topsrv_queue_evt(struct net *net, int conid,
u32 event, struct tipc_event *evt)
{
struct tipc_server *srv = tipc_topsrv(net);
struct tipc_topsrv *srv = tipc_topsrv(net);
struct outqueue_entry *e;
struct tipc_conn *con;
@@ -368,123 +345,83 @@ void tipc_conn_queue_evt(struct net *net, int conid,
conn_put(con);
}
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
u32 upper, u32 filter, int *conid)
{
struct tipc_subscr sub;
struct tipc_conn *con;
int rc;
sub.seq.type = type;
sub.seq.lower = lower;
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
sub.filter = filter;
*(u32 *)&sub.usr_handle = port;
con = tipc_alloc_conn(tipc_topsrv(net));
if (IS_ERR(con))
return false;
*conid = con->conid;
con->sock = NULL;
rc = tipc_con_rcv_sub(tipc_topsrv(net), con, &sub);
if (rc < 0)
tipc_close_conn(con);
return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
/* tipc_conn_write_space - interrupt callback after a sendmsg EAGAIN
* Indicates that there now is more space in the send buffer
* The queued work is launched into tipc_send_work()->tipc_conn_send_to_sock()
*/
static void tipc_conn_write_space(struct sock *sk)
{
struct tipc_conn *con;
con = tipc_conn_lookup(tipc_topsrv(net), conid);
if (!con)
return;
test_and_clear_bit(CF_CONNECTED, &con->flags);
tipc_con_delete_sub(con, NULL);
conn_put(con);
read_lock_bh(&sk->sk_callback_lock);
con = sk->sk_user_data;
if (connected(con)) {
conn_get(con);
if (!queue_work(con->server->send_wq, &con->swork))
conn_put(con);
}
read_unlock_bh(&sk->sk_callback_lock);
}
static void tipc_send_kern_top_evt(struct net *net, struct tipc_event *evt)
static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
struct tipc_conn *con,
struct tipc_subscr *s)
{
u32 port = *(u32 *)&evt->s.usr_handle;
u32 self = tipc_own_addr(net);
struct sk_buff_head evtq;
struct sk_buff *skb;
struct tipc_net *tn = tipc_net(srv->net);
struct tipc_subscription *sub;
skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
self, self, port, port, 0);
if (!skb)
return;
msg_set_dest_droppable(buf_msg(skb), true);
memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
skb_queue_head_init(&evtq);
__skb_queue_tail(&evtq, skb);
tipc_sk_rcv(net, &evtq);
if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
tipc_conn_delete_sub(con, s);
return 0;
}
if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
return -1;
}
sub = tipc_sub_subscribe(srv->net, s, con->conid);
if (!sub)
return -1;
atomic_inc(&tn->subscription_count);
spin_lock_bh(&con->sub_lock);
list_add(&sub->sub_list, &con->sub_list);
spin_unlock_bh(&con->sub_lock);
return 0;
}
static void tipc_send_to_sock(struct tipc_conn *con)
static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
{
struct list_head *queue = &con->outqueue;
struct tipc_server *srv = con->server;
struct outqueue_entry *e;
struct tipc_event *evt;
struct msghdr msg;
struct tipc_topsrv *srv = con->server;
struct sock *sk = con->sock->sk;
struct msghdr msg = {};
struct tipc_subscr s;
struct kvec iov;
int count = 0;
int ret;
spin_lock_bh(&con->outqueue_lock);
while (!list_empty(queue)) {
e = list_first_entry(queue, struct outqueue_entry, list);
evt = &e->evt;
spin_unlock_bh(&con->outqueue_lock);
if (e->inactive)
tipc_con_delete_sub(con, &evt->s);
memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT;
iov.iov_base = evt;
iov.iov_len = sizeof(*evt);
iov.iov_base = &s;
iov.iov_len = sizeof(s);
msg.msg_name = NULL;
if (con->sock) {
ret = kernel_sendmsg(con->sock, &msg, &iov,
1, sizeof(*evt));
if (ret == -EWOULDBLOCK || ret == 0) {
cond_resched();
return;
} else if (ret < 0) {
return tipc_close_conn(con);
}
} else {
tipc_send_kern_top_evt(srv->net, evt);
iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
if (ret > 0) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
}
if (ret < 0)
tipc_conn_close(con);
/* Don't starve users filling buffers */
if (++count >= MAX_SEND_MSG_COUNT) {
cond_resched();
count = 0;
}
spin_lock_bh(&con->outqueue_lock);
list_del(&e->list);
kfree(e);
}
spin_unlock_bh(&con->outqueue_lock);
return ret;
}
static void tipc_recv_work(struct work_struct *work)
static void tipc_conn_recv_work(struct work_struct *work)
{
struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
int count = 0;
while (connected(con)) {
if (tipc_receive_from_sock(con))
if (tipc_conn_rcv_from_sock(con))
break;
/* Don't flood Rx machine */
@@ -496,19 +433,26 @@ static void tipc_recv_work(struct work_struct *work)
conn_put(con);
}
static void tipc_send_work(struct work_struct *work)
/* tipc_conn_data_ready - interrupt callback indicating the socket has data
* The queued work is launched into tipc_recv_work()->tipc_conn_rcv_from_sock()
*/
static void tipc_conn_data_ready(struct sock *sk)
{
struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
if (connected(con))
tipc_send_to_sock(con);
struct tipc_conn *con;
read_lock_bh(&sk->sk_callback_lock);
con = sk->sk_user_data;
if (connected(con)) {
conn_get(con);
if (!queue_work(con->server->rcv_wq, &con->rwork))
conn_put(con);
}
read_unlock_bh(&sk->sk_callback_lock);
}
static void tipc_accept_from_sock(struct work_struct *work)
static void tipc_topsrv_accept(struct work_struct *work)
{
struct tipc_server *srv = container_of(work, struct tipc_server, awork);
struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
struct socket *lsock = srv->listener;
struct socket *newsock;
struct tipc_conn *con;
@@ -519,7 +463,7 @@ static void tipc_accept_from_sock(struct work_struct *work)
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
return;
con = tipc_alloc_conn(srv);
con = tipc_conn_alloc(srv);
if (IS_ERR(con)) {
ret = PTR_ERR(con);
sock_release(newsock);
@@ -528,8 +472,8 @@ static void tipc_accept_from_sock(struct work_struct *work)
/* Register callbacks */
newsk = newsock->sk;
write_lock_bh(&newsk->sk_callback_lock);
newsk->sk_data_ready = sock_data_ready;
newsk->sk_write_space = sock_write_space;
newsk->sk_data_ready = tipc_conn_data_ready;
newsk->sk_write_space = tipc_conn_write_space;
newsk->sk_user_data = con;
con->sock = newsock;
write_unlock_bh(&newsk->sk_callback_lock);
@@ -539,12 +483,12 @@ static void tipc_accept_from_sock(struct work_struct *work)
}
}
/* listener_sock_data_ready - interrupt callback indicating new connection
* The queued job is launched into tipc_accept_from_sock()
/* tipc_toprsv_listener_data_ready - interrupt callback with connection request
* The queued job is launched into tipc_topsrv_accept()
*/
static void listener_sock_data_ready(struct sock *sk)
static void tipc_topsrv_listener_data_ready(struct sock *sk)
{
struct tipc_server *srv;
struct tipc_topsrv *srv;
read_lock_bh(&sk->sk_callback_lock);
srv = sk->sk_user_data;
@@ -553,7 +497,7 @@ static void listener_sock_data_ready(struct sock *sk)
read_unlock_bh(&sk->sk_callback_lock);
}
static int tipc_create_listener_sock(struct tipc_server *srv)
static int tipc_topsrv_create_listener(struct tipc_topsrv *srv)
{
int imp = TIPC_CRITICAL_IMPORTANCE;
struct socket *lsock = NULL;
@@ -568,7 +512,7 @@ static int tipc_create_listener_sock(struct tipc_server *srv)
srv->listener = lsock;
sk = lsock->sk;
write_lock_bh(&sk->sk_callback_lock);
sk->sk_data_ready = listener_sock_data_ready;
sk->sk_data_ready = tipc_topsrv_listener_data_ready;
sk->sk_user_data = srv;
write_unlock_bh(&sk->sk_callback_lock);
@@ -615,7 +559,65 @@ static int tipc_create_listener_sock(struct tipc_server *srv)
return -EINVAL;
}
static int tipc_work_start(struct tipc_server *s)
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
u32 upper, u32 filter, int *conid)
{
struct tipc_subscr sub;
struct tipc_conn *con;
int rc;
sub.seq.type = type;
sub.seq.lower = lower;
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
sub.filter = filter;
*(u32 *)&sub.usr_handle = port;
con = tipc_conn_alloc(tipc_topsrv(net));
if (IS_ERR(con))
return false;
*conid = con->conid;
con->sock = NULL;
rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
if (rc < 0)
tipc_conn_close(con);
return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
struct tipc_conn *con;
con = tipc_conn_lookup(tipc_topsrv(net), conid);
if (!con)
return;
test_and_clear_bit(CF_CONNECTED, &con->flags);
tipc_conn_delete_sub(con, NULL);
conn_put(con);
conn_put(con);
}
static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt)
{
u32 port = *(u32 *)&evt->s.usr_handle;
u32 self = tipc_own_addr(net);
struct sk_buff_head evtq;
struct sk_buff *skb;
skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
self, self, port, port, 0);
if (!skb)
return;
msg_set_dest_droppable(buf_msg(skb), true);
memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
skb_queue_head_init(&evtq);
__skb_queue_tail(&evtq, skb);
tipc_sk_rcv(net, &evtq);
}
static int tipc_topsrv_work_start(struct tipc_topsrv *s)
{
s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
if (!s->rcv_wq) {
@@ -633,7 +635,7 @@ static int tipc_work_start(struct tipc_server *s)
return 0;
}
static void tipc_work_stop(struct tipc_server *s)
static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
{
destroy_workqueue(s->rcv_wq);
destroy_workqueue(s->send_wq);
@@ -643,7 +645,7 @@ int tipc_topsrv_start(struct net *net)
{
struct tipc_net *tn = tipc_net(net);
const char name[] = "topology_server";
struct tipc_server *srv;
struct tipc_topsrv *srv;
int ret;
srv = kzalloc(sizeof(*srv), GFP_ATOMIC);
@@ -652,7 +654,7 @@ int tipc_topsrv_start(struct net *net)
srv->net = net;
srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
INIT_WORK(&srv->awork, tipc_accept_from_sock);
INIT_WORK(&srv->awork, tipc_topsrv_accept);
strncpy(srv->name, name, strlen(name) + 1);
tn->topsrv = srv;
@@ -662,20 +664,20 @@ int tipc_topsrv_start(struct net *net)
idr_init(&srv->conn_idr);
srv->idr_in_use = 0;
ret = tipc_work_start(srv);
ret = tipc_topsrv_work_start(srv);
if (ret < 0)
return ret;
ret = tipc_create_listener_sock(srv);
ret = tipc_topsrv_create_listener(srv);
if (ret < 0)
tipc_work_stop(srv);
tipc_topsrv_work_stop(srv);
return ret;
}
void tipc_topsrv_stop(struct net *net)
{
struct tipc_server *srv = tipc_topsrv(net);
struct tipc_topsrv *srv = tipc_topsrv(net);
struct socket *lsock = srv->listener;
struct tipc_conn *con;
int id;
@@ -685,7 +687,7 @@ void tipc_topsrv_stop(struct net *net)
con = idr_find(&srv->conn_idr, id);
if (con) {
spin_unlock_bh(&srv->idr_lock);
tipc_close_conn(con);
tipc_conn_close(con);
spin_lock_bh(&srv->idr_lock);
}
}
@@ -694,7 +696,7 @@ void tipc_topsrv_stop(struct net *net)
sock_release(lsock);
srv->listener = NULL;
spin_unlock_bh(&srv->idr_lock);
tipc_work_stop(srv);
tipc_topsrv_work_stop(srv);
idr_destroy(&srv->conn_idr);
kfree(srv);
}
@@ -38,16 +38,13 @@
#define _TIPC_SERVER_H
#include "core.h"
#include <linux/idr.h>
#include <linux/tipc.h>
#include <net/net_namespace.h>
#define TIPC_SERVER_NAME_LEN 32
#define TIPC_SUB_CLUSTER_SCOPE 0x20
#define TIPC_SUB_NODE_SCOPE 0x40
#define TIPC_SUB_NO_STATUS 0x80
void tipc_conn_queue_evt(struct net *net, int conid,
void tipc_topsrv_queue_evt(struct net *net, int conid,
u32 event, struct tipc_event *evt);
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
......
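As a usage note, the following is a minimal, hypothetical sketch of how an in-kernel caller could drive the renamed kernel-side subscription entry points. Only the tipc_topsrv_kern_subscr()/tipc_topsrv_kern_unsubscr() signatures and the behaviour described in the comments come from the hunks above; the function name, the example parameters and the use of the standard TIPC_SUB_PORTS uAPI filter flag are invented for illustration:

/* Hypothetical caller, not part of this commit: track all publications of one
 * TIPC service type through the in-kernel topology subscription interface.
 */
static int example_track_service(struct net *net, u32 own_port, u32 service_type)
{
	int conid;

	/* Subscribe to the whole instance range of 'service_type'. The helper
	 * allocates a socket-less tipc_conn, uses an infinite timeout, and
	 * later has matching TIPC_PUBLISHED/TIPC_WITHDRAWN events injected
	 * back to 'own_port' via tipc_topsrv_queue_evt()/tipc_topsrv_kern_evt().
	 */
	if (!tipc_topsrv_kern_subscr(net, own_port, service_type,
				     0, ~0U, TIPC_SUB_PORTS, &conid))
		return -ENOMEM;		/* subscription limit hit or allocation failed */

	/* ... consume the topology events arriving on 'own_port' ... */

	/* Drop the subscription and its connection again */
	tipc_topsrv_kern_unsubscr(net, conid);
	return 0;
}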