Commit 8de08f90 authored by David S. Miller's avatar David S. Miller

Merge bk://212.42.230.204:994/nf-2.6

into sunset.davemloft.net:/home/davem/src/BK/net-2.6
parents 0f634eb3 b20f3c6c
......@@ -23,13 +23,16 @@ enum tcp_conntrack {
/* SACK is permitted by the sender */
#define IP_CT_TCP_FLAG_SACK_PERM 0x02
/* This sender sent FIN first */
#define IP_CT_TCP_FLAG_CLOSE_INIT 0x03
struct ip_ct_tcp_state {
u_int32_t td_end; /* max of seq + len */
u_int32_t td_maxend; /* max of ack + max(win, 1) */
u_int32_t td_maxwin; /* max(win) */
u_int8_t td_scale; /* window scale factor */
u_int8_t loose; /* used when connection picked up from the middle */
u_int8_t flags; /* per direction state flags */
u_int8_t flags; /* per direction options */
};
struct ip_ct_tcp
......
......@@ -822,10 +822,10 @@ static int translate_table(struct ebt_replace *repl,
/* this will get free'd in do_replace()/ebt_register_table()
if an error occurs */
newinfo->chainstack = (struct ebt_chainstack **)
vmalloc(NR_CPUS * sizeof(struct ebt_chainstack));
vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack)
return -ENOMEM;
for (i = 0; i < NR_CPUS; i++) {
for (i = 0; i < num_possible_cpus(); i++) {
newinfo->chainstack[i] =
vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack[i]) {
......@@ -898,7 +898,7 @@ static void get_counters(struct ebt_counter *oldcounters,
memcpy(counters, oldcounters,
sizeof(struct ebt_counter) * nentries);
/* add other counters to those of cpu 0 */
for (cpu = 1; cpu < NR_CPUS; cpu++) {
for (cpu = 1; cpu < num_possible_cpus(); cpu++) {
counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
for (i = 0; i < nentries; i++) {
counters[i].pcnt += counter_base[i].pcnt;
......@@ -930,7 +930,7 @@ static int do_replace(void __user *user, unsigned int len)
BUGPRINT("Entries_size never zero\n");
return -EINVAL;
}
countersize = COUNTER_OFFSET(tmp.nentries) * NR_CPUS;
countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus();
newinfo = (struct ebt_table_info *)
vmalloc(sizeof(struct ebt_table_info) + countersize);
if (!newinfo)
......@@ -1023,7 +1023,7 @@ static int do_replace(void __user *user, unsigned int len)
vfree(table->entries);
if (table->chainstack) {
for (i = 0; i < NR_CPUS; i++)
for (i = 0; i < num_possible_cpus(); i++)
vfree(table->chainstack[i]);
vfree(table->chainstack);
}
......@@ -1043,7 +1043,7 @@ static int do_replace(void __user *user, unsigned int len)
vfree(counterstmp);
/* can be initialized in translate_table() */
if (newinfo->chainstack) {
for (i = 0; i < NR_CPUS; i++)
for (i = 0; i < num_possible_cpus(); i++)
vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack);
}
......@@ -1137,7 +1137,7 @@ int ebt_register_table(struct ebt_table *table)
return -EINVAL;
}
countersize = COUNTER_OFFSET(table->table->nentries) * NR_CPUS;
countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus();
newinfo = (struct ebt_table_info *)
vmalloc(sizeof(struct ebt_table_info) + countersize);
ret = -ENOMEM;
......@@ -1191,7 +1191,7 @@ int ebt_register_table(struct ebt_table *table)
up(&ebt_mutex);
free_chainstack:
if (newinfo->chainstack) {
for (i = 0; i < NR_CPUS; i++)
for (i = 0; i < num_possible_cpus(); i++)
vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack);
}
......@@ -1215,7 +1215,7 @@ void ebt_unregister_table(struct ebt_table *table)
if (table->private->entries)
vfree(table->private->entries);
if (table->private->chainstack) {
for (i = 0; i < NR_CPUS; i++)
for (i = 0; i < num_possible_cpus(); i++)
vfree(table->private->chainstack[i]);
vfree(table->private->chainstack);
}
......
......@@ -717,7 +717,7 @@ static int translate_table(const char *name,
}
/* And one copy for every other CPU */
for (i = 1; i < NR_CPUS; i++) {
for (i = 1; i < num_possible_cpus(); i++) {
memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
......@@ -768,7 +768,7 @@ static void get_counters(const struct arpt_table_info *t,
unsigned int cpu;
unsigned int i;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
i = 0;
ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
......@@ -886,7 +886,7 @@ static int do_replace(void __user *user, unsigned int len)
return -ENOMEM;
newinfo = vmalloc(sizeof(struct arpt_table_info)
+ SMP_ALIGN(tmp.size) * NR_CPUS);
+ SMP_ALIGN(tmp.size) * num_possible_cpus());
if (!newinfo)
return -ENOMEM;
......@@ -1159,7 +1159,7 @@ int arpt_register_table(struct arpt_table *table,
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct arpt_table_info)
+ SMP_ALIGN(repl->size) * NR_CPUS);
+ SMP_ALIGN(repl->size) * num_possible_cpus());
if (!newinfo) {
ret = -ENOMEM;
return ret;
......
......@@ -400,8 +400,8 @@ static int sctp_packet(struct ip_conntrack *conntrack,
return -1;
}
DEBUGP("Setting vtag %x for dir %d\n",
ih->init_tag, CTINFO2DIR(ctinfo));
conntrack->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = ih->init_tag;
ih->init_tag, !CTINFO2DIR(ctinfo));
conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
}
conntrack->proto.sctp.state = newconntrack;
......
This diff is collapsed.
......@@ -77,34 +77,70 @@ seq_print_counters(struct seq_file *s,
#define seq_print_counters(x, y) 0
#endif
static void *ct_seq_start(struct seq_file *s, loff_t *pos)
struct ct_iter_state {
unsigned int bucket;
};
static struct list_head *ct_get_first(struct seq_file *seq)
{
if (*pos >= ip_conntrack_htable_size)
return NULL;
return &ip_conntrack_hash[*pos];
struct ct_iter_state *st = seq->private;
for (st->bucket = 0;
st->bucket < ip_conntrack_htable_size;
st->bucket++) {
if (!list_empty(&ip_conntrack_hash[st->bucket]))
return ip_conntrack_hash[st->bucket].next;
}
return NULL;
}
static void ct_seq_stop(struct seq_file *s, void *v)
/* Advance the iterator one entry.  When stepping lands back on the
 * current bucket's list head (bucket exhausted), move on to the next
 * non-empty bucket.  Returns NULL once the whole table is exhausted.
 * Caller must hold ip_conntrack_lock for reading. */
static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
{
	struct ct_iter_state *st = seq->private;

	for (head = head->next;
	     head == &ip_conntrack_hash[st->bucket];
	     head = ip_conntrack_hash[st->bucket].next) {
		if (++st->bucket >= ip_conntrack_htable_size)
			return NULL;
	}
	return head;
}
/* Return the pos'th entry (0-based) of the conntrack table, or NULL
 * when fewer than pos+1 entries exist.  Lets ->start() resume a
 * seq_file read at an arbitrary record offset. */
static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *entry = ct_get_first(seq);

	while (entry != NULL && pos > 0) {
		entry = ct_get_next(seq, entry);
		pos--;
	}
	return pos ? NULL : entry;
}
/* seq_file ->start(): take the conntrack table read lock (paired with
 * the READ_UNLOCK in ct_seq_stop()) and position the iterator at
 * record offset *pos; returns NULL when *pos is past the last entry. */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
{
READ_LOCK(&ip_conntrack_lock);
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
if (*pos >= ip_conntrack_htable_size)
return NULL;
return &ip_conntrack_hash[*pos];
return ct_get_next(s, v);
}
/* return 0 on success, 1 in case of error */
static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
struct seq_file *s)
/* seq_file ->stop(): drop the read lock taken in ct_seq_start(). */
static void ct_seq_stop(struct seq_file *s, void *v)
{
READ_UNLOCK(&ip_conntrack_lock);
}
static int ct_seq_show(struct seq_file *s, void *v)
{
const struct ip_conntrack_tuple_hash *hash = v;
const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash);
struct ip_conntrack_protocol *proto;
MUST_BE_READ_LOCKED(&ip_conntrack_lock);
IP_NF_ASSERT(conntrack);
/* we only want to print DIR_ORIGINAL */
......@@ -115,63 +151,50 @@ static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
.tuple.dst.protonum);
IP_NF_ASSERT(proto);
if (seq_printf(s, "%-8s %u %lu ",
if (seq_printf(s, "%-8s %u %ld ",
proto->name,
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
timer_pending(&conntrack->timeout)
? (conntrack->timeout.expires - jiffies)/HZ : 0) != 0)
return 1;
? (long)(conntrack->timeout.expires - jiffies)/HZ
: 0) != 0)
return -ENOSPC;
if (proto->print_conntrack(s, conntrack))
return 1;
return -ENOSPC;
if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
proto))
return 1;
return -ENOSPC;
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
return 1;
return -ENOSPC;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
if (seq_printf(s, "[UNREPLIED] "))
return 1;
return -ENOSPC;
if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto))
return 1;
return -ENOSPC;
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
return 1;
return -ENOSPC;
if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
if (seq_printf(s, "[ASSURED] "))
return 1;
return -ENOSPC;
#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
if (seq_printf(s, "mark=%ld ", conntrack->mark))
return 1;
if (seq_printf(s, "mark=%lu ", conntrack->mark))
return -ENOSPC;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
return 1;
return -ENOSPC;
return 0;
}
static int ct_seq_show(struct seq_file *s, void *v)
{
struct list_head *list = v;
int ret = 0;
/* FIXME: Simply truncates if hash chain too long. */
READ_LOCK(&ip_conntrack_lock);
if (LIST_FIND(list, ct_seq_real_show,
struct ip_conntrack_tuple_hash *, s))
ret = -ENOSPC;
READ_UNLOCK(&ip_conntrack_lock);
return ret;
}
static struct seq_operations ct_seq_ops = {
.start = ct_seq_start,
.next = ct_seq_next,
......@@ -181,7 +204,23 @@ static struct seq_operations ct_seq_ops = {
static int ct_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ct_seq_ops);
struct seq_file *seq;
struct ct_iter_state *st;
int ret;
st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
ret = seq_open(file, &ct_seq_ops);
if (ret)
goto out_free;
seq = file->private_data;
seq->private = st;
memset(st, 0, sizeof(struct ct_iter_state));
return ret;
out_free:
kfree(st);
return ret;
}
static struct file_operations ct_file_ops = {
......@@ -189,7 +228,7 @@ static struct file_operations ct_file_ops = {
.open = ct_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release
.release = seq_release_private,
};
/* expects */
......@@ -235,8 +274,8 @@ static int exp_seq_show(struct seq_file *s, void *v)
struct ip_conntrack_expect *expect = v;
if (expect->timeout.function)
seq_printf(s, "%lu ", timer_pending(&expect->timeout)
? (expect->timeout.expires - jiffies)/HZ : 0);
seq_printf(s, "%ld ", timer_pending(&expect->timeout)
? (long)(expect->timeout.expires - jiffies)/HZ : 0);
else
seq_printf(s, "- ");
......
......@@ -14,6 +14,9 @@
* Zander).
* 2000-08-01: Added Nick Williams' MAC support.
* 2002-06-25: Code cleanup.
* 2005-01-10: Added /proc counter for dropped packets; fixed so
* packets aren't delivered to user space if they're going
* to be dropped.
*
*/
#include <linux/module.h>
......@@ -59,6 +62,8 @@ static DEFINE_RWLOCK(queue_lock);
static int peer_pid;
static unsigned int copy_range;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl;
static LIST_HEAD(queue_list);
static DECLARE_MUTEX(ipqnl_sem);
......@@ -70,18 +75,11 @@ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
kfree(entry);
}
static inline int
static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
if (queue_total >= queue_maxlen) {
if (net_ratelimit())
printk(KERN_WARNING "ip_queue: full at %d entries, "
"dropping packet(s).\n", queue_total);
return -ENOSPC;
}
list_add(&entry->list, &queue_list);
queue_total++;
return 0;
}
/*
......@@ -308,14 +306,24 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
if (!peer_pid)
goto err_out_free_nskb;
if (queue_total >= queue_maxlen) {
queue_dropped++;
status = -ENOSPC;
if (net_ratelimit())
printk (KERN_WARNING "ip_queue: full at %d entries, "
"dropping packets(s). Dropped: %d\n", queue_total,
queue_dropped);
goto err_out_free_nskb;
}
/* netlink_unicast will either free the nskb or attach it to a socket */
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
if (status < 0)
goto err_out_unlock;
status = __ipq_enqueue_entry(entry);
if (status < 0)
if (status < 0) {
queue_user_dropped++;
goto err_out_unlock;
}
__ipq_enqueue_entry(entry);
write_unlock_bh(&queue_lock);
return status;
......@@ -637,12 +645,16 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
"Copy mode : %hu\n"
"Copy range : %u\n"
"Queue length : %u\n"
"Queue max. length : %u\n",
"Queue max. length : %u\n"
"Queue dropped : %u\n"
"Netlink dropped : %u\n",
peer_pid,
copy_mode,
copy_range,
queue_total,
queue_maxlen);
queue_maxlen,
queue_dropped,
queue_user_dropped);
read_unlock_bh(&queue_lock);
......
......@@ -923,7 +923,7 @@ translate_table(const char *name,
}
/* And one copy for every other CPU */
for (i = 1; i < NR_CPUS; i++) {
for (i = 1; i < num_possible_cpus(); i++) {
memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
......@@ -945,7 +945,7 @@ replace_table(struct ipt_table *table,
struct ipt_entry *table_base;
unsigned int i;
for (i = 0; i < NR_CPUS; i++) {
for (i = 0; i < num_possible_cpus(); i++) {
table_base =
(void *)newinfo->entries
+ TABLE_OFFSET(newinfo, i);
......@@ -992,7 +992,7 @@ get_counters(const struct ipt_table_info *t,
unsigned int cpu;
unsigned int i;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
i = 0;
IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
......@@ -1130,7 +1130,7 @@ do_replace(void __user *user, unsigned int len)
return -ENOMEM;
newinfo = vmalloc(sizeof(struct ipt_table_info)
+ SMP_ALIGN(tmp.size) * NR_CPUS);
+ SMP_ALIGN(tmp.size) * num_possible_cpus());
if (!newinfo)
return -ENOMEM;
......@@ -1460,7 +1460,7 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct ipt_table_info)
+ SMP_ALIGN(repl->size) * NR_CPUS);
+ SMP_ALIGN(repl->size) * num_possible_cpus());
if (!newinfo)
return -ENOMEM;
......
......@@ -198,16 +198,16 @@ static void dump_packet(const struct ipt_log_info *info,
static size_t required_len[NR_ICMP_TYPES+1]
= { [ICMP_ECHOREPLY] = 4,
[ICMP_DEST_UNREACH]
= 8 + sizeof(struct iphdr) + 8,
= 8 + sizeof(struct iphdr),
[ICMP_SOURCE_QUENCH]
= 8 + sizeof(struct iphdr) + 8,
= 8 + sizeof(struct iphdr),
[ICMP_REDIRECT]
= 8 + sizeof(struct iphdr) + 8,
= 8 + sizeof(struct iphdr),
[ICMP_ECHO] = 4,
[ICMP_TIME_EXCEEDED]
= 8 + sizeof(struct iphdr) + 8,
= 8 + sizeof(struct iphdr),
[ICMP_PARAMETERPROB]
= 8 + sizeof(struct iphdr) + 8,
= 8 + sizeof(struct iphdr),
[ICMP_TIMESTAMP] = 20,
[ICMP_TIMESTAMPREPLY] = 20,
[ICMP_ADDRESS] = 12,
......
......@@ -252,10 +252,6 @@ static void send_unreach(struct sk_buff *skb_in, int code)
if (iph->frag_off&htons(IP_OFFSET))
return;
/* Ensure we have at least 8 bytes of proto header. */
if (skb_in->len < skb_in->nh.iph->ihl*4 + 8)
return;
/* If we send an ICMP error to an ICMP error a mess would result.. */
if (iph->protocol == IPPROTO_ICMP) {
struct icmphdr ihdr;
......
......@@ -625,7 +625,7 @@ static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s)
rateinfo_recalc(ent, jiffies);
return seq_printf(s, "%ld %u.%u.%u.%u:%u->%u.%u.%u.%u:%u %u %u %u\n",
(ent->expires - jiffies)/HZ,
(long)(ent->expires - jiffies)/HZ,
NIPQUAD(ent->dst.src_ip), ntohs(ent->dst.src_port),
NIPQUAD(ent->dst.dst_ip), ntohs(ent->dst.dst_port),
ent->rateinfo.credit, ent->rateinfo.credit_cap,
......
......@@ -20,6 +20,9 @@
* Few changes needed, mainly the hard_routing code and
* the netlink socket protocol (we're NETLINK_IP6_FW).
* 2002-06-25: Code cleanup. [JM: ported cleanup over from ip_queue.c]
* 2005-02-04: Added /proc counter for dropped packets; fixed so
* packets aren't delivered to user space if they're going
* to be dropped.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
......@@ -64,6 +67,8 @@ static DEFINE_RWLOCK(queue_lock);
static int peer_pid;
static unsigned int copy_range;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl;
static LIST_HEAD(queue_list);
static DECLARE_MUTEX(ipqnl_sem);
......@@ -75,18 +80,11 @@ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
kfree(entry);
}
static inline int
static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
if (queue_total >= queue_maxlen) {
if (net_ratelimit())
printk(KERN_WARNING "ip6_queue: full at %d entries, "
"dropping packet(s).\n", queue_total);
return -ENOSPC;
}
list_add(&entry->list, &queue_list);
queue_total++;
return 0;
}
/*
......@@ -312,14 +310,24 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, void *data)
if (!peer_pid)
goto err_out_free_nskb;
if (queue_total >= queue_maxlen) {
queue_dropped++;
status = -ENOSPC;
if (net_ratelimit())
printk (KERN_WARNING "ip6_queue: fill at %d entries, "
"dropping packet(s). Dropped: %d\n", queue_total,
queue_dropped);
goto err_out_free_nskb;
}
/* netlink_unicast will either free the nskb or attach it to a socket */
status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
if (status < 0)
if (status < 0) {
queue_user_dropped++;
goto err_out_unlock;
}
status = __ipq_enqueue_entry(entry);
if (status < 0)
goto err_out_unlock;
__ipq_enqueue_entry(entry);
write_unlock_bh(&queue_lock);
return status;
......@@ -639,12 +647,16 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
"Copy mode : %hu\n"
"Copy range : %u\n"
"Queue length : %u\n"
"Queue max. length : %u\n",
"Queue max. length : %u\n"
"Queue dropped : %u\n"
"Netfilter dropped : %u\n",
peer_pid,
copy_mode,
copy_range,
queue_total,
queue_maxlen);
queue_maxlen,
queue_dropped,
queue_user_dropped);
read_unlock_bh(&queue_lock);
......
......@@ -952,7 +952,7 @@ translate_table(const char *name,
}
/* And one copy for every other CPU */
for (i = 1; i < NR_CPUS; i++) {
for (i = 1; i < num_possible_cpus(); i++) {
memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
......@@ -974,7 +974,7 @@ replace_table(struct ip6t_table *table,
struct ip6t_entry *table_base;
unsigned int i;
for (i = 0; i < NR_CPUS; i++) {
for (i = 0; i < num_possible_cpus(); i++) {
table_base =
(void *)newinfo->entries
+ TABLE_OFFSET(newinfo, i);
......@@ -1021,7 +1021,7 @@ get_counters(const struct ip6t_table_info *t,
unsigned int cpu;
unsigned int i;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
i = 0;
IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
......@@ -1155,7 +1155,7 @@ do_replace(void __user *user, unsigned int len)
return -ENOMEM;
newinfo = vmalloc(sizeof(struct ip6t_table_info)
+ SMP_ALIGN(tmp.size) * NR_CPUS);
+ SMP_ALIGN(tmp.size) * num_possible_cpus());
if (!newinfo)
return -ENOMEM;
......@@ -1469,7 +1469,7 @@ int ip6t_register_table(struct ip6t_table *table,
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct ip6t_table_info)
+ SMP_ALIGN(repl->size) * NR_CPUS);
+ SMP_ALIGN(repl->size) * num_possible_cpus());
if (!newinfo)
return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment