Commit 0fb74dfb, authored Sep 15, 2005 by Linus Torvalds
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
Parents: 40796c5e 4db2ce01
Showing 27 changed files with 42 additions and 396 deletions:
arch/i386/Kconfig                    +0    -5
arch/i386/lib/Makefile               +0    -1
arch/i386/lib/dec_and_lock.c         +0   -42
arch/ia64/Kconfig                    +0    -5
arch/ia64/lib/Makefile               +0    -1
arch/ia64/lib/dec_and_lock.c         +0   -42
arch/m32r/Kconfig                    +0    -5
arch/mips/Kconfig                    +0    -4
arch/mips/lib/Makefile               +1    -1
arch/mips/lib/dec_and_lock.c         +0   -47
arch/ppc/Kconfig                     +0    -4
arch/ppc/lib/Makefile                +1    -1
arch/ppc/lib/dec_and_lock.c          +0   -38
arch/ppc64/Kconfig                   +0    -4
arch/ppc64/lib/Makefile              +1    -1
arch/ppc64/lib/dec_and_lock.c        +0   -47
arch/sparc64/Kconfig.debug           +0    -8
arch/sparc64/kernel/sparc64_ksyms.c  +0    -3
arch/sparc64/lib/Makefile            +0    -2
arch/sparc64/lib/dec_and_lock.S      +0   -80
arch/x86_64/Kconfig                  +0    -5
arch/x86_64/kernel/x8664_ksyms.c     +0    -4
arch/x86_64/lib/Makefile             +0    -2
arch/x86_64/lib/dec_and_lock.c       +0   -40
arch/xtensa/Kconfig                  +0    -4
fs/compat.c                          +4    -0
lib/dec_and_lock.c                   +35   -0
arch/i386/Kconfig

@@ -908,11 +908,6 @@ config IRQBALANCE
 	  The default yes will allow the kernel to do irq load balancing.
 	  Saying no will keep the kernel from doing irq load balancing.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT) && X86_CMPXCHG
-	default y
-
 # turning this on wastes a bunch of space.
 # Summit needs it only when NUMA is on
 config BOOT_IOREMAP
arch/i386/lib/Makefile

@@ -7,4 +7,3 @@ lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
 	bitops.o
 
 lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
arch/i386/lib/dec_and_lock.c (deleted, 100644 → 0)

/*
 * x86 version of "atomic_dec_and_lock()" using
 * the atomic "cmpxchg" instruction.
 *
 * (For CPU's lacking cmpxchg, we use the slow
 * generic version, and this one never even gets
 * compiled).
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/atomic.h>

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

repeat:
	counter = atomic_read(atomic);
	newcount = counter - 1;
	if (!newcount)
		goto slow_path;

	asm volatile("lock; cmpxchgl %1,%2"
		:"=a" (newcount)
		:"r" (newcount), "m" (atomic->counter), "0" (counter));

	/* If the above failed, "eax" will have changed */
	if (newcount != counter)
		goto repeat;

	return 0;

slow_path:
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
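For context, the primitive being consolidated here serves refcounted objects that live in a locked structure: the last reference holder must come back holding the lock so the object can be unlinked before it is freed. A minimal caller-side sketch follows; struct foo, foo_put() and the list bookkeeping are hypothetical, invented for illustration, and only the atomic_dec_and_lock() call itself is the kernel interface this commit touches.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct foo {
	atomic_t refcount;
	struct list_head node;	/* linked on foo_list under foo_list_lock */
};

static LIST_HEAD(foo_list);
static spinlock_t foo_list_lock = SPIN_LOCK_UNLOCKED;

void foo_put(struct foo *f)
{
	/* Returns 1 with foo_list_lock held iff the count reached zero,
	 * so no other CPU can look the object up while we unlink it. */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->node);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}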
arch/ia64/Kconfig

@@ -298,11 +298,6 @@ config PREEMPT
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default y
-
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
arch/ia64/lib/Makefile

@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
arch/ia64/lib/dec_and_lock.c (deleted, 100644 → 0)

/*
 * Copyright (C) 2003 Jerome Marchand, Bull S.A.
 *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This file is released under the GPLv2, or at your option any later version.
 *
 * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
 * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
 * operations have to be done atomically, so that the count doesn't drop to zero without
 * acquiring the spinlock first.
 */
int _atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
{
	int old, new;

	do {
		old = atomic_read(refcount);
		new = old - 1;
		if (unlikely (old == 1)) {
			/* oops, we may be decrementing to zero, do it the slow way... */
			spin_lock(lock);
			if (atomic_dec_and_test(refcount))
				return 1;
			spin_unlock(lock);
			return 0;
		}
	} while (cmpxchg(&refcount->counter, old, new) != old);

	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
arch/m32r/Kconfig

@@ -220,11 +220,6 @@ config PREEMPT
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default n
-
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
arch/mips/Kconfig

@@ -1009,10 +1009,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 #
 # Select some configuration options automatically based on user selections.
 #
arch/mips/lib/Makefile

@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
+lib-y	+= csum_partial_copy.o memcpy.o promlib.o \
 	   strlen_user.o strncpy_user.o strnlen_user.o
 
 obj-y	+= iomap.o
arch/mips/lib/dec_and_lock.c (deleted, 100644 → 0)

/*
 * MIPS version of atomic_dec_and_lock() using cmpxchg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * This implementation can be used on any architecture that
 * has a cmpxchg, and where atomic->value is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* do it the slow way */

		newcount = cmpxchg(&atomic->counter, counter, newcount);
		if (newcount == counter)
			return 0;
	}

	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
arch/ppc/Kconfig

@@ -26,10 +26,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config PPC
 	bool
 	default y
arch/ppc/lib/Makefile

@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y	:= checksum.o string.o strcase.o dec_and_lock.o div64.o
+obj-y	:= checksum.o string.o strcase.o div64.o
 
 obj-$(CONFIG_8xx)	+= rheap.o
 obj-$(CONFIG_CPM2)	+= rheap.o
arch/ppc/lib/dec_and_lock.c (deleted, 100644 → 0)

#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * This implementation can be used on any architecture that
 * has a cmpxchg, and where atomic->value is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* do it the slow way */

		newcount = cmpxchg(&atomic->counter, counter, newcount);
		if (newcount == counter)
			return 0;
	}

	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
arch/ppc64/Kconfig

@@ -28,10 +28,6 @@ config GENERIC_ISA_DMA
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config EARLY_PRINTK
 	bool
 	default y
arch/ppc64/lib/Makefile

@@ -2,7 +2,7 @@
 # Makefile for ppc64-specific library files..
 #
 
-lib-y := checksum.o dec_and_lock.o string.o strcase.o
+lib-y := checksum.o string.o strcase.o
 lib-y += copypage.o memcpy.o copyuser.o usercopy.o
 
 # Lock primitives are defined as no-ops in include/linux/spinlock.h
arch/ppc64/lib/dec_and_lock.c (deleted, 100644 → 0)

/*
 * ppc64 version of atomic_dec_and_lock() using cmpxchg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * This implementation can be used on any architecture that
 * has a cmpxchg, and where atomic->value is an int holding
 * the value of the atomic (i.e. the high bits aren't used
 * for a lock or anything like that).
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

	for (;;) {
		counter = atomic_read(atomic);
		newcount = counter - 1;
		if (!newcount)
			break;		/* do it the slow way */

		newcount = cmpxchg(&atomic->counter, counter, newcount);
		if (newcount == counter)
			return 0;
	}

	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
arch/sparc64/Kconfig.debug

@@ -33,14 +33,6 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
-
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
arch/sparc64/kernel/sparc64_ksyms.c

@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
arch/sparc64/lib/Makefile

@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
arch/sparc64/lib/dec_and_lock.S (deleted, 100644 → 0)

/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
 * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
 *                 using cas and ldstub instructions.
 *
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <asm/thread_info.h>

	.text
	.align	64

	/* CAS basically works like this:
	 *
	 * void CAS(MEM, REG1, REG2)
	 * {
	 *	START_ATOMIC();
	 *	if (*(MEM) == REG1) {
	 *		TMP = *(MEM);
	 *		*(MEM) = REG2;
	 *		REG2 = TMP;
	 *	} else
	 *		REG2 = *(MEM);
	 *	END_ATOMIC();
	 * }
	 */

	.globl	_atomic_dec_and_lock
_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
loop1:	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, start_to_zero
	 nop
nzero:	cas	[%o0], %g2, %g7
	cmp	%g2, %g7
	bne,pn	%icc, loop1
	 mov	0, %g1

out:
	membar	#StoreLoad | #StoreStore
	retl
	 mov	%g1, %o0
start_to_zero:
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	add	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif
to_zero:
	ldstub	[%o1], %g3
	membar	#StoreLoad | #StoreStore
	brnz,pn	%g3, spin_on_lock
	 nop
loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
	cmp	%g2, %g7

	be,pt	%icc, out
	 mov	1, %g1
	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, loop2
	 nop
	membar	#StoreStore | #LoadStore
	stb	%g0, [%o1]
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	sub	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif

	b,pt	%xcc, nzero
	 nop
spin_on_lock:
	ldub	[%o1], %g3
	membar	#LoadLoad
	brnz,pt	%g3, spin_on_lock
	 nop
	ba,pt	%xcc, to_zero
	 nop
	nop
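For readers who do not speak SPARC assembly: the fast path above (loop1/nzero) is the same compare-and-swap retry loop as the C implementations elsewhere in this commit, just written with cas, and the slow path takes the spinlock by hand with ldstub. Below is a standalone C sketch of the retry loop's semantics, using the GCC __sync_val_compare_and_swap builtin purely as a modern stand-in; nothing in this commit uses it.

#include <stdio.h>

/* Decrement *p by one unless it would reach zero; return 1 on success.
 * Mirrors the cas retry loop in the deleted dec_and_lock.S fast path. */
static int dec_unless_zero(int *p)
{
	int old, seen;

	for (;;) {
		old = *p;		/* like: lduw [%o0], %g2 */
		if (old == 1)
			return 0;	/* would hit zero: caller takes the slow path */
		/* like: cas [%o0], %g2, %g7 -- stores old-1 only if *p still == old */
		seen = __sync_val_compare_and_swap(p, old, old - 1);
		if (seen == old)
			return 1;	/* our decrement won the race */
		/* another thread changed the count meanwhile; retry, like bne,pn loop1 */
	}
}

int main(void)
{
	int refcount = 3;

	while (dec_unless_zero(&refcount))
		printf("decremented, refcount now %d\n", refcount);
	printf("final reference: the slow path would now take the lock\n");
	return 0;
}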
arch/x86_64/Kconfig

@@ -277,11 +277,6 @@ source "mm/Kconfig"
 config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 256
arch/x86_64/kernel/x8664_ksyms.c

@@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
 EXPORT_SYMBOL(die_chain);
 EXPORT_SYMBOL(register_die_notifier);
arch/x86_64/lib/Makefile

@@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
arch/x86_64/lib/dec_and_lock.c (deleted, 100644 → 0)

/*
 * x86 version of "atomic_dec_and_lock()" using
 * the atomic "cmpxchg" instruction.
 *
 * (For CPU's lacking cmpxchg, we use the slow
 * generic version, and this one never even gets
 * compiled).
 */

#include <linux/spinlock.h>
#include <asm/atomic.h>

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

repeat:
	counter = atomic_read(atomic);
	newcount = counter - 1;
	if (!newcount)
		goto slow_path;

	asm volatile("lock; cmpxchgl %1,%2"
		:"=a" (newcount)
		:"r" (newcount), "m" (atomic->counter), "0" (counter));

	/* If the above failed, "eax" will have changed */
	if (newcount != counter)
		goto repeat;

	return 0;

slow_path:
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
arch/xtensa/Kconfig

@@ -26,10 +26,6 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config GENERIC_HARDIRQS
 	bool
 	default y
fs/compat.c

@@ -44,6 +44,8 @@
 #include <linux/nfsd/syscall.h>
 #include <linux/personality.h>
 #include <linux/rwsem.h>
+#include <linux/acct.h>
+#include <linux/mm.h>
 #include <net/sock.h>
 
 /* siocdevprivate_ioctl */

@@ -1487,6 +1489,8 @@ int compat_do_execve(char * filename,
 
 	/* execve success */
 	security_bprm_free(bprm);
+	acct_update_integrals(current);
+	update_mem_hiwater(current);
 	kfree(bprm);
 	return retval;
 }
lib/dec_and_lock.c

 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
+#include <asm/system.h>
 
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	int counter;
+	int newcount;
+
+	for (;;) {
+		counter = atomic_read(atomic);
+		newcount = counter - 1;
+		if (!newcount)
+			break;		/* do it the slow way */
+
+		newcount = cmpxchg(&atomic->counter, counter, newcount);
+		if (newcount == counter)
+			return 0;
+	}
+
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+#else
 /*
  * This is an architecture-neutral, but slow,
  * implementation of the notion of "decrement
...

@@ -33,5 +67,6 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 	spin_unlock(lock);
 	return 0;
 }
+#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
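The comment's requirement that the decrement and the lock acquisition happen atomically is the whole point of the helper. A hedged sketch of the naive two-step version, hypothetical and for illustration only, shows the race it closes:

/*
 * Hypothetical broken variant -- NOT from this commit, shown only to
 * illustrate the race _atomic_dec_and_lock() prevents.
 */
int broken_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	if (atomic_dec_and_test(atomic)) {
		/* RACE WINDOW: the count is already 0 but the lock is not
		 * yet held.  A concurrent lookup running under the lock can
		 * still find the object and atomic_inc() its count from
		 * zero, yet we proceed as the "last" holder and free it. */
		spin_lock(lock);
		return 1;
	}
	return 0;
}

The cmpxchg fast path never passes through zero at all: any decrement that would reach zero is diverted to the slow path, which takes the lock before touching the count.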