Commit a0872417 authored by Linus Torvalds

Merge tag 's390-5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:
 "This is mainly to decouple udelay() and arch_cpu_idle() and simplify
  both of them.

  Summary:

   - Always initialize kernel stack backchain when entering the kernel,
     so that unwinding works properly.

   - Fix stack unwinder test case to avoid rare interrupt stack
     corruption.

   - Simplify udelay() and just let it busy loop instead of implementing
     a complex logic.

   - arch_cpu_idle() cleanup.

   - Some other minor improvements"

* tag 's390-5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/zcrypt: convert comma to semicolon
  s390/idle: allow arch_cpu_idle() to be kprobed
  s390/idle: remove raw_local_irq_save()/restore() from arch_cpu_idle()
  s390/idle: merge enabled_wait() and arch_cpu_idle()
  s390/delay: remove udelay_simple()
  s390/irq: select HAVE_IRQ_EXIT_ON_IRQ_STACK
  s390/delay: simplify udelay
  s390/test_unwind: use timer instead of udelay
  s390/test_unwind: fix CALL_ON_STACK tests
  s390: make calls to TRACE_IRQS_OFF/TRACE_IRQS_ON balanced
  s390: always clear kernel stack backchain before calling functions
parents 5ba836eb dfdc6e73
@@ -150,6 +150,7 @@ config S390
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
...
@@ -13,14 +13,12 @@
 #ifndef _S390_DELAY_H
 #define _S390_DELAY_H

-void udelay_enable(void);
-void __ndelay(unsigned long long nsecs);
-void __udelay(unsigned long long usecs);
-void udelay_simple(unsigned long long usecs);
+void __ndelay(unsigned long nsecs);
+void __udelay(unsigned long usecs);
 void __delay(unsigned long loops);

-#define ndelay(n) __ndelay((unsigned long long) (n))
-#define udelay(n) __udelay((unsigned long long) (n))
-#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
+#define ndelay(n) __ndelay((unsigned long)(n))
+#define udelay(n) __udelay((unsigned long)(n))
+#define mdelay(n) __udelay((unsigned long)(n) * 1000)

 #endif /* defined(_S390_DELAY_H) */
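The header change is a pure type cleanup: s390 is 64-bit only, so unsigned long is already 64 bits wide, and the unsigned long long variants (plus the udelay_simple()/udelay_enable() special cases) can go. Callers keep the usual semantics. A hedged usage sketch, with a made-up device register and ready bit, showing that a poll loop looks the same before and after:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical poll loop; status_reg and the 0x01 ready bit are invented. */
static int wait_for_ready(void __iomem *status_reg)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if (readl(status_reg) & 0x01)
			return 0;
		udelay(10);	/* after this series: always a plain busy wait */
	}
	return -ETIMEDOUT;
}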
@@ -16,14 +16,12 @@
 #define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
 #define CIF_FPU			3	/* restore FPU registers */
-#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
 #define CIF_ENABLED_WAIT	5	/* in enabled wait state */
 #define CIF_MCCK_GUEST		6	/* machine check happening in guest */
 #define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */

 #define _CIF_NOHZ_DELAY		BIT(CIF_NOHZ_DELAY)
 #define _CIF_FPU		BIT(CIF_FPU)
-#define _CIF_IGNORE_IRQ		BIT(CIF_IGNORE_IRQ)
 #define _CIF_ENABLED_WAIT	BIT(CIF_ENABLED_WAIT)
 #define _CIF_MCCK_GUEST		BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU	BIT(CIF_DEDICATED_CPU)
@@ -292,11 +290,6 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
 	return (psw.addr - ilc) & mask;
 }

-/*
- * Function to stop a processor until the next interrupt occurs
- */
-void enabled_wait(void);
-
 /*
  * Function to drop a processor into disabled wait state
  */
...
@@ -414,6 +414,7 @@ ENTRY(system_call)
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	ENABLE_INTS
 .Lsysc_do_svc:
 	# clear user controlled register to prevent speculative use
@@ -430,7 +431,6 @@ ENTRY(system_call)
 	jnl	.Lsysc_nr_ok
 	slag	%r8,%r1,3
 .Lsysc_nr_ok:
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	stg	%r2,__PT_ORIG_GPR2(%r11)
 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 	lg	%r9,0(%r8,%r10)			# get system call add.
@@ -699,8 +699,8 @@ ENTRY(pgm_check_handler)
 	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6:	RESTORE_SM_CLEAR_PER
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+6:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	RESTORE_SM_CLEAR_PER
 	larl	%r1,pgm_check_table
 	llgh	%r10,__PT_INT_CODE+2(%r11)
 	nill	%r10,0x007f
@@ -731,8 +731,8 @@ ENTRY(pgm_check_handler)
 # PER event in supervisor state, must be kprobes
 #
 .Lpgm_kprobe:
-	RESTORE_SM_CLEAR_PER
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	RESTORE_SM_CLEAR_PER
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_per_trap
 	j	.Lpgm_return
@@ -778,10 +778,8 @@ ENTRY(io_int_handler)
 .Lio_skip_asce:
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
-	jo	.Lio_restore
-	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_OFF
 .Lio_loop:
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	lghi	%r3,IO_INTERRUPT
@@ -966,10 +964,8 @@ ENTRY(ext_int_handler)
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
-	jo	.Lio_restore
-	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_OFF
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	lghi	%r3,EXT_INTERRUPT
 	brasl	%r14,do_IRQ
...
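For context on the entry.S hunks: on s390x the first slot of every stack frame is the backchain, a pointer to the caller's frame, and a zero entry terminates the chain. Clearing __SF_BACKCHAIN immediately on kernel entry, before the first function call, is what guarantees the unwinder always finds a properly terminated chain. A minimal sketch of a backchain walk, assuming the conventional 64-bit frame layout (backchain at offset 0, saved %r14 in gprs[8]); the names are illustrative, not the kernel unwinder's API:

#include <linux/printk.h>

struct frame {
	unsigned long back_chain;	/* 0(%r15): caller's frame, 0 ends the chain */
	unsigned long empty[5];
	unsigned long gprs[10];		/* saved %r6..%r15; gprs[8] holds %r14 */
};

static void walk_backchain(unsigned long sp)
{
	struct frame *f = (struct frame *)sp;

	while (f->back_chain) {
		f = (struct frame *)f->back_chain;
		/* the saved %r14 is the return address into this frame's owner */
		pr_info("return address: %lx\n", f->gprs[8]);
	}
}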
@@ -9,7 +9,6 @@
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
-#include <linux/kprobes.h>
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
@@ -21,22 +20,19 @@
 static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

-void enabled_wait(void)
+void arch_cpu_idle(void)
 {
 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 	unsigned long long idle_time;
-	unsigned long psw_mask, flags;
+	unsigned long psw_mask;

 	/* Wait for external, I/O or machine check interrupt. */
 	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);

-	raw_local_irq_save(flags);
-	/* Call the assembler magic in entry.S */
+	/* psw_idle() returns with interrupts disabled. */
 	psw_idle(idle, psw_mask);
-	raw_local_irq_restore(flags);

 	/* Account time spent with enabled wait psw loaded as idle time. */
 	raw_write_seqcount_begin(&idle->seqcount);
@@ -46,8 +42,8 @@ void enabled_wait(void)
 	idle->idle_count++;
 	account_idle_time(cputime_to_nsecs(idle_time));
 	raw_write_seqcount_end(&idle->seqcount);
+	raw_local_irq_enable();
 }
-NOKPROBE_SYMBOL(enabled_wait);

 static ssize_t show_idle_count(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -120,12 +116,6 @@ void arch_cpu_idle_enter(void)
 {
 }

-void arch_cpu_idle(void)
-{
-	enabled_wait();
-	raw_local_irq_enable();
-}
-
 void arch_cpu_idle_exit(void)
 {
 }
...
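The net effect of the idle.c hunks: the enabled_wait()/arch_cpu_idle() split is gone. psw_idle() now returns with interrupts disabled, and arch_cpu_idle() re-enables them exactly once before returning, which is what the generic idle loop of this kernel generation expects. A hedged sketch of that calling contract (illustrative only, not the kernel/sched/idle.c source):

#include <linux/cpu.h>
#include <linux/irqflags.h>

static void idle_iteration(void)
{
	raw_local_irq_disable();	/* generic code enters with IRQs off */
	arch_cpu_idle();		/* waits; must return with IRQs on again */
}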
@@ -1512,7 +1512,7 @@ static void diag308_dump(void *dump_block)
 	while (1) {
 		if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
 			break;
-		udelay_simple(USEC_PER_SEC);
+		udelay(USEC_PER_SEC);
 	}
 }
...
@@ -335,7 +335,6 @@ int __init arch_early_irq_init(void)
 	if (!stack)
 		panic("Couldn't allocate async stack");
 	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
-	udelay_enable();
 	return 0;
 }
...
@@ -19,13 +19,6 @@
 #include <asm/div64.h>
 #include <asm/idle.h>

-static DEFINE_STATIC_KEY_FALSE(udelay_ready);
-
-void __init udelay_enable(void)
-{
-	static_branch_enable(&udelay_ready);
-}
-
 void __delay(unsigned long loops)
 {
 	/*
@@ -39,105 +32,25 @@ void __delay(unsigned long loops)
 }
 EXPORT_SYMBOL(__delay);

-static void __udelay_disabled(unsigned long long usecs)
+static void delay_loop(unsigned long delta)
 {
-	unsigned long cr0, cr0_new, psw_mask;
-	struct s390_idle_data idle;
-	u64 end;
+	unsigned long end;

-	end = get_tod_clock() + (usecs << 12);
-	__ctl_store(cr0, 0, 0);
-	cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
-	cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */
-	__ctl_load(cr0_new, 0, 0);
-	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
-	set_clock_comparator(end);
-	set_cpu_flag(CIF_IGNORE_IRQ);
-	psw_idle(&idle, psw_mask);
-	trace_hardirqs_off();
-	clear_cpu_flag(CIF_IGNORE_IRQ);
-	set_clock_comparator(S390_lowcore.clock_comparator);
-	__ctl_load(cr0, 0, 0);
+	end = get_tod_clock_monotonic() + delta;
+	while (!tod_after(get_tod_clock_monotonic(), end))
+		cpu_relax();
 }

-static void __udelay_enabled(unsigned long long usecs)
+void __udelay(unsigned long usecs)
 {
-	u64 clock_saved, end;
-
-	end = get_tod_clock_fast() + (usecs << 12);
-	do {
-		clock_saved = 0;
-		if (tod_after(S390_lowcore.clock_comparator, end)) {
-			clock_saved = local_tick_disable();
-			set_clock_comparator(end);
-		}
-		enabled_wait();
-		if (clock_saved)
-			local_tick_enable(clock_saved);
-	} while (get_tod_clock_fast() < end);
-}
-
-/*
- * Waits for 'usecs' microseconds using the TOD clock comparator.
- */
-void __udelay(unsigned long long usecs)
-{
-	unsigned long flags;
-
-	if (!static_branch_likely(&udelay_ready)) {
-		udelay_simple(usecs);
-		return;
-	}
-
-	preempt_disable();
-	local_irq_save(flags);
-	if (in_irq()) {
-		__udelay_disabled(usecs);
-		goto out;
-	}
-	if (in_softirq()) {
-		if (raw_irqs_disabled_flags(flags))
-			__udelay_disabled(usecs);
-		else
-			__udelay_enabled(usecs);
-		goto out;
-	}
-	if (raw_irqs_disabled_flags(flags)) {
-		local_bh_disable();
-		__udelay_disabled(usecs);
-		_local_bh_enable();
-		goto out;
-	}
-	__udelay_enabled(usecs);
-out:
-	local_irq_restore(flags);
-	preempt_enable();
+	delay_loop(usecs << 12);
 }
 EXPORT_SYMBOL(__udelay);

-/*
- * Simple udelay variant. To be used on startup and reboot
- * when the interrupt handler isn't working.
- */
-void udelay_simple(unsigned long long usecs)
-{
-	u64 end;
-
-	end = get_tod_clock_fast() + (usecs << 12);
-	while (get_tod_clock_fast() < end)
-		cpu_relax();
-}
-
-void __ndelay(unsigned long long nsecs)
+void __ndelay(unsigned long nsecs)
 {
-	u64 end;
-
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_tod_clock_fast() + nsecs;
-	if (nsecs & ~0xfffUL)
-		__udelay(nsecs >> 12);
-	while (get_tod_clock_fast() < end)
-		barrier();
+	delay_loop(nsecs);
 }
 EXPORT_SYMBOL(__ndelay);
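The shift constants in the new delay_loop() callers are TOD clock unit conversions: bit 51 of the s390 TOD clock increments once per microsecond, so one microsecond equals 2^12 = 4096 TOD units, and nanoseconds convert as ns * 4096 / 1000, which reduces to (ns << 9) / 125, exactly what __ndelay computes before calling delay_loop(). Spelled out with hypothetical helper names:

/* Hypothetical helpers making the conversions explicit. */
static inline unsigned long usecs_to_tod(unsigned long usecs)
{
	return usecs << 12;		/* 1 us = 4096 TOD clock units */
}

static inline unsigned long nsecs_to_tod(unsigned long nsecs)
{
	return (nsecs << 9) / 125;	/* ns * 4096 / 1000, fraction reduced */
}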
@@ -9,12 +9,12 @@
 #include <linux/kallsyms.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/timer.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/kprobes.h>
 #include <linux/wait.h>
 #include <asm/irq.h>
-#include <asm/delay.h>

 #define BT_BUF_SIZE (PAGE_SIZE * 4)
@@ -205,12 +205,15 @@ static noinline int unwindme_func3(struct unwindme *u)
 /* This function must appear in the backtrace. */
 static noinline int unwindme_func2(struct unwindme *u)
 {
+	unsigned long flags;
 	int rc;

 	if (u->flags & UWM_SWITCH_STACK) {
-		preempt_disable();
+		local_irq_save(flags);
+		local_mcck_disable();
 		rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u);
-		preempt_enable();
+		local_mcck_enable();
+		local_irq_restore(flags);
 		return rc;
 	} else {
 		return unwindme_func3(u);
@@ -223,31 +226,27 @@ static noinline int unwindme_func1(void *u)
 	return unwindme_func2((struct unwindme *)u);
 }

-static void unwindme_irq_handler(struct ext_code ext_code,
-				 unsigned int param32,
-				 unsigned long param64)
+static void unwindme_timer_fn(struct timer_list *unused)
 {
 	struct unwindme *u = READ_ONCE(unwindme);

-	if (u && u->task == current) {
+	if (u) {
 		unwindme = NULL;
 		u->task = NULL;
 		u->ret = unwindme_func1(u);
+		complete(&u->task_ready);
 	}
 }

+static struct timer_list unwind_timer;
+
 static int test_unwind_irq(struct unwindme *u)
 {
-	preempt_disable();
-	if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
-		pr_info("Couldn't register external interrupt handler");
-		return -1;
-	}
-	u->task = current;
 	unwindme = u;
-	udelay(1);
-	unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler);
-	preempt_enable();
+	init_completion(&u->task_ready);
+	timer_setup(&unwind_timer, unwindme_timer_fn, 0);
+	mod_timer(&unwind_timer, jiffies + 1);
+	wait_for_completion(&u->task_ready);
 	return u->ret;
 }
...
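The rewritten test no longer hijacks the clock-comparator external interrupt: it arms a one-shot kernel timer and blocks on a completion until the callback has produced the backtrace, which deterministically yields an interrupt-context unwind. The same pattern in isolation, with hypothetical demo_* names, assuming only the standard timer and completion APIs:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;		/* hypothetical */
static DECLARE_COMPLETION(demo_done);		/* hypothetical */

static void demo_timer_fn(struct timer_list *unused)
{
	/* runs asynchronously, off the arming task's stack */
	complete(&demo_done);
}

static void run_once_in_timer_context(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + 1);	/* fire on the next tick */
	wait_for_completion(&demo_done);
}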
@@ -1681,7 +1681,7 @@ void ccw_device_wait_idle(struct ccw_device *cdev)
 		cio_tsch(sch);
 		if (sch->schib.scsw.cmd.actl == 0)
 			break;
-		udelay_simple(100);
+		udelay(100);
 	}
 }
 #endif
...
@@ -175,7 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
 	atomic_set(&zq->load, 0);
 	ap_queue_init_state(aq);
 	ap_queue_init_reply(aq, &zq->reply);
-	aq->request_timeout = CEX2A_CLEANUP_TIME,
+	aq->request_timeout = CEX2A_CLEANUP_TIME;
 	aq->private = zq;
 	rc = zcrypt_queue_register(zq);
 	if (rc) {
...
@@ -631,7 +631,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
 	atomic_set(&zq->load, 0);
 	ap_queue_init_state(aq);
 	ap_queue_init_reply(aq, &zq->reply);
-	aq->request_timeout = CEX4_CLEANUP_TIME,
+	aq->request_timeout = CEX4_CLEANUP_TIME;
 	aq->private = zq;
 	rc = zcrypt_queue_register(zq);
 	if (rc) {
...
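Both zcrypt hunks fix the same slip: a trailing comma where a semicolon was intended. The comma operator made the original compile and behave identically, since both assignments were still evaluated, but it fused two statements into one and misled readers and static checkers. A standalone illustration with invented variables:

static void comma_vs_semicolon(void)
{
	int timeout, priv;

	timeout = 30,	/* comma operator: both assignments still run, */
	priv = 1;	/* but this parses as a single statement */

	timeout = 30;	/* the intended form: two separate statements */
	priv = 1;
}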