Commit 333a0743 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  Revert "[IA64] fix percpu warnings"
  [IA64] fix percpu warnings
  [IA64] SMT friendly version of spin_unlock_wait()
  [IA64] use printk_once() unaligned.c/io_common.c
  [IA64] Require SAL 3.2 in order to do extended config space ops
  [IA64] unsigned cannot be less than 0 in sn_hwperf_ioctl()
  [IA64] Restore registers in the stack on INIT
  [IA64] Re-implement spinaphores using ticket lock concepts
  [IA64] Squeeze ticket locks back into 4 bytes.
parents d4da6c9c e8c93fc7
@@ -25,61 +25,82 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *   63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |     now_serving         |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
+	int tmp = ACCESS_ONCE(lock->lock);
 
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
-
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+}
+
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
 }
 
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -116,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
 #define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
...
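The new layout packs both counters of the ticket lock into one 32-bit word: next_ticket in bits 0-14, now_serving in bits 17-31, with bits 15-16 as padding so a wrapping next_ticket does not immediately corrupt now_serving. A minimal user-space sketch of the same arithmetic, assuming C11 atomics (names are illustrative; the kernel spins with the checked speculative ld4.c.nc/invala loads and releases with a 2-byte store, and this sketch skips the pad-bit cleanup on wrap):

#include <stdatomic.h>
#include <stdio.h>

#define TICKET_SHIFT 17
#define TICKET_BITS  15
#define TICKET_MASK  ((1 << TICKET_BITS) - 1)

/* now_serving:15 | pad:2 | next_ticket:15 */
static _Atomic unsigned int lockword;

static void sketch_lock(void)
{
        /* fetchadd(1) takes the next ticket from the low 15 bits */
        unsigned int t = atomic_fetch_add_explicit(&lockword, 1,
                                                   memory_order_acquire);
        /* wait until now_serving (bits 17..31) equals our ticket */
        while ((((atomic_load_explicit(&lockword, memory_order_acquire)
                  >> TICKET_SHIFT) ^ t) & TICKET_MASK) != 0)
                ;       /* spin */
}

static void sketch_unlock(void)
{
        /* advance now_serving by one (bit 17 is its least significant bit) */
        atomic_fetch_add_explicit(&lockword, 1u << TICKET_SHIFT,
                                  memory_order_release);
}

int main(void)
{
        sketch_lock();
        printf("held:  %#010x\n", atomic_load(&lockword));
        sketch_unlock();
        printf("freed: %#010x\n", atomic_load(&lockword));
        return 0;
}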
@@ -6,7 +6,7 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
...
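With the lock word narrowed to unsigned int, raw_spinlock_t is 4 bytes again, which is the point of the "Squeeze ticket locks back into 4 bytes" patch. A hypothetical build-time check in that spirit (not part of the patch):

typedef struct {
        volatile unsigned int lock;
} raw_spinlock_t;

/* fails to compile if the ticket lock ever grows past one 32-bit word */
_Static_assert(sizeof(raw_spinlock_t) == 4, "ticket lock must be 4 bytes");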
@@ -887,6 +887,60 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
 	memcpy(current->comm, comm, sizeof(current->comm));
 }
 
+static void
+finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
+		unsigned long *nat)
+{
+	const u64 *bank;
+
+	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+	 * pmsa_{xip,xpsr,xfs}
+	 */
+	if (ia64_psr(regs)->ic) {
+		regs->cr_iip = ms->pmsa_iip;
+		regs->cr_ipsr = ms->pmsa_ipsr;
+		regs->cr_ifs = ms->pmsa_ifs;
+	} else {
+		regs->cr_iip = ms->pmsa_xip;
+		regs->cr_ipsr = ms->pmsa_xpsr;
+		regs->cr_ifs = ms->pmsa_xfs;
+	}
+	regs->pr = ms->pmsa_pr;
+	regs->b0 = ms->pmsa_br0;
+	regs->ar_rsc = ms->pmsa_rsc;
+	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
+	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
+	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
+	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
+	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
+	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
+	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
+	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
+	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
+	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
+	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
+
+	if (ia64_psr(regs)->bn)
+		bank = ms->pmsa_bank1_gr;
+	else
+		bank = ms->pmsa_bank0_gr;
+	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
+	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
+	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
+	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
+	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
+	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
+	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
+	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
+	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
+	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
+	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
+	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
+	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
+	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
+	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
+	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
+}
+
 /* On entry to this routine, we are running on the per cpu stack, see
  * mca_asm.h.  The original stack has not been touched by this event.  Some of
  * the original stack's registers will be in the RBS on this stack.  This stack
@@ -921,7 +975,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
 	u64 ar_bspstore = regs->ar_bspstore;
 	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
-	const u64 *bank;
 	const char *msg;
 	int cpu = smp_processor_id();
 
@@ -1024,54 +1077,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	p = (char *)r12 - sizeof(*regs);
 	old_regs = (struct pt_regs *)p;
 	memcpy(old_regs, regs, sizeof(*regs));
-	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
-	 * pmsa_{xip,xpsr,xfs}
-	 */
-	if (ia64_psr(regs)->ic) {
-		old_regs->cr_iip = ms->pmsa_iip;
-		old_regs->cr_ipsr = ms->pmsa_ipsr;
-		old_regs->cr_ifs = ms->pmsa_ifs;
-	} else {
-		old_regs->cr_iip = ms->pmsa_xip;
-		old_regs->cr_ipsr = ms->pmsa_xpsr;
-		old_regs->cr_ifs = ms->pmsa_xfs;
-	}
-	old_regs->pr = ms->pmsa_pr;
-	old_regs->b0 = ms->pmsa_br0;
 	old_regs->loadrs = loadrs;
-	old_regs->ar_rsc = ms->pmsa_rsc;
 	old_unat = old_regs->ar_unat;
-	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
-	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
-	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
-	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
-	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
-	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
-	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
-	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
-	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
-	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
-	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
-	if (ia64_psr(old_regs)->bn)
-		bank = ms->pmsa_bank1_gr;
-	else
-		bank = ms->pmsa_bank0_gr;
-	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
-	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
-	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
-	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
-	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
-	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
-	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
-	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
-	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
-	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
-	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
-	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
-	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
-	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
-	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
-	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+	finish_pt_regs(old_regs, ms, &old_unat);
 
 	/* Next stack a struct switch_stack.  mca_asm.S built a partial
 	 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1141,6 +1149,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 no_mod:
 	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
 			smp_processor_id(), type, msg);
+	old_unat = regs->ar_unat;
+	finish_pt_regs(regs, ms, &old_unat);
 	return previous_current;
 }
...
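finish_pt_regs() routes every register through the pre-existing copy_reg() helper in mca.c, whose job is to move a saved value and its NaT bit as a pair. A simplified sketch of that contract, assuming the usual ar.unat convention that a register's NaT bit index is taken from bits 3..8 of its save address:

static void sketch_copy_reg(const u64 *src, u64 src_nat_bits,
                            unsigned long *dst, unsigned long *dst_unat)
{
        unsigned long src_slot = ((unsigned long)src >> 3) & 63;
        unsigned long dst_slot = ((unsigned long)dst >> 3) & 63;
        unsigned long nat = (src_nat_bits >> src_slot) & 1;

        *dst = *src;                            /* the value ...        */
        *dst_unat &= ~(1UL << dst_slot);        /* ... and its NaT bit  */
        *dst_unat |= nat << dst_slot;
}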
@@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len)
  */
 int no_unaligned_warning;
 int unaligned_dump_stack;
-static int noprint_warning;
 
 /*
  * For M-unit:
@@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		/* watch for command names containing %s */
 		printk(KERN_WARNING "%s", buf);
 	} else {
-		if (no_unaligned_warning && !noprint_warning) {
-			noprint_warning = 1;
-			printk(KERN_WARNING "%s(%d) encountered an "
+		if (no_unaligned_warning) {
+			printk_once(KERN_WARNING "%s(%d) encountered an "
 			       "unaligned exception which required\n"
 			       "kernel assistance, which degrades "
 			       "the performance of the application.\n"
...
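printk_once() absorbs the hand-rolled warned-once flags removed here and in the io_common.c hunk further down. It expands to roughly the following pattern (sketched from the 2.6.32-era include/linux/kernel.h, not a verbatim copy):

#define printk_once(x...)                       \
({                                              \
        static bool __print_once;               \
                                                \
        if (!__print_once) {                    \
                __print_once = true;            \
                printk(x);                      \
        }                                       \
})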
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
  * this primitive it can be moved up to a spinaphore.h header.
  */
 struct spinaphore {
-	atomic_t	cur;
+	unsigned long	ticket;
+	unsigned long	serve;
 };
 
 static inline void spinaphore_init(struct spinaphore *ss, int val)
 {
-	atomic_set(&ss->cur, val);
+	ss->ticket = 0;
+	ss->serve = val;
 }
 
 static inline void down_spin(struct spinaphore *ss)
 {
-	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
-		while (atomic_read(&ss->cur) == 0)
-			cpu_relax();
+	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+	if (time_before(t, ss->serve))
+		return;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+		if (time_before(t, serve))
+			return;
+		cpu_relax();
+	}
 }
 
 static inline void up_spin(struct spinaphore *ss)
 {
-	atomic_add(1, &ss->cur);
+	ia64_fetchadd(1, &ss->serve, rel);
 }
 
 static struct spinaphore ptcg_sem;
...
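The spinaphore is a counting semaphore built from the same ticket idea: down_spin() takes a ticket and waits until serve overtakes it, up_spin() retires one ticket, and initialising serve to val lets the first val callers straight through. A portable user-space sketch with C11 atomics (illustrative names; the wrap-safe signed comparison plays the role of time_before()):

#include <stdatomic.h>

struct sketch_spinaphore {
        _Atomic unsigned long ticket;   /* next place in line */
        _Atomic unsigned long serve;    /* tickets admitted so far */
};

static void sketch_init(struct sketch_spinaphore *ss, unsigned long val)
{
        atomic_store(&ss->ticket, 0);
        atomic_store(&ss->serve, val);  /* first val holders get in free */
}

static void sketch_down(struct sketch_spinaphore *ss)
{
        unsigned long t = atomic_fetch_add_explicit(&ss->ticket, 1,
                                                    memory_order_acquire);

        /* admitted once serve has moved past our ticket (wrap-safe) */
        while ((long)(atomic_load_explicit(&ss->serve,
                                           memory_order_acquire) - t) <= 0)
                ;       /* spin */
}

static void sketch_up(struct sketch_spinaphore *ss)
{
        atomic_fetch_add_explicit(&ss->serve, 1, memory_order_release);
}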
@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
 	if ((seg | reg) <= 255) {
 		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
 		mode = 0;
-	} else {
+	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
 		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
 		mode = 1;
+	} else {
+		return -EINVAL;
 	}
 
 	result = ia64_sal_pci_config_read(addr, mode, len, &data);
 	if (result != 0)
 		return -EINVAL;
@@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
 	if ((seg | reg) <= 255) {
 		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
 		mode = 0;
-	} else {
+	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
 		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
 		mode = 1;
+	} else {
+		return -EINVAL;
 	}
 
 	result = ia64_sal_pci_config_write(addr, mode, len, value);
 	if (result != 0)
...
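sal_revision is compared against SAL_VERSION_CODE(3,2) because extended (4 KB) config-space addressing is only defined from SAL revision 3.2 on. The SAL revision word stores BCD major/minor bytes, so a plausible rendering of the macro (a sketch; the real one lives in the ia64 sal.h header) is:

/* 3.2 encodes as 0x0302, so firmware reporting e.g. 0x0301 (3.01)
 * falls through to the new "return -EINVAL" branch above. */
#define SKETCH_BCD(x)           ((((x) / 10) << 4) | ((x) % 10))
#define SKETCH_SAL_VERSION_CODE(major, minor) \
        ((SKETCH_BCD(major) << 8) | SKETCH_BCD(minor))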
@@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev)
  * Additionally note that the struct sn_flush_device_war also has to be
  * removed from arch/ia64/sn/include/xtalk/hubdev.h
  */
-static u8 war_implemented = 0;
 
 static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
 			       struct sn_flush_device_common *common)
@@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
 	struct sn_flush_device_war *dev_entry;
 	struct ia64_sal_retval isrv = {0,0,0,0};
 
-	if (!war_implemented) {
-		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
-		       "PROM flush WAR\n");
-		war_implemented = 1;
-	}
+	printk_once(KERN_WARNING
+		"PROM version < 4.50 -- implementing old PROM flush WAR\n");
 
 	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
 	BUG_ON(!war_list);
...
@@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
 		break;
 
 	case SN_HWPERF_GET_OBJ_NODE:
-		if (a.sz != sizeof(u64) || a.arg < 0) {
+		i = a.arg;
+		if (a.sz != sizeof(u64) || i < 0) {
 			r = -EINVAL;
 			goto error;
 		}
 
 		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
-			if (a.arg >= nobj) {
+			if (i >= nobj) {
 				r = -EINVAL;
 				vfree(objs);
 				goto error;
 			}
 
-			if (objs[(i = a.arg)].id != a.arg) {
+			if (objs[i].id != a.arg) {
 				for (i = 0; i < nobj; i++) {
 					if (objs[i].id == a.arg)
 						break;
...
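The bug motivating this hunk: a.arg is an unsigned 64-bit field, so the old check "a.arg < 0" could never fire and negative user input sailed through; copying it into the signed int i first makes the range check real. A standalone illustration:

#include <stdio.h>

int main(void)
{
        unsigned long arg = (unsigned long)-1;  /* userspace passed -1 */
        int i = (int)arg;                       /* the fix: test a signed copy */

        printf("arg < 0 -> %d\n", arg < 0);     /* 0: never true for unsigned */
        printf("i   < 0 -> %d\n", i < 0);       /* 1: rejects the bad value */
        return 0;
}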