Commit ce380619 authored by Linus Torvalds

Merge tag 'please-pull-ia64_misc' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 cleanups from Tony Luck:

 - More atomic cleanup from willy

 - Fix a python script to work with version 3

 - Some other small cleanups

* tag 'please-pull-ia64_misc' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64/err-inject: fix spelling mistake: "capapbilities" -> "capabilities"
  ia64/err-inject: Use get_user_pages_fast()
  ia64: doc: tweak whitespace for 'console=' parameter
  ia64: Convert remaining atomic operations
  ia64: convert unwcheck.py to python3
parents 094b58e1 48e362dd
@@ -111,7 +111,7 @@ TROUBLESHOOTING SERIAL CONSOLE PROBLEMS
     - If you don't have an HCDP, the kernel doesn't know where
       your console lives until the driver discovers serial
-      devices. Use "console=uart, io,0x3f8" (or appropriate
+      devices. Use "console=uart,io,0x3f8" (or appropriate
       address for your machine).
 
 Kernel and init script output works fine, but no "login:" prompt:
@@ -66,38 +66,35 @@ ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
 #ifdef __OPTIMIZE__
-#define __ia64_atomic_const(i) __builtin_constant_p(i) ? \
+#define __ia64_atomic_const(i) \
+	static const int __ia64_atomic_p = __builtin_constant_p(i) ? \
 		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
-		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
+	__ia64_atomic_p
+#else
+#define __ia64_atomic_const(i)	0
+#endif
 
-#define atomic_add_return(i, v) \
+#define atomic_add_return(i,v) \
 ({ \
-	int __i = (i); \
-	static const int __ia64_atomic_p = __ia64_atomic_const(i); \
-	__ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) : \
-				ia64_atomic_add(__i, v); \
+	int __ia64_aar_i = (i); \
+	__ia64_atomic_const(i) \
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+		: ia64_atomic_add(__ia64_aar_i, v); \
 })
 
-#define atomic_sub_return(i, v) \
+#define atomic_sub_return(i,v) \
 ({ \
-	int __i = (i); \
-	static const int __ia64_atomic_p = __ia64_atomic_const(i); \
-	__ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) : \
-				ia64_atomic_sub(__i, v); \
+	int __ia64_asr_i = (i); \
+	__ia64_atomic_const(i) \
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+		: ia64_atomic_sub(__ia64_asr_i, v); \
 })
-#else
-#define atomic_add_return(i, v)	ia64_atomic_add(i, v)
-#define atomic_sub_return(i, v)	ia64_atomic_sub(i, v)
-#endif
 
 #define atomic_fetch_add(i,v) \
 ({ \
 	int __ia64_aar_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
-	    || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
-	    || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
-	    || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
 		: ia64_atomic_fetch_add(__ia64_aar_i, v); \
 })
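The new __ia64_atomic_const() above folds the whole "is this a compile-time constant that fetchadd can encode?" test into one macro, so the remaining atomic operations no longer repeat the __builtin_constant_p() checks inline. Below is a minimal, stand-alone user-space sketch of the same selection pattern, using GNU C statement expressions; the my_*/__my_* names and the non-atomic helper functions are illustrative stand-ins, not the kernel's code.

/* Hedged sketch of the constant-selection pattern; build with gcc -O2.
 * The helpers are plain (non-atomic) stand-ins for the ia64 primitives.
 */
#include <stdio.h>

static int my_slow_add(int i, int *v) { return *v += i; } /* stand-in for ia64_atomic_add() */
static int my_fetchadd(int i, int *v) { return *v += i; } /* stand-in for ia64_fetch_and_add() */

#ifdef __OPTIMIZE__
/* A static const flag initialized from __builtin_constant_p() steers the
 * ternary below; for literal +/-1, 4, 8 or 16 the "fast" branch is chosen
 * at compile time, anything else falls back to the out-of-line helper. */
#define my_atomic_const(i) \
	static const int __my_p = __builtin_constant_p(i) ? \
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__my_p
#else
#define my_atomic_const(i)	0
#endif

#define my_atomic_add_return(i, v) \
({ \
	int __my_i = (i); \
	my_atomic_const(i) \
		? my_fetchadd(__my_i, v) \
		: my_slow_add(__my_i, v); \
})

int main(void)
{
	int counter = 0, delta = 3;

	printf("%d\n", my_atomic_add_return(4, &counter));     /* constant 4: encodable, fast branch */
	printf("%d\n", my_atomic_add_return(delta, &counter)); /* non-constant: slow helper */
	return 0;
}

The #ifdef __OPTIMIZE__ guard mirrors the hunk above: without optimisation the constant test in this sketch simply evaluates to 0 and every call goes through the fallback path.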
@@ -105,11 +102,7 @@ ATOMIC_OPS(sub, -)
 #define atomic_fetch_sub(i,v) \
 ({ \
 	int __ia64_asr_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
-	    || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
-	    || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
-	    || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
 		: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
 })
@@ -170,11 +163,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_add_return(i,v) \
 ({ \
 	long __ia64_aar_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
-	    || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
-	    || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
-	    || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
 		: ia64_atomic64_add(__ia64_aar_i, v); \
 })
@@ -182,11 +171,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_sub_return(i,v) \
 ({ \
 	long __ia64_asr_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
-	    || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
-	    || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
-	    || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
 		: ia64_atomic64_sub(__ia64_asr_i, v); \
 })
@@ -194,11 +179,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_add(i,v) \
 ({ \
 	long __ia64_aar_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
-	    || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
-	    || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
-	    || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
 		: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
 })
@@ -206,11 +187,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_sub(i,v) \
 ({ \
 	long __ia64_asr_i = (i); \
-	(__builtin_constant_p(i) \
-	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
-	    || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
-	    || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
-	    || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+	__ia64_atomic_const(i) \
 		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
 		: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
 })
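A side note on the two primitives the hunks keep switching between: ia64_fetch_and_add() is used for the *_return operations and yields the value after the addition, while ia64_fetchadd() is used for the *_fetch_* operations and yields the value before it. A small user-space illustration of the two return conventions, written against GCC's generic __atomic builtins rather than the ia64 intrinsics:

#include <stdio.h>

int main(void)
{
	int counter = 10;

	/* atomic_add_return() convention: result is the updated value (14). */
	int after  = __atomic_add_fetch(&counter, 4, __ATOMIC_SEQ_CST);

	/* atomic_fetch_add() convention: result is the prior value (14); counter becomes 18. */
	int before = __atomic_fetch_add(&counter, 4, __ATOMIC_SEQ_CST);

	printf("add_return-style=%d fetch_add-style=%d counter=%d\n",
	       after, before, counter);
	return 0;
}

Both builtins perform the same read-modify-write; only the returned snapshot differs, which is exactly the distinction these macros preserve.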
@@ -117,7 +117,7 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
 	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
 #endif
 	return size;
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
+	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
@@ -16,7 +16,7 @@ import re
 import sys
 
 if len(sys.argv) != 2:
-    print "Usage: %s FILE" % sys.argv[0]
+    print("Usage: %s FILE" % sys.argv[0])
     sys.exit(2)
 
 readelf = os.getenv("READELF", "readelf")
@@ -29,7 +29,7 @@ def check_func (func, slots, rlen_sum):
     global num_errors
     num_errors += 1
     if not func: func = "[%#x-%#x]" % (start, end)
-    print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
+    print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
     return
 
 num_funcs = 0
@@ -43,23 +43,23 @@ for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
         check_func(func, slots, rlen_sum)
 
         func = m.group(1)
-        start = long(m.group(2), 16)
-        end = long(m.group(3), 16)
+        start = int(m.group(2), 16)
+        end = int(m.group(3), 16)
         slots = 3 * (end - start) / 16
-        rlen_sum = 0L
+        rlen_sum = 0
         num_funcs += 1
     else:
         m = rlen_pattern.match(line)
         if m:
-            rlen_sum += long(m.group(1))
+            rlen_sum += int(m.group(1))
 check_func(func, slots, rlen_sum)
 
 if num_errors == 0:
-    print "No errors detected in %u functions." % num_funcs
+    print("No errors detected in %u functions." % num_funcs)
 else:
     if num_errors > 1:
         err="errors"
     else:
         err="error"
-    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
+    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
     sys.exit(1)