diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 136ea79b4f9daeaad2c56c4c3de4a68bf82c434a..cb62f9bd8633303dc3430f673c5efb288d493bfa 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -52,7 +52,7 @@ drivers-$(CONFIG_IA64_SGI_SN)	+= arch/ia64/sn/fakeprom/
 makeboot =$(Q)$(MAKE) -f scripts/Makefile.build obj=arch/ia64/boot $(1)
 maketool =$(Q)$(MAKE) -f scripts/Makefile.build obj=arch/ia64/tools $(1)
 
-.PHONY: compressed archclean archmrproper include/asm-ia64/offsets.h
+.PHONY: boot compressed archclean archmrproper include/asm-ia64/offsets.h
 
 all compressed: vmlinux.gz
 
@@ -67,6 +67,8 @@ CLEAN_FILES += include/asm-ia64/offsets.h vmlinux.gz bootloader
 
 prepare: include/asm-ia64/offsets.h
 
-include/asm-ia64/offsets.h: include/asm include/linux/version.h \
-			    include/config/MARKER
+boot:
+	$(call makeboot,$@)
+
+include/asm-ia64/offsets.h: include/asm include/linux/version.h include/config/MARKER
 	$(call maketool,$@)
diff --git a/arch/ia64/boot/Makefile b/arch/ia64/boot/Makefile
index 23480269263ba11febcfd0c285663697e699eb41..498a75ecf0a78a99c70239dd41f52c75c6758305 100644
--- a/arch/ia64/boot/Makefile
+++ b/arch/ia64/boot/Makefile
@@ -21,6 +21,8 @@ vmlinux.gz: $(obj)/vmlinux.gz $(targets-y)
 	$(call cmd,cptotop)
 	@echo '  Kernel: $@ is ready'
 
+boot: bootloader
+
 bootloader: $(obj)/bootloader
 	$(call cmd,cptotop)
 
diff --git a/arch/ia64/tools/Makefile b/arch/ia64/tools/Makefile
index 065e2ab738eabc57d75b2847c1055fcc76a794f9..e8dc9c8f40a31d0bfc38fc8edc0d9f1b28f705ea 100644
--- a/arch/ia64/tools/Makefile
+++ b/arch/ia64/tools/Makefile
@@ -1,6 +1,6 @@
 CFLAGS	= -g -O2 -Wall $(CPPFLAGS)
 
-TARGET	= $(TOPDIR)/include/asm-ia64/offsets.h
+TARGET	= include/asm-ia64/offsets.h
 
 src = $(obj)
 
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 7e56a421e6bc2950a063f6692abf7d1a74b592c7..69d171ada574dbfd26303ab253ac99babbbd6687 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -14,7 +14,7 @@
  */
 #include <linux/types.h>
 
-#include <asm/system.h>
+#include <asm/intrinsics.h>
 
 /*
 * On IA-64, counter must always be volatile to ensure that the
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 9ec001ebd3415f34dfeeb5e53b9d8e84ff17d80d..0f49f6e8df6cb69775c4a722763a2eb78763f22d 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -11,7 +11,7 @@
 
 #include <linux/types.h>
 
-#include <asm/system.h>
+#include <asm/intrinsics.h>
 
 /**
  * set_bit - Atomically set a bit in memory
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 37011ca30459987f6cfac82b7cc929e6f8e7aef0..b4a59a5173cb30af851e3b10c2aecd4c11d3ae3a 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -17,7 +17,6 @@
 
 #include <asm/ptrace.h>
 #include <asm/kregs.h>
-#include <asm/system.h>
 #include <asm/types.h>
 
 #define IA64_NUM_DBG_REGS	8
@@ -79,12 +78,12 @@
 
 #include <linux/cache.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <linux/threads.h>
 
 #include <asm/fpu.h>
 #include <asm/offsets.h>
 #include <asm/page.h>
+#include <asm/percpu.h>
 #include <asm/rse.h>
 #include <asm/unwind.h>
 #include <asm/atomic.h>
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 83b6c48bd1f9900d3725f03ac4f464463ca17af9..d09f11cb14ec2c05827ffa5a361146eba3b6f735 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -185,169 +185,6 @@ do {										\
 	(flags & IA64_PSR_I) == 0;		\
 })
 
-/*
- * Force an unresolved reference if someone tries to use
- * ia64_fetch_and_add() with a bad value.
- */
-extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
-extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
-
-#define IA64_FETCHADD(tmp,v,n,sz)						\
-({										\
-	switch (sz) {								\
-	      case 4:								\
-		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
-				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
-		break;								\
-										\
-	      case 8:								\
-		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
-				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
-		break;								\
-										\
-	      default:								\
-		__bad_size_for_ia64_fetch_and_add();				\
-	}									\
-})
-
-#define ia64_fetch_and_add(i,v)							\
-({										\
-	__u64 _tmp;								\
-	volatile __typeof__(*(v)) *_v = (v);					\
-	switch (i) {								\
-	      case -16:	IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break;	\
-	      case  -8:	IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break;	\
-	      case  -4:	IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break;	\
-	      case  -1:	IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break;	\
-	      case   1:	IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break;	\
-	      case   4:	IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break;	\
-	      case   8:	IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break;	\
-	      case  16:	IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break;	\
-	      default:								\
-		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
-		break;								\
-	}									\
-	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
-})
-
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer (void);
-
-static __inline__ unsigned long
-__xchg (unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long result;
-
-	switch (size) {
-	      case 1:
-		__asm__ __volatile ("xchg1 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 2:
-		__asm__ __volatile ("xchg2 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 4:
-		__asm__ __volatile ("xchg4 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-
-	      case 8:
-		__asm__ __volatile ("xchg8 %0=[%1],%2" : "=r" (result)
-				    : "r" (ptr), "r" (x) : "memory");
-		return result;
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-#define xchg(ptr,x)							     \
-  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long __cmpxchg_called_with_bad_pointer(void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size)						\
-({											\
-	__typeof__(ptr) _p_ = (ptr);							\
-	__typeof__(new) _n_ = (new);							\
-	__u64 _o_, _r_;									\
-											\
-	switch (size) {									\
-	      case 1: _o_ = (__u8 ) (long) (old); break;				\
-	      case 2: _o_ = (__u16) (long) (old); break;				\
-	      case 4: _o_ = (__u32) (long) (old); break;				\
-	      case 8: _o_ = (__u64) (long) (old); break;				\
-	      default: break;								\
-	}										\
-	 __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));				\
-	switch (size) {									\
-	      case 1:									\
-		__asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 2:									\
-		__asm__ __volatile__ ("cmpxchg2."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 4:									\
-		__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      case 8:									\
-		__asm__ __volatile__ ("cmpxchg8."sem" %0=[%1],%2,ar.ccv"		\
-				      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory");	\
-		break;									\
-											\
-	      default:									\
-		_r_ = __cmpxchg_called_with_bad_pointer();				\
-		break;									\
-	}										\
-	(__typeof__(old)) _r_;								\
-})
-
-#define cmpxchg_acq(ptr,o,n)	ia64_cmpxchg("acq", (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n)	ia64_cmpxchg("rel", (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr,o,n)	cmpxchg_acq(ptr,o,n)
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v)							\
-  do {										\
-	if (_cmpxchg_bugcheck_count-- <= 0) {					\
-		void *ip;							\
-		extern int printk(const char *fmt, ...);			\
-		asm ("mov %0=ip" : "=r"(ip));					\
-		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
-		break;								\
-	}									\
-  } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
 #ifdef __KERNEL__
 
 #define prepare_to_switch()    do { } while(0)
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 3cc16a00e5d835c84ce5e188183b529b0a681f24..e1fedc63b71aa6ecac4ccf8fd67b67577d81fafb 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -30,6 +30,7 @@ struct thread_info {
 	__u32 cpu;			/* current CPU */
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	__s32 preempt_count;		/* 0=preemptable, <0=BUG; will also serve as bh-counter */
+	struct restart_block restart_block;
 };
 
 #define INIT_THREAD_SIZE		/* tell sched.h not to declare the thread_union */