Submitted By: Joe Ciccone <jciccone@linuxfromscratch.org>
Date: 2006-06-25
Initial Package Version: 0.9b
Upstream Status: NONE
Origin: Joe Ciccone
Description: This patch fixes compilation issues with aboot-0.9b.
        It creates the kernel headers that the Makefile would
        normally take from the kernel source tree, or that are not
        included with the linux-headers package installed on the
        system.
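
Applying the patch (a minimal sketch; the patch file name and the unpacked
        source directory are illustrative assumptions, not dictated by
        this patch):

        cd aboot-0.9b &&
        patch -Np1 -i ../aboot-0.9b-fixes.patch   # example file name only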

diff -Naur aboot-0.9b.orig/cons.c aboot-0.9b/cons.c
--- aboot-0.9b.orig/cons.c	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/cons.c	2006-06-27 22:02:47.000000000 -0400
@@ -12,7 +12,7 @@
 #include <utils.h>
 
 long cons_dev;			/* console device */
-extern long int dispatch();	/* Need the full 64 bit return here...*/
+long dispatch(long proc, ...);	/* Need the full 64 bit return here...*/
 
 long
 cons_puts(const char *str, long len)
diff -Naur aboot-0.9b.orig/disk.c aboot-0.9b/disk.c
--- aboot-0.9b.orig/disk.c	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/disk.c	2006-06-27 22:02:47.000000000 -0400
@@ -480,9 +480,7 @@
 				} else {
 					*d++ = *p;
 				}
-				break;
-
-			      default:
+				break;			
 			}
 			p++;
 		}
diff -Naur aboot-0.9b.orig/doc/man/Makefile aboot-0.9b/doc/man/Makefile
--- aboot-0.9b.orig/doc/man/Makefile	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/doc/man/Makefile	2006-06-27 22:02:47.000000000 -0400
@@ -6,7 +6,7 @@
 # use that value. Otherwise default to /usr/man.
 
 ifeq ($(mandir),)
-MANDIR=/usr/man
+MANDIR=$(root)/usr/man
 else
 MANDIR=$(mandir)
 endif
diff -Naur aboot-0.9b.orig/include/asm/a.out.h aboot-0.9b/include/asm/a.out.h
--- aboot-0.9b.orig/include/asm/a.out.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/a.out.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,91 @@
+#ifndef __ALPHA_A_OUT_H__
+#define __ALPHA_A_OUT_H__
+
+#include <linux/types.h>
+
+/*
+ * OSF/1 ECOFF header structs.  ECOFF files consist of:
+ * 	- a file header (struct filehdr),
+ *	- an a.out header (struct aouthdr),
+ *	- one or more section headers (struct scnhdr). 
+ *	  The filhdr's "f_nscns" field contains the
+ *	  number of section headers.
+ */
+
+struct filehdr
+{
+	/* OSF/1 "file" header */
+	__u16 f_magic, f_nscns;
+	__u32 f_timdat;
+	__u64 f_symptr;
+	__u32 f_nsyms;
+	__u16 f_opthdr, f_flags;
+};
+
+struct aouthdr
+{
+	__u64 info;		/* after that it looks quite normal.. */
+	__u64 tsize;
+	__u64 dsize;
+	__u64 bsize;
+	__u64 entry;
+	__u64 text_start;	/* with a few additions that actually make sense */
+	__u64 data_start;
+	__u64 bss_start;
+	__u32 gprmask, fprmask;	/* bitmask of general & floating point regs used in binary */
+	__u64 gpvalue;
+};
+
+struct scnhdr
+{
+	char	s_name[8];
+	__u64	s_paddr;
+	__u64	s_vaddr;
+	__u64	s_size;
+	__u64	s_scnptr;
+	__u64	s_relptr;
+	__u64	s_lnnoptr;
+	__u16	s_nreloc;
+	__u16	s_nlnno;
+	__u32	s_flags;
+};
+
+struct exec
+{
+	/* OSF/1 "file" header */
+	struct filehdr		fh;
+	struct aouthdr		ah;
+};
+
+/*
+ * Define's so that the kernel exec code can access the a.out header
+ * fields...
+ */
+#define	a_info		ah.info
+#define	a_text		ah.tsize
+#define a_data		ah.dsize
+#define a_bss		ah.bsize
+#define a_entry		ah.entry
+#define a_textstart	ah.text_start
+#define	a_datastart	ah.data_start
+#define	a_bssstart	ah.bss_start
+#define	a_gprmask	ah.gprmask
+#define a_fprmask	ah.fprmask
+#define a_gpvalue	ah.gpvalue
+
+#define N_TXTADDR(x) ((x).a_textstart)
+#define N_DATADDR(x) ((x).a_datastart)
+#define N_BSSADDR(x) ((x).a_bssstart)
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+
+#define AOUTHSZ		sizeof(struct aouthdr)
+#define SCNHSZ		sizeof(struct scnhdr)
+#define SCNROUND	16
+
+#define N_TXTOFF(x) \
+  ((long) N_MAGIC(x) == ZMAGIC ? 0 : \
+   (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))
+
+#endif /* __A_OUT_GNU_H__ */
diff -Naur aboot-0.9b.orig/include/asm/auxvec.h aboot-0.9b/include/asm/auxvec.h
--- aboot-0.9b.orig/include/asm/auxvec.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/auxvec.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,24 @@
+#ifndef __ASM_ALPHA_AUXVEC_H
+#define __ASM_ALPHA_AUXVEC_H
+
+/* Reserve these numbers for any future use of a VDSO.  */
+#if 0
+#define AT_SYSINFO		32
+#define AT_SYSINFO_EHDR		33
+#endif
+
+/* More complete cache descriptions than AT_[DIU]CACHEBSIZE.  If the
+   value is -1, then the cache doesn't exist.  Otherwise:
+
+      bit 0-3:	  Cache set-associativity; 0 means fully associative.
+      bit 4-7:	  Log2 of cacheline size.
+      bit 8-31:	  Size of the entire cache >> 8.
+      bit 32-63:  Reserved.
+*/
+
+#define AT_L1I_CACHESHAPE	34
+#define AT_L1D_CACHESHAPE	35
+#define AT_L2_CACHESHAPE	36
+#define AT_L3_CACHESHAPE	37
+
+#endif /* __ASM_ALPHA_AUXVEC_H */
diff -Naur aboot-0.9b.orig/include/asm/barrier.h aboot-0.9b/include/asm/barrier.h
--- aboot-0.9b.orig/include/asm/barrier.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/barrier.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,34 @@
+#ifndef __BARRIER_H
+#define __BARRIER_H
+
+#define mb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define rmb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define wmb() \
+__asm__ __volatile__("wmb": : :"memory")
+
+#define read_barrier_depends() \
+__asm__ __volatile__("mb": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	barrier()
+#endif
+
+#define set_mb(var, value) \
+do { var = value; mb(); } while (0)
+
+#define set_wmb(var, value) \
+do { var = value; wmb(); } while (0)
+
+#endif		/* __BARRIER_H */
diff -Naur aboot-0.9b.orig/include/asm/hwrpb.h aboot-0.9b/include/asm/hwrpb.h
--- aboot-0.9b.orig/include/asm/hwrpb.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/hwrpb.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,205 @@
+#ifndef __ALPHA_HWRPB_H
+#define __ALPHA_HWRPB_H
+
+#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000)
+
+/*
+ * DEC processor types for Alpha systems.  Found in HWRPB.
+ * These values are architected.
+ */
+
+#define EV3_CPU                 1       /* EV3                  */
+#define EV4_CPU                 2       /* EV4 (21064)          */
+#define LCA4_CPU                4       /* LCA4 (21066/21068)   */
+#define EV5_CPU                 5       /* EV5 (21164)          */
+#define EV45_CPU                6       /* EV4.5 (21064/xxx)    */
+#define EV56_CPU		7	/* EV5.6 (21164)	*/
+#define EV6_CPU			8	/* EV6 (21264)		*/
+#define PCA56_CPU		9	/* PCA56 (21164PC)	*/
+#define PCA57_CPU		10	/* PCA57 (notyet)	*/
+#define EV67_CPU		11	/* EV67 (21264A)	*/
+#define EV68CB_CPU		12	/* EV68CB (21264C)	*/
+#define EV68AL_CPU		13	/* EV68AL (21264B)	*/
+#define EV68CX_CPU		14	/* EV68CX (21264D)	*/
+#define EV7_CPU			15	/* EV7 (21364)		*/
+#define EV79_CPU		16	/* EV79 (21364??)	*/
+#define EV69_CPU		17	/* EV69 (21264/EV69A)	*/
+
+/*
+ * DEC system types for Alpha systems.  Found in HWRPB.
+ * These values are architected.
+ */
+
+#define ST_ADU			  1	/* Alpha ADU systype	*/
+#define ST_DEC_4000		  2	/* Cobra systype	*/
+#define ST_DEC_7000		  3	/* Ruby systype		*/
+#define ST_DEC_3000_500		  4	/* Flamingo systype	*/
+#define ST_DEC_2000_300		  6	/* Jensen systype	*/
+#define ST_DEC_3000_300		  7	/* Pelican systype	*/
+#define ST_DEC_2100_A500	  9	/* Sable systype	*/
+#define ST_DEC_AXPVME_64	 10	/* AXPvme system type	*/
+#define ST_DEC_AXPPCI_33	 11	/* NoName system type	*/
+#define ST_DEC_TLASER		 12	/* Turbolaser systype	*/
+#define ST_DEC_2100_A50		 13	/* Avanti systype	*/
+#define ST_DEC_MUSTANG		 14	/* Mustang systype	*/
+#define ST_DEC_ALCOR		 15	/* Alcor (EV5) systype	*/
+#define ST_DEC_1000		 17	/* Mikasa systype	*/
+#define ST_DEC_EB64		 18	/* EB64 systype		*/
+#define ST_DEC_EB66		 19	/* EB66 systype		*/
+#define ST_DEC_EB64P		 20	/* EB64+ systype	*/
+#define ST_DEC_BURNS		 21	/* laptop systype	*/
+#define ST_DEC_RAWHIDE		 22	/* Rawhide systype	*/
+#define ST_DEC_K2		 23	/* K2 systype		*/
+#define ST_DEC_LYNX		 24	/* Lynx systype		*/
+#define ST_DEC_XL		 25	/* Alpha XL systype	*/
+#define ST_DEC_EB164		 26	/* EB164 systype	*/
+#define ST_DEC_NORITAKE		 27	/* Noritake systype	*/
+#define ST_DEC_CORTEX		 28	/* Cortex systype	*/
+#define ST_DEC_MIATA		 30	/* Miata systype        */
+#define ST_DEC_XXM		 31	/* XXM systype		*/
+#define ST_DEC_TAKARA		 32	/* Takara systype	*/
+#define ST_DEC_YUKON		 33	/* Yukon systype	*/
+#define ST_DEC_TSUNAMI		 34	/* Tsunami systype	*/
+#define ST_DEC_WILDFIRE		 35	/* Wildfire systype	*/
+#define ST_DEC_CUSCO		 36	/* CUSCO systype	*/
+#define ST_DEC_EIGER		 37	/* Eiger systype	*/
+#define ST_DEC_TITAN		 38	/* Titan systype	*/
+#define ST_DEC_MARVEL		 39	/* Marvel systype	*/
+
+/* UNOFFICIAL!!! */
+#define ST_UNOFFICIAL_BIAS	100
+#define ST_DTI_RUFFIAN		101	/* RUFFIAN systype	*/
+
+/* Alpha Processor, Inc. systems */
+#define ST_API_BIAS		200
+#define ST_API_NAUTILUS		201	/* UP1000 systype	*/
+
+struct pcb_struct {
+	unsigned long ksp;
+	unsigned long usp;
+	unsigned long ptbr;
+	unsigned int pcc;
+	unsigned int asn;
+	unsigned long unique;
+	unsigned long flags;
+	unsigned long res1, res2;
+};
+
+struct percpu_struct {
+	unsigned long hwpcb[16];
+	unsigned long flags;
+	unsigned long pal_mem_size;
+	unsigned long pal_scratch_size;
+	unsigned long pal_mem_pa;
+	unsigned long pal_scratch_pa;
+	unsigned long pal_revision;
+	unsigned long type;
+	unsigned long variation;
+	unsigned long revision;
+	unsigned long serial_no[2];
+	unsigned long logout_area_pa;
+	unsigned long logout_area_len;
+	unsigned long halt_PCBB;
+	unsigned long halt_PC;
+	unsigned long halt_PS;
+	unsigned long halt_arg;
+	unsigned long halt_ra;
+	unsigned long halt_pv;
+	unsigned long halt_reason;
+	unsigned long res;
+	unsigned long ipc_buffer[21];
+	unsigned long palcode_avail[16];
+	unsigned long compatibility;
+	unsigned long console_data_log_pa;
+	unsigned long console_data_log_length;
+	unsigned long bcache_info;
+};
+
+struct procdesc_struct {
+	unsigned long weird_vms_stuff;
+	unsigned long address;
+};
+
+struct vf_map_struct {
+	unsigned long va;
+	unsigned long pa;
+	unsigned long count;
+};
+
+struct crb_struct {
+	struct procdesc_struct * dispatch_va;
+	struct procdesc_struct * dispatch_pa;
+	struct procdesc_struct * fixup_va;
+	struct procdesc_struct * fixup_pa;
+	/* virtual->physical map */
+	unsigned long map_entries;
+	unsigned long map_pages;
+	struct vf_map_struct map[1];
+};
+
+struct memclust_struct {
+	unsigned long start_pfn;
+	unsigned long numpages;
+	unsigned long numtested;
+	unsigned long bitmap_va;
+	unsigned long bitmap_pa;
+	unsigned long bitmap_chksum;
+	unsigned long usage;
+};
+
+struct memdesc_struct {
+	unsigned long chksum;
+	unsigned long optional_pa;
+	unsigned long numclusters;
+	struct memclust_struct cluster[0];
+};
+
+struct dsr_struct {
+	long smm;			/* SMM nubber used by LMF       */
+	unsigned long  lurt_off;	/* offset to LURT table         */
+	unsigned long  sysname_off;	/* offset to sysname char count */
+};
+
+struct hwrpb_struct {
+	unsigned long phys_addr;	/* check: physical address of the hwrpb */
+	unsigned long id;		/* check: "HWRPB\0\0\0" */
+	unsigned long revision;	
+	unsigned long size;		/* size of hwrpb */
+	unsigned long cpuid;
+	unsigned long pagesize;		/* 8192, I hope */
+	unsigned long pa_bits;		/* number of physical address bits */
+	unsigned long max_asn;
+	unsigned char ssn[16];		/* system serial number: big bother is watching */
+	unsigned long sys_type;
+	unsigned long sys_variation;
+	unsigned long sys_revision;
+	unsigned long intr_freq;	/* interval clock frequency * 4096 */
+	unsigned long cycle_freq;	/* cycle counter frequency */
+	unsigned long vptb;		/* Virtual Page Table Base address */
+	unsigned long res1;
+	unsigned long tbhb_offset;	/* Translation Buffer Hint Block */
+	unsigned long nr_processors;
+	unsigned long processor_size;
+	unsigned long processor_offset;
+	unsigned long ctb_nr;
+	unsigned long ctb_size;		/* console terminal block size */
+	unsigned long ctbt_offset;	/* console terminal block table offset */
+	unsigned long crb_offset;	/* console callback routine block */
+	unsigned long mddt_offset;	/* memory data descriptor table */
+	unsigned long cdb_offset;	/* configuration data block (or NULL) */
+	unsigned long frut_offset;	/* FRU table (or NULL) */
+	void (*save_terminal)(unsigned long);
+	unsigned long save_terminal_data;
+	void (*restore_terminal)(unsigned long);
+	unsigned long restore_terminal_data;
+	void (*CPU_restart)(unsigned long);
+	unsigned long CPU_restart_data;
+	unsigned long res2;
+	unsigned long res3;
+	unsigned long chksum;
+	unsigned long rxrdy;
+	unsigned long txrdy;
+	unsigned long dsr_offset;	/* "Dynamic System Recognition Data Block Table" */
+};
+
+#endif /* __ALPHA_HWRPB_H */
diff -Naur aboot-0.9b.orig/include/asm/page.h aboot-0.9b/include/asm/page.h
--- aboot-0.9b.orig/include/asm/page.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/page.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,103 @@
+#ifndef _ALPHA_PAGE_H
+#define _ALPHA_PAGE_H
+
+#include <linux/config.h>
+#include <asm/pal.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	13
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#define STRICT_MM_TYPECHECKS
+
+extern void clear_page(void *page);
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+extern void copy_page(void * _to, void * _from);
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#ifdef USE_48_BIT_KSEG
+#define PAGE_OFFSET		0xffff800000000000UL
+#else
+#define PAGE_OFFSET		0xfffffc0000000000UL
+#endif
+
+#else
+
+#ifdef USE_48_BIT_KSEG
+#define PAGE_OFFSET		0xffff800000000000
+#else
+#define PAGE_OFFSET		0xfffffc0000000000
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn)	(mem_map + (pfn))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#endif /* CONFIG_DISCONTIGMEM */
+
+#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE | VM_EXEC | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/page.h>
+
+#endif /* _ALPHA_PAGE_H */
diff -Naur aboot-0.9b.orig/include/asm/pal.h aboot-0.9b/include/asm/pal.h
--- aboot-0.9b.orig/include/asm/pal.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/pal.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,51 @@
+#ifndef __ALPHA_PAL_H
+#define __ALPHA_PAL_H
+
+/*
+ * Common PAL-code
+ */
+#define PAL_halt	  0
+#define PAL_cflush	  1
+#define PAL_draina	  2
+#define PAL_bpt		128
+#define PAL_bugchk	129
+#define PAL_chmk	131
+#define PAL_callsys	131
+#define PAL_imb		134
+#define PAL_rduniq	158
+#define PAL_wruniq	159
+#define PAL_gentrap	170
+#define PAL_nphalt	190
+
+/*
+ * VMS specific PAL-code
+ */
+#define PAL_swppal	10
+#define PAL_mfpr_vptb	41
+
+/*
+ * OSF specific PAL-code
+ */
+#define PAL_cserve	 9
+#define PAL_wripir	13
+#define PAL_rdmces	16
+#define PAL_wrmces	17
+#define PAL_wrfen	43
+#define PAL_wrvptptr	45
+#define PAL_jtopal	46
+#define PAL_swpctx	48
+#define PAL_wrval	49
+#define PAL_rdval	50
+#define PAL_tbi		51
+#define PAL_wrent	52
+#define PAL_swpipl	53
+#define PAL_rdps	54
+#define PAL_wrkgp	55
+#define PAL_wrusp	56
+#define PAL_wrperfmon	57
+#define PAL_rdusp	58
+#define PAL_whami	60
+#define PAL_retsys	61
+#define PAL_rti		63
+
+#endif /* __ALPHA_PAL_H */
diff -Naur aboot-0.9b.orig/include/asm/spinlock.h aboot-0.9b/include/asm/spinlock.h
--- aboot-0.9b.orig/include/asm/spinlock.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/spinlock.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,170 @@
+#ifndef _ALPHA_SPINLOCK_H
+#define _ALPHA_SPINLOCK_H
+
+#include <linux/config.h>
+#include <asm/system.h>
+#include <linux/kernel.h>
+#include <asm/current.h>
+
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)	((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+		do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+{
+	mb();
+	lock->lock = 0;
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%0,%1\n"
+	"	bne	%0,2f\n"
+	"	lda	%0,1\n"
+	"	stl_c	%0,%1\n"
+	"	beq	%0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	ldl	%0,%1\n"
+	"	bne	%0,2b\n"
+	"	br	1b\n"
+	".previous"
+	: "=&r" (tmp), "=m" (lock->lock)
+	: "m"(lock->lock) : "memory");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return !test_and_set_bit(0, &lock->lock);
+}
+
+/***********************************************************/
+
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+{
+	return (lock->lock & 1) == 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+{
+	return lock->lock == 0;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *lock)
+{
+	long regx;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	blbs	%1,6f\n"
+	"	subl	%1,2,%1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"6:	ldl	%1,%0\n"
+	"	blbs	%1,6b\n"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *lock)
+{
+	long regx;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	bne	%1,6f\n"
+	"	lda	%1,1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"6:	ldl	%1,%0\n"
+	"	bne	%1,6b\n"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t * lock)
+{
+	long regx;
+	int success;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	lda	%2,0\n"
+	"	blbs	%1,2f\n"
+	"	subl	%1,2,%2\n"
+	"	stl_c	%2,%0\n"
+	"	beq	%2,6f\n"
+	"2:	mb\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx), "=&r" (success)
+	: "m" (*lock) : "memory");
+
+	return success;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t * lock)
+{
+	long regx;
+	int success;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	lda	%2,0\n"
+	"	bne	%1,2f\n"
+	"	lda	%2,1\n"
+	"	stl_c	%2,%0\n"
+	"	beq	%2,6f\n"
+	"2:	mb\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx), "=&r" (success)
+	: "m" (*lock) : "memory");
+
+	return success;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t * lock)
+{
+	long regx;
+	__asm__ __volatile__(
+	"	mb\n"
+	"1:	ldl_l	%1,%0\n"
+	"	addl	%1,2,%1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t * lock)
+{
+	mb();
+	lock->lock = 0;
+}
+
+#endif /* _ALPHA_SPINLOCK_H */
diff -Naur aboot-0.9b.orig/include/asm/spinlock_types.h aboot-0.9b/include/asm/spinlock_types.h
--- aboot-0.9b.orig/include/asm/spinlock_types.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/spinlock_types.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,20 @@
+#ifndef _ALPHA_SPINLOCK_TYPES_H
+#define _ALPHA_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
diff -Naur aboot-0.9b.orig/include/asm/string.h aboot-0.9b/include/asm/string.h
--- aboot-0.9b.orig/include/asm/string.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/string.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,68 @@
+#ifndef __ALPHA_STRING_H__
+#define __ALPHA_STRING_H__
+
+#ifdef __KERNEL__
+
+/*
+ * GCC of any recent vintage doesn't do stupid things with bcopy.
+ * EGCS 1.1 knows all about expanding memcpy inline, others don't.
+ *
+ * Similarly for a memset with data = 0.
+ */
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *, const void *, size_t);
+
+/* For backward compatibility with modules.  Unused otherwise.  */
+extern void * __memcpy(void *, const void *, size_t);
+
+#define memcpy __builtin_memcpy
+
+#define __HAVE_ARCH_MEMSET
+extern void * __constant_c_memset(void *, unsigned long, size_t);
+extern void * __memset(void *, int, size_t);
+extern void * memset(void *, int, size_t);
+
+#define memset(s, c, n)							    \
+(__builtin_constant_p(c)						    \
+ ? (__builtin_constant_p(n) && (c) == 0					    \
+    ? __builtin_memset((s),0,(n)) 					    \
+    : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
+ : __memset((s),(c),(n)))
+
+#define __HAVE_ARCH_STRCPY
+extern char * strcpy(char *,const char *);
+#define __HAVE_ARCH_STRNCPY
+extern char * strncpy(char *, const char *, size_t);
+#define __HAVE_ARCH_STRCAT
+extern char * strcat(char *, const char *);
+#define __HAVE_ARCH_STRNCAT
+extern char * strncat(char *, const char *, size_t);
+#define __HAVE_ARCH_STRCHR
+extern char * strchr(const char *,int);
+#define __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char *,int);
+#define __HAVE_ARCH_STRLEN
+extern size_t strlen(const char *);
+#define __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *, int, size_t);
+
+/* The following routine is like memset except that it writes 16-bit
+   aligned values.  The DEST and COUNT parameters must be even for 
+   correct operation.  */
+
+#define __HAVE_ARCH_MEMSETW
+extern void * __memsetw(void *dest, unsigned short, size_t count);
+
+#define memsetw(s, c, n)						 \
+(__builtin_constant_p(c)						 \
+ ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \
+ : __memsetw((s),(c),(n)))
+
+extern int strcasecmp(const char *, const char *);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_STRING_H__ */
diff -Naur aboot-0.9b.orig/include/asm/system.h aboot-0.9b/include/asm/system.h
--- aboot-0.9b.orig/include/asm/system.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/system.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,594 @@
+#ifndef __ALPHA_SYSTEM_H
+#define __ALPHA_SYSTEM_H
+
+#include <linux/config.h>
+#include <asm/pal.h>
+#include <asm/page.h>
+#include <asm/barrier.h>
+
+/*
+ * System defines.. Note that this is included both from .c and .S
+ * files, so it does only defines, not any C code.
+ */
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB	0x20000000
+#define BOOT_ADDR	0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE	(16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this.  */
+#else
+#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD	KERNEL_START
+#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader.  Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM			ZERO_PGE
+#define COMMAND_LINE		((char*)(PARAM + 0x0000))
+#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
+/*
+ * This is the logout header that should be common to all platforms
+ * (assuming they are running OSF/1 PALcode, I guess).
+ */
+struct el_common {
+	unsigned int	size;		/* size in bytes of logout area */
+	unsigned int	sbz1	: 30;	/* should be zero */
+	unsigned int	err2	:  1;	/* second error */
+	unsigned int	retry	:  1;	/* retry flag */
+	unsigned int	proc_offset;	/* processor-specific offset */
+	unsigned int	sys_offset;	/* system-specific offset */
+	unsigned int	code;		/* machine check code */
+	unsigned int	frame_rev;	/* frame revision */
+};
+
+/* Machine Check Frame for uncorrectable errors (Large format)
+ *      --- This is used to log uncorrectable errors such as
+ *          double bit ECC errors.
+ *      --- These errors are detected by both processor and systems.
+ */
+struct el_common_EV5_uncorrectable_mcheck {
+        unsigned long   shadow[8];        /* Shadow reg. 8-14, 25           */
+        unsigned long   paltemp[24];      /* PAL TEMP REGS.                 */
+        unsigned long   exc_addr;         /* Address of excepting instruction*/
+        unsigned long   exc_sum;          /* Summary of arithmetic traps.   */
+        unsigned long   exc_mask;         /* Exception mask (from exc_sum). */
+        unsigned long   pal_base;         /* Base address for PALcode.      */
+        unsigned long   isr;              /* Interrupt Status Reg.          */
+        unsigned long   icsr;             /* CURRENT SETUP OF EV5 IBOX      */
+        unsigned long   ic_perr_stat;     /* I-CACHE Reg. <11> set Data parity
+                                                         <12> set TAG parity*/
+        unsigned long   dc_perr_stat;     /* D-CACHE error Reg. Bits set to 1:
+                                                     <2> Data error in bank 0
+                                                     <3> Data error in bank 1
+                                                     <4> Tag error in bank 0
+                                                     <5> Tag error in bank 1 */
+        unsigned long   va;               /* Effective VA of fault or miss. */
+        unsigned long   mm_stat;          /* Holds the reason for D-stream 
+                                             fault or D-cache parity errors */
+        unsigned long   sc_addr;          /* Address that was being accessed
+                                             when EV5 detected Secondary cache
+                                             failure.                 */
+        unsigned long   sc_stat;          /* Helps determine if the error was
+                                             TAG/Data parity(Secondary Cache)*/
+        unsigned long   bc_tag_addr;      /* Contents of EV5 BC_TAG_ADDR    */
+        unsigned long   ei_addr;          /* Physical address of any transfer
+                                             that is logged in EV5 EI_STAT */
+        unsigned long   fill_syndrome;    /* For correcting ECC errors.     */
+        unsigned long   ei_stat;          /* Helps identify reason of any 
+                                             processor uncorrectable error
+                                             at its external interface.     */
+        unsigned long   ld_lock;          /* Contents of EV5 LD_LOCK register*/
+};
+
+struct el_common_EV6_mcheck {
+	unsigned int FrameSize;		/* Bytes, including this field */
+	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
+	unsigned int CpuOffset;		/* Offset to CPU-specific info */
+	unsigned int SystemOffset;	/* Offset to system-specific info */
+	unsigned int MCHK_Code;
+	unsigned int MCHK_Frame_Rev;
+	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
+	unsigned long DC_STAT;		/* (See the 21264 Spec) */
+	unsigned long C_ADDR;
+	unsigned long DC1_SYNDROME;
+	unsigned long DC0_SYNDROME;
+	unsigned long C_STAT;
+	unsigned long C_STS;
+	unsigned long MM_STAT;
+	unsigned long EXC_ADDR;
+	unsigned long IER_CM;
+	unsigned long ISUM;
+	unsigned long RESERVED0;
+	unsigned long PAL_BASE;
+	unsigned long I_CTL;
+	unsigned long PCTX;
+};
+
+extern void halt(void) __attribute__((noreturn));
+#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
+
+#define switch_to(P,N,L)						\
+  do {									\
+    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P));	\
+    check_mmu_context();						\
+  } while (0)
+
+struct task_struct;
+extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
+
+#define imb() \
+__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
+
+#define draina() \
+__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
+
+enum implver_enum {
+	IMPLVER_EV4,
+	IMPLVER_EV5,
+	IMPLVER_EV6
+};
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define implver()				\
+({ unsigned long __implver;			\
+   __asm__ ("implver %0" : "=r"(__implver));	\
+   (enum implver_enum) __implver; })
+#else
+/* Try to eliminate some dead code.  */
+#ifdef CONFIG_ALPHA_EV4
+#define implver() IMPLVER_EV4
+#endif
+#ifdef CONFIG_ALPHA_EV5
+#define implver() IMPLVER_EV5
+#endif
+#if defined(CONFIG_ALPHA_EV6)
+#define implver() IMPLVER_EV6
+#endif
+#endif
+
+enum amask_enum {
+	AMASK_BWX = (1UL << 0),
+	AMASK_FIX = (1UL << 1),
+	AMASK_CIX = (1UL << 2),
+	AMASK_MAX = (1UL << 8),
+	AMASK_PRECISE_TRAP = (1UL << 9),
+};
+
+#define amask(mask)						\
+({ unsigned long __amask, __input = (mask);			\
+   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
+   __amask; })
+
+#define __CALL_PAL_R0(NAME, TYPE)				\
+static inline TYPE NAME(void)					\
+{								\
+	register TYPE __r0 __asm__("$0");			\
+	__asm__ __volatile__(					\
+		"call_pal %1 # " #NAME				\
+		:"=r" (__r0)					\
+		:"i" (PAL_ ## NAME)				\
+		:"$1", "$16", "$22", "$23", "$24", "$25");	\
+	return __r0;						\
+}
+
+#define __CALL_PAL_W1(NAME, TYPE0)				\
+static inline void NAME(TYPE0 arg0)				\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %1 # "#NAME				\
+		: "=r"(__r16)					\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
+
+#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
+static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17)			\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
+
+#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
+static inline RTYPE NAME(TYPE0 arg0)				\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r0)			\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
+
+#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
+static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
+	__asm__ __volatile__(					\
+		"call_pal %3 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
+
+__CALL_PAL_W1(cflush, unsigned long);
+__CALL_PAL_R0(rdmces, unsigned long);
+__CALL_PAL_R0(rdps, unsigned long);
+__CALL_PAL_R0(rdusp, unsigned long);
+__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
+__CALL_PAL_R0(whami, unsigned long);
+__CALL_PAL_W2(wrent, void*, unsigned long);
+__CALL_PAL_W1(wripir, unsigned long);
+__CALL_PAL_W1(wrkgp, unsigned long);
+__CALL_PAL_W1(wrmces, unsigned long);
+__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
+__CALL_PAL_W1(wrusp, unsigned long);
+__CALL_PAL_W1(wrvptptr, unsigned long);
+
+#define IPL_MIN		0
+#define IPL_SW0		1
+#define IPL_SW1		2
+#define IPL_DEV0	3
+#define IPL_DEV1	4
+#define IPL_TIMER	5
+#define IPL_PERF	6
+#define IPL_POWERFAIL	6
+#define IPL_MCHECK	7
+#define IPL_MAX		7
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+#undef IPL_MIN
+#define IPL_MIN		__min_ipl
+extern int __min_ipl;
+#endif
+
+#define getipl()		(rdps() & 7)
+#define setipl(ipl)		((void) swpipl(ipl))
+
+#define local_irq_disable()			do { setipl(IPL_MAX); barrier(); } while(0)
+#define local_irq_enable()			do { barrier(); setipl(IPL_MIN); } while(0)
+#define local_save_flags(flags)	((flags) = rdps())
+#define local_irq_save(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
+#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)
+
+#define irqs_disabled()	(getipl() == IPL_MAX)
+
+/*
+ * TB routines..
+ */
+#define __tbi(nr,arg,arg1...)					\
+({								\
+	register unsigned long __r16 __asm__("$16") = (nr);	\
+	register unsigned long __r17 __asm__("$17"); arg;	\
+	__asm__ __volatile__(					\
+		"call_pal %3 #__tbi"				\
+		:"=r" (__r16),"=r" (__r17)			\
+		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
+		:"$0", "$1", "$22", "$23", "$24", "$25");	\
+})
+
+#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
+#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
+#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
+#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
+#define tbiap()		__tbi(-1, /* no second argument */)
+#define tbia()		__tbi(-2, /* no second argument */)
+
+/*
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
+ */
+
+static inline unsigned long
+__xchg_u8(volatile char *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	insbl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extbl	%2,%4,%0\n"
+	"	mskbl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+__xchg_u16(volatile short *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	inswl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extwl	%2,%4,%0\n"
+	"	mskwl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+static inline unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid xchg().  */
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size) \
+({ \
+	unsigned long __xchg__res; \
+	volatile void *__xchg__ptr = (ptr); \
+	switch (size) { \
+		case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
+		case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
+		case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
+		case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
+		default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+	} \
+	__xchg__res; \
+})
+
+#define xchg(ptr,x)							     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+  })
+
+#define tas(ptr) (xchg((ptr),1))
+
+
+/* 
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ *
+ * The memory barrier should be placed in SMP only when we actually
+ * make the change. If we don't change anything (so if the returned
+ * prev is equal to old) then we aren't acquiring anything new and
+ * we don't need any memory barrier as far I can tell.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long
+__cmpxchg_u8(volatile char *m, long old, long new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	insbl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extbl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskbl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u16(volatile short *m, long old, long new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	inswl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extwl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskwl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+		case 1:
+			return __cmpxchg_u8(ptr, old, new);
+		case 2:
+			return __cmpxchg_u16(ptr, old, new);
+		case 4:
+			return __cmpxchg_u32(ptr, old, new);
+		case 8:
+			return __cmpxchg_u64(ptr, old, new);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#endif /* __ASSEMBLY__ */
+
+#define arch_align_stack(x) (x)
+
+#endif
diff -Naur aboot-0.9b.orig/include/asm/thread_info.h aboot-0.9b/include/asm/thread_info.h
--- aboot-0.9b.orig/include/asm/thread_info.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm/thread_info.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,96 @@
+#ifndef _ALPHA_THREAD_INFO_H
+#define _ALPHA_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/hwrpb.h>
+#endif
+
+#ifndef __ASSEMBLY__
+struct thread_info {
+	struct pcb_struct	pcb;		/* palcode state */
+
+	struct task_struct	*task;		/* main task structure */
+	unsigned int		flags;		/* low level flags */
+	unsigned int		ieee_state;	/* see fpu.h */
+
+	struct exec_domain	*exec_domain;	/* execution domain */
+	mm_segment_t		addr_limit;	/* thread address space */
+	unsigned		cpu;		/* current CPU */
+	int			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+	int bpt_nsaved;
+	unsigned long bpt_addr[2];		/* breakpoint handling  */
+	unsigned int bpt_insn[2];
+
+	struct restart_block	restart_block;
+};
+
+/*
+ * Macros/functions for gaining access to the thread information structure.
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+/* How to get the thread information struct from C.  */
+register struct thread_info *__current_thread_info __asm__("$8");
+#define current_thread_info()  __current_thread_info
+
+/* Thread information allocation.  */
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define alloc_thread_info(tsk) \
+  ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE		0x40000000
+
+/*
+ * Thread information flags:
+ * - these are process state flags and used from assembly
+ * - pending work-to-be-done flags come first to fit in and immediate operand.
+ *
+ * TIF_SYSCALL_TRACE is known to be 0 via blbs.
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_POLLING_NRFLAG	4	/* poll_idle is polling NEED_RESCHED */
+#define TIF_DIE_IF_KERNEL	5	/* dik recursion lock */
+#define TIF_UAC_NOPRINT		6	/* see sysinfo.h */
+#define TIF_UAC_NOFIX		7
+#define TIF_UAC_SIGBUS		8
+#define TIF_MEMDIE		9
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+/* Work to do on interrupt/exception return.  */
+#define _TIF_WORK_MASK		(_TIF_NOTIFY_RESUME	\
+				 | _TIF_SIGPENDING	\
+				 | _TIF_NEED_RESCHED)
+
+/* Work to do on any return to userspace.  */
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
+				 | _TIF_SYSCALL_TRACE)
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_THREAD_INFO_H */
diff -Naur aboot-0.9b.orig/include/asm-generic/bitops/find.h aboot-0.9b/include/asm-generic/bitops/find.h
--- aboot-0.9b.orig/include/asm-generic/bitops/find.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm-generic/bitops/find.h	2006-06-27 22:02:02.000000000 -0400
@@ -0,0 +1 @@
+/* Dummy Header */
diff -Naur aboot-0.9b.orig/include/asm-generic/page.h aboot-0.9b/include/asm-generic/page.h
--- aboot-0.9b.orig/include/asm-generic/page.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/asm-generic/page.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,27 @@
+#ifndef _ASM_GENERIC_PAGE_H
+#define _ASM_GENERIC_PAGE_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/* Pure 2^n version of get_order */
+/*
+static int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size - 1) >> (PAGE_SHIFT - 1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}*/
+
+#endif	/* __ASSEMBLY__ */
+#endif	/* __KERNEL__ */
+
+#endif	/* _ASM_GENERIC_PAGE_H */
diff -Naur aboot-0.9b.orig/include/linux/auxvec.h aboot-0.9b/include/linux/auxvec.h
--- aboot-0.9b.orig/include/linux/auxvec.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/auxvec.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,31 @@
+#ifndef _LINUX_AUXVEC_H
+#define _LINUX_AUXVEC_H
+
+#include <asm/auxvec.h>
+
+/* Symbolic values for the entries in the auxiliary table
+   put on the initial stack */
+#define AT_NULL   0	/* end of vector */
+#define AT_IGNORE 1	/* entry should be ignored */
+#define AT_EXECFD 2	/* file descriptor of program */
+#define AT_PHDR   3	/* program headers for program */
+#define AT_PHENT  4	/* size of program header entry */
+#define AT_PHNUM  5	/* number of program headers */
+#define AT_PAGESZ 6	/* system page size */
+#define AT_BASE   7	/* base address of interpreter */
+#define AT_FLAGS  8	/* flags */
+#define AT_ENTRY  9	/* entry point of program */
+#define AT_NOTELF 10	/* program is not ELF */
+#define AT_UID    11	/* real uid */
+#define AT_EUID   12	/* effective uid */
+#define AT_GID    13	/* real gid */
+#define AT_EGID   14	/* effective gid */
+#define AT_PLATFORM 15  /* string identifying CPU for optimizations */
+#define AT_HWCAP  16    /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17	/* frequency at which times() increments */
+
+#define AT_SECURE 23   /* secure mode boolean */
+
+#define AT_VECTOR_SIZE  42 /* Size of auxiliary table.  */
+
+#endif /* _LINUX_AUXVEC_H */
diff -Naur aboot-0.9b.orig/include/linux/blockgroup_lock.h aboot-0.9b/include/linux/blockgroup_lock.h
--- aboot-0.9b.orig/include/linux/blockgroup_lock.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/blockgroup_lock.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,60 @@
+#ifndef _LINUX_BLOCKGROUP_LOCK_H
+#define _LINUX_BLOCKGROUP_LOCK_H
+/*
+ * Per-blockgroup locking for ext2 and ext3.
+ *
+ * Simple hashed spinlocking.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+
+#ifdef CONFIG_SMP
+
+/*
+ * We want a power-of-two.  Is there a better way than this?
+ */
+
+#if NR_CPUS >= 32
+#define NR_BG_LOCKS	128
+#elif NR_CPUS >= 16
+#define NR_BG_LOCKS	64
+#elif NR_CPUS >= 8
+#define NR_BG_LOCKS	32
+#elif NR_CPUS >= 4
+#define NR_BG_LOCKS	16
+#elif NR_CPUS >= 2
+#define NR_BG_LOCKS	8
+#else
+#define NR_BG_LOCKS	4
+#endif
+
+#else	/* CONFIG_SMP */
+#define NR_BG_LOCKS	1
+#endif	/* CONFIG_SMP */
+
+struct bgl_lock {
+	spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+struct blockgroup_lock {
+	struct bgl_lock locks[NR_BG_LOCKS];
+};
+
+static inline void bgl_lock_init(struct blockgroup_lock *bgl)
+{
+	int i;
+
+	for (i = 0; i < NR_BG_LOCKS; i++)
+		spin_lock_init(&bgl->locks[i].lock);
+}
+
+/*
+ * The accessor is a macro so we can embed a blockgroup_lock into different
+ * superblock types
+ */
+#define sb_bgl_lock(sb, block_group) \
+	(&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock)
+
+#endif
diff -Naur aboot-0.9b.orig/include/linux/config.h aboot-0.9b/include/linux/config.h
--- aboot-0.9b.orig/include/linux/config.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/config.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1 @@
+/* Dummy Config.h */
diff -Naur aboot-0.9b.orig/include/linux/elf.h aboot-0.9b/include/linux/elf.h
--- aboot-0.9b.orig/include/linux/elf.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/elf.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,427 @@
+#ifndef _LINUX_ELF_H
+#define _LINUX_ELF_H
+
+#include <linux/types.h>
+#include <linux/auxvec.h>
+#include <asm/elf.h>
+
+#ifndef elf_read_implies_exec
+  /* Executables for which elf_read_implies_exec() returns TRUE will
+     have the READ_IMPLIES_EXEC personality flag set automatically.
+     Override in asm/elf.h as needed.  */
+# define elf_read_implies_exec(ex, have_pt_gnu_stack)	0
+#endif
+
+/* 32-bit ELF base types. */
+typedef __u32	Elf32_Addr;
+typedef __u16	Elf32_Half;
+typedef __u32	Elf32_Off;
+typedef __s32	Elf32_Sword;
+typedef __u32	Elf32_Word;
+
+/* 64-bit ELF base types. */
+typedef __u64	Elf64_Addr;
+typedef __u16	Elf64_Half;
+typedef __s16	Elf64_SHalf;
+typedef __u64	Elf64_Off;
+typedef __s32	Elf64_Sword;
+typedef __u32	Elf64_Word;
+typedef __u64	Elf64_Xword;
+typedef __s64	Elf64_Sxword;
+
+/* These constants are for the segment types stored in the image headers */
+#define PT_NULL    0
+#define PT_LOAD    1
+#define PT_DYNAMIC 2
+#define PT_INTERP  3
+#define PT_NOTE    4
+#define PT_SHLIB   5
+#define PT_PHDR    6
+#define PT_TLS     7               /* Thread local storage segment */
+#define PT_LOOS    0x60000000      /* OS-specific */
+#define PT_HIOS    0x6fffffff      /* OS-specific */
+#define PT_LOPROC  0x70000000
+#define PT_HIPROC  0x7fffffff
+#define PT_GNU_EH_FRAME		0x6474e550
+
+#define PT_GNU_STACK	(PT_LOOS + 0x474e551)
+
+/* These constants define the different elf file types */
+#define ET_NONE   0
+#define ET_REL    1
+#define ET_EXEC   2
+#define ET_DYN    3
+#define ET_CORE   4
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* These constants define the various ELF target machines */
+#define EM_NONE  0
+#define EM_M32   1
+#define EM_SPARC 2
+#define EM_386   3
+#define EM_68K   4
+#define EM_88K   5
+#define EM_486   6   /* Perhaps disused */
+#define EM_860   7
+
+#define EM_MIPS		8	/* MIPS R3000 (officially, big-endian only) */
+
+#define EM_MIPS_RS4_BE 10	/* MIPS R4000 big-endian */
+
+#define EM_PARISC      15	/* HPPA */
+
+#define EM_SPARC32PLUS 18	/* Sun's "v8plus" */
+
+#define EM_PPC	       20	/* PowerPC */
+#define EM_PPC64       21       /* PowerPC64 */
+
+#define EM_SH	       42	/* SuperH */
+
+#define EM_SPARCV9     43	/* SPARC v9 64-bit */
+
+#define EM_IA_64	50	/* HP/Intel IA-64 */
+
+#define EM_X86_64	62	/* AMD x86-64 */
+
+#define EM_S390		22	/* IBM S/390 */
+
+#define EM_CRIS         76      /* Axis Communications 32-bit embedded processor */
+
+#define EM_V850		87	/* NEC v850 */
+
+#define EM_M32R		88	/* Renesas M32R */
+
+#define EM_H8_300       46      /* Renesas H8/300,300H,H8S */
+
+/*
+ * This is an interim value that we will use until the committee comes
+ * up with a final number.
+ */
+#define EM_ALPHA	0x9026
+
+/* Bogus old v850 magic number, used by old tools.  */
+#define EM_CYGNUS_V850	0x9080
+
+/* Bogus old m32r magic number, used by old tools.  */
+#define EM_CYGNUS_M32R	0x9041
+
+/*
+ * This is the old interim value for S/390 architecture
+ */
+#define EM_S390_OLD     0xA390
+
+#define EM_FRV		0x5441		/* Fujitsu FR-V */
+
+/* This is the info that is needed to parse the dynamic section of the file */
+#define DT_NULL		0
+#define DT_NEEDED	1
+#define DT_PLTRELSZ	2
+#define DT_PLTGOT	3
+#define DT_HASH		4
+#define DT_STRTAB	5
+#define DT_SYMTAB	6
+#define DT_RELA		7
+#define DT_RELASZ	8
+#define DT_RELAENT	9
+#define DT_STRSZ	10
+#define DT_SYMENT	11
+#define DT_INIT		12
+#define DT_FINI		13
+#define DT_SONAME	14
+#define DT_RPATH 	15
+#define DT_SYMBOLIC	16
+#define DT_REL	        17
+#define DT_RELSZ	18
+#define DT_RELENT	19
+#define DT_PLTREL	20
+#define DT_DEBUG	21
+#define DT_TEXTREL	22
+#define DT_JMPREL	23
+#define DT_LOPROC	0x70000000
+#define DT_HIPROC	0x7fffffff
+
+/* This info is needed when parsing the symbol table */
+#define STB_LOCAL  0
+#define STB_GLOBAL 1
+#define STB_WEAK   2
+
+#define STT_NOTYPE  0
+#define STT_OBJECT  1
+#define STT_FUNC    2
+#define STT_SECTION 3
+#define STT_FILE    4
+
+#define ELF_ST_BIND(x)		((x) >> 4)
+#define ELF_ST_TYPE(x)		(((unsigned int) x) & 0xf)
+#define ELF32_ST_BIND(x)	ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x)	ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x)	ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x)	ELF_ST_TYPE(x)
+
+typedef struct dynamic{
+  Elf32_Sword d_tag;
+  union{
+    Elf32_Sword	d_val;
+    Elf32_Addr	d_ptr;
+  } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+  Elf64_Sxword d_tag;		/* entry tag value */
+  union {
+    Elf64_Xword d_val;
+    Elf64_Addr d_ptr;
+  } d_un;
+} Elf64_Dyn;
+
+/* The following are used with relocations */
+#define ELF32_R_SYM(x) ((x) >> 8)
+#define ELF32_R_TYPE(x) ((x) & 0xff)
+
+#define ELF64_R_SYM(i)			((i) >> 32)
+#define ELF64_R_TYPE(i)			((i) & 0xffffffff)
+
+typedef struct elf32_rel {
+  Elf32_Addr	r_offset;
+  Elf32_Word	r_info;
+} Elf32_Rel;
+
+typedef struct elf64_rel {
+  Elf64_Addr r_offset;	/* Location at which to apply the action */
+  Elf64_Xword r_info;	/* index and type of relocation */
+} Elf64_Rel;
+
+typedef struct elf32_rela{
+  Elf32_Addr	r_offset;
+  Elf32_Word	r_info;
+  Elf32_Sword	r_addend;
+} Elf32_Rela;
+
+typedef struct elf64_rela {
+  Elf64_Addr r_offset;	/* Location at which to apply the action */
+  Elf64_Xword r_info;	/* index and type of relocation */
+  Elf64_Sxword r_addend;	/* Constant addend used to compute value */
+} Elf64_Rela;
+
+typedef struct elf32_sym{
+  Elf32_Word	st_name;
+  Elf32_Addr	st_value;
+  Elf32_Word	st_size;
+  unsigned char	st_info;
+  unsigned char	st_other;
+  Elf32_Half	st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+  Elf64_Word st_name;		/* Symbol name, index in string tbl */
+  unsigned char	st_info;	/* Type and binding attributes */
+  unsigned char	st_other;	/* No defined meaning, 0 */
+  Elf64_Half st_shndx;		/* Associated section index */
+  Elf64_Addr st_value;		/* Value of the symbol */
+  Elf64_Xword st_size;		/* Associated symbol size */
+} Elf64_Sym;
+
+
+#define EI_NIDENT	16
+
+typedef struct elf32_hdr{
+  unsigned char	e_ident[EI_NIDENT];
+  Elf32_Half	e_type;
+  Elf32_Half	e_machine;
+  Elf32_Word	e_version;
+  Elf32_Addr	e_entry;  /* Entry point */
+  Elf32_Off	e_phoff;
+  Elf32_Off	e_shoff;
+  Elf32_Word	e_flags;
+  Elf32_Half	e_ehsize;
+  Elf32_Half	e_phentsize;
+  Elf32_Half	e_phnum;
+  Elf32_Half	e_shentsize;
+  Elf32_Half	e_shnum;
+  Elf32_Half	e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+  unsigned char	e_ident[16];		/* ELF "magic number" */
+  Elf64_Half e_type;
+  Elf64_Half e_machine;
+  Elf64_Word e_version;
+  Elf64_Addr e_entry;		/* Entry point virtual address */
+  Elf64_Off e_phoff;		/* Program header table file offset */
+  Elf64_Off e_shoff;		/* Section header table file offset */
+  Elf64_Word e_flags;
+  Elf64_Half e_ehsize;
+  Elf64_Half e_phentsize;
+  Elf64_Half e_phnum;
+  Elf64_Half e_shentsize;
+  Elf64_Half e_shnum;
+  Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+/* These constants define the permissions on sections in the program
+   header, p_flags. */
+#define PF_R		0x4
+#define PF_W		0x2
+#define PF_X		0x1
+
+typedef struct elf32_phdr{
+  Elf32_Word	p_type;
+  Elf32_Off	p_offset;
+  Elf32_Addr	p_vaddr;
+  Elf32_Addr	p_paddr;
+  Elf32_Word	p_filesz;
+  Elf32_Word	p_memsz;
+  Elf32_Word	p_flags;
+  Elf32_Word	p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+  Elf64_Word p_type;
+  Elf64_Word p_flags;
+  Elf64_Off p_offset;		/* Segment file offset */
+  Elf64_Addr p_vaddr;		/* Segment virtual address */
+  Elf64_Addr p_paddr;		/* Segment physical address */
+  Elf64_Xword p_filesz;		/* Segment size in file */
+  Elf64_Xword p_memsz;		/* Segment size in memory */
+  Elf64_Xword p_align;		/* Segment alignment, file & memory */
+} Elf64_Phdr;
+
+/* sh_type */
+#define SHT_NULL	0
+#define SHT_PROGBITS	1
+#define SHT_SYMTAB	2
+#define SHT_STRTAB	3
+#define SHT_RELA	4
+#define SHT_HASH	5
+#define SHT_DYNAMIC	6
+#define SHT_NOTE	7
+#define SHT_NOBITS	8
+#define SHT_REL		9
+#define SHT_SHLIB	10
+#define SHT_DYNSYM	11
+#define SHT_NUM		12
+#define SHT_LOPROC	0x70000000
+#define SHT_HIPROC	0x7fffffff
+#define SHT_LOUSER	0x80000000
+#define SHT_HIUSER	0xffffffff
+
+/* sh_flags */
+#define SHF_WRITE	0x1
+#define SHF_ALLOC	0x2
+#define SHF_EXECINSTR	0x4
+#define SHF_MASKPROC	0xf0000000
+
+/* special section indexes */
+#define SHN_UNDEF	0
+#define SHN_LORESERVE	0xff00
+#define SHN_LOPROC	0xff00
+#define SHN_HIPROC	0xff1f
+#define SHN_ABS		0xfff1
+#define SHN_COMMON	0xfff2
+#define SHN_HIRESERVE	0xffff
+ 
+typedef struct {
+  Elf32_Word	sh_name;
+  Elf32_Word	sh_type;
+  Elf32_Word	sh_flags;
+  Elf32_Addr	sh_addr;
+  Elf32_Off	sh_offset;
+  Elf32_Word	sh_size;
+  Elf32_Word	sh_link;
+  Elf32_Word	sh_info;
+  Elf32_Word	sh_addralign;
+  Elf32_Word	sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+  Elf64_Word sh_name;		/* Section name, index in string tbl */
+  Elf64_Word sh_type;		/* Type of section */
+  Elf64_Xword sh_flags;		/* Miscellaneous section attributes */
+  Elf64_Addr sh_addr;		/* Section virtual addr at execution */
+  Elf64_Off sh_offset;		/* Section file offset */
+  Elf64_Xword sh_size;		/* Size of section in bytes */
+  Elf64_Word sh_link;		/* Index of another section */
+  Elf64_Word sh_info;		/* Additional section information */
+  Elf64_Xword sh_addralign;	/* Section alignment */
+  Elf64_Xword sh_entsize;	/* Entry size if section holds table */
+} Elf64_Shdr;
+
+#define	EI_MAG0		0		/* e_ident[] indexes */
+#define	EI_MAG1		1
+#define	EI_MAG2		2
+#define	EI_MAG3		3
+#define	EI_CLASS	4
+#define	EI_DATA		5
+#define	EI_VERSION	6
+#define	EI_OSABI	7
+#define	EI_PAD		8
+
+#define	ELFMAG0		0x7f		/* EI_MAG */
+#define	ELFMAG1		'E'
+#define	ELFMAG2		'L'
+#define	ELFMAG3		'F'
+#define	ELFMAG		"\177ELF"
+#define	SELFMAG		4
+
+#define	ELFCLASSNONE	0		/* EI_CLASS */
+#define	ELFCLASS32	1
+#define	ELFCLASS64	2
+#define	ELFCLASSNUM	3
+
+#define ELFDATANONE	0		/* e_ident[EI_DATA] */
+#define ELFDATA2LSB	1
+#define ELFDATA2MSB	2
+
+#define EV_NONE		0		/* e_version, EI_VERSION */
+#define EV_CURRENT	1
+#define EV_NUM		2
+
+#define ELFOSABI_NONE	0
+#define ELFOSABI_LINUX	3
+
+#ifndef ELF_OSABI
+#define ELF_OSABI ELFOSABI_NONE
+#endif
+
+/* Notes used in ET_CORE */
+#define NT_PRSTATUS	1
+#define NT_PRFPREG	2
+#define NT_PRPSINFO	3
+#define NT_TASKSTRUCT	4
+#define NT_AUXV		6
+#define NT_PRXFPREG     0x46e62b7f      /* copied from gdb5.1/include/elf/common.h */
+
+
+/* Note header in a PT_NOTE section */
+typedef struct elf32_note {
+  Elf32_Word	n_namesz;	/* Name size */
+  Elf32_Word	n_descsz;	/* Content size */
+  Elf32_Word	n_type;		/* Content type */
+} Elf32_Nhdr;
+
+/* Note header in a PT_NOTE section */
+typedef struct elf64_note {
+  Elf64_Word n_namesz;	/* Name size */
+  Elf64_Word n_descsz;	/* Content size */
+  Elf64_Word n_type;	/* Content type */
+} Elf64_Nhdr;
+
+#if ELF_CLASS == ELFCLASS32
+
+extern Elf32_Dyn _DYNAMIC [];
+#define elfhdr		elf32_hdr
+#define elf_phdr	elf32_phdr
+#define elf_note	elf32_note
+
+#else
+
+extern Elf64_Dyn _DYNAMIC [];
+#define elfhdr		elf64_hdr
+#define elf_phdr	elf64_phdr
+#define elf_note	elf64_note
+
+#endif
+
+
+#endif /* _LINUX_ELF_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock_api_smp.h aboot-0.9b/include/linux/spinlock_api_smp.h
--- aboot-0.9b.orig/include/linux/spinlock_api_smp.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock_api_smp.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+							__acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+							__acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+							__releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+							__releases(rwlock_t);
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock_api_up.h aboot-0.9b/include/linux/spinlock_api_up.h
--- aboot-0.9b.orig/include/linux/spinlock_api_up.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock_api_up.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,80 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation on UP-nondebug (inlined implementation)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR)		0
+
+#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to suppress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define __LOCK(lock) \
+  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+  do { local_bh_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+  do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+  do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __UNLOCK(lock) \
+  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+  do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _spin_lock(lock)			__LOCK(lock)
+#define _read_lock(lock)			__LOCK(lock)
+#define _write_lock(lock)			__LOCK(lock)
+#define _spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _read_lock_bh(lock)			__LOCK_BH(lock)
+#define _write_lock_bh(lock)			__LOCK_BH(lock)
+#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
+#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
+#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
+#define _read_trylock(lock)			({ __LOCK(lock); 1; })
+#define _write_trylock(lock)			({ __LOCK(lock); 1; })
+#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
+#define _spin_unlock(lock)			__UNLOCK(lock)
+#define _read_unlock(lock)			__UNLOCK(lock)
+#define _write_unlock(lock)			__UNLOCK(lock)
+#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
+#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
+#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock.h aboot-0.9b/include/linux/spinlock.h
--- aboot-0.9b.orig/include/linux/spinlock.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,247 @@
+#ifndef __LINUX_SPINLOCK_H
+#define __LINUX_SPINLOCK_H
+
+/*
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *                        initializers
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *                        implementations, mostly inline assembly code
+ *
+ *   (also included on UP-debug builds:)
+ *
+ *  linux/spinlock_api_smp.h:
+ *                        contains the prototypes for the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ *  linux/spinlock_type_up.h:
+ *                        contains the generic, simplified UP spinlock type.
+ *                        (which is an empty structure on non-debug builds)
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  linux/spinlock_up.h:
+ *                        contains the __raw_spin_*()/etc. version of UP
+ *                        builds. (which are NOPs on non-debug, non-preempt
+ *                        builds)
+ *
+ *   (included on UP-non-debug builds:)
+ *
+ *  linux/spinlock_api_up.h:
+ *                        builds the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ */
+
+#include <linux/config.h>
+#include <linux/preempt.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+
+#include <asm/system.h>
+
+/*
+ * Must define these before including other files, inline functions need them
+ */
+#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+
+#define LOCK_SECTION_START(extra)               \
+        ".subsection 1\n\t"                     \
+        extra                                   \
+        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
+        LOCK_SECTION_NAME ":\n\t"               \
+        ".endif\n"
+
+#define LOCK_SECTION_END                        \
+        ".previous\n\t"
+
+#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+
+/*
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ */
+#include <linux/spinlock_types.h>
+
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+
+/*
+ * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
+ */
+#if defined(CONFIG_SMP)
+# include <asm/spinlock.h>
+#else
+# include <linux/spinlock_up.h>
+#endif
+
+#define spin_lock_init(lock)	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock)	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various spin_lock and rw_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define spin_trylock(lock)		__cond_lock(_spin_trylock(lock))
+#define read_trylock(lock)		__cond_lock(_read_trylock(lock))
+#define write_trylock(lock)		__cond_lock(_write_trylock(lock))
+
+#define spin_lock(lock)			_spin_lock(lock)
+#define write_lock(lock)		_write_lock(lock)
+#define read_lock(lock)			_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
+#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
+#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
+#else
+#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
+#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
+#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
+#endif
+
+#define spin_lock_irq(lock)		_spin_lock_irq(lock)
+#define spin_lock_bh(lock)		_spin_lock_bh(lock)
+
+#define read_lock_irq(lock)		_read_lock_irq(lock)
+#define read_lock_bh(lock)		_read_lock_bh(lock)
+
+#define write_lock_irq(lock)		_write_lock_irq(lock)
+#define write_lock_bh(lock)		_write_lock_bh(lock)
+
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+# define spin_unlock(lock)		_spin_unlock(lock)
+# define read_unlock(lock)		_read_unlock(lock)
+# define write_unlock(lock)		_write_unlock(lock)
+#else
+# define spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock)		__raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock)		__raw_write_unlock(&(lock)->raw_lock)
+#endif
+
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+# define read_unlock_irq(lock)		_read_unlock_irq(lock)
+# define write_unlock_irq(lock)		_write_unlock_irq(lock)
+#else
+# define spin_unlock_irq(lock) \
+    do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+# define read_unlock_irq(lock) \
+    do { __raw_read_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+# define write_unlock_irq(lock) \
+    do { __raw_write_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+#endif
+
+#define spin_unlock_irqrestore(lock, flags) \
+					_spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+					_read_unlock_irqrestore(lock, flags)
+#define read_unlock_bh(lock)		_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+					_write_unlock_irqrestore(lock, flags)
+#define write_unlock_bh(lock)		_write_unlock_bh(lock)
+
+#define spin_trylock_bh(lock)		__cond_lock(_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+({ \
+	local_irq_disable(); \
+	_spin_trylock(lock) ? \
+	1 : ({ local_irq_enable(); 0;  }); \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+	local_irq_save(flags); \
+	_spin_trylock(lock) ? \
+	1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+/*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+ */
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
+ */
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+		__cond_lock(_atomic_dec_and_lock(atomic, lock))
+
+/**
+ * spin_can_lock - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_can_lock(lock)	(!spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock_types.h aboot-0.9b/include/linux/spinlock_types.h
--- aboot-0.9b.orig/include/linux/spinlock_types.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock_types.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,67 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ *                                  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+typedef struct {
+	raw_spinlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC		0xdead4ead
+
+typedef struct {
+	raw_rwlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC		0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT	((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED						\
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+				.magic = SPINLOCK_MAGIC,		\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#define RW_LOCK_UNLOCKED						\
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock_types_up.h aboot-0.9b/include/linux/spinlock_types_up.h
--- aboot-0.9b.orig/include/linux/spinlock_types_up.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock_types_up.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,37 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+typedef struct { } raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { }
+
+#endif
+
+typedef struct {
+	/* no debug version on UP */
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { }
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff -Naur aboot-0.9b.orig/include/linux/spinlock_up.h aboot-0.9b/include/linux/spinlock_up.h
--- aboot-0.9b.orig/include/linux/spinlock_up.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/spinlock_up.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,74 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_up.h - UP-debug version of spinlocks.
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define __raw_spin_is_locked(x)		((x)->slock == 0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	lock->slock = 0;
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	local_irq_save(flags);
+	lock->slock = 0;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	char oldval = lock->slock;
+
+	lock->slock = 0;
+
+	return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
+#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
+#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
+#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+/* for sched.c and kernel_lock.c: */
+# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
+# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
+# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define __raw_read_can_lock(lock)	(((void)(lock), 1))
+#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+
+#define __raw_spin_unlock_wait(lock) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
diff -Naur aboot-0.9b.orig/include/linux/stat.h aboot-0.9b/include/linux/stat.h
--- aboot-0.9b.orig/include/linux/stat.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/stat.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,77 @@
+#ifndef _LINUX_STAT_H
+#define _LINUX_STAT_H
+
+#ifdef __KERNEL__
+
+#include <asm/stat.h>
+
+#endif
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#define S_IFMT  00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK	 0120000
+#define S_IFREG  0100000
+#define S_IFBLK  0060000
+#define S_IFDIR  0040000
+#define S_IFCHR  0020000
+#define S_IFIFO  0010000
+#define S_ISUID  0004000
+#define S_ISGID  0002000
+#define S_ISVTX  0001000
+
+#define S_ISLNK(m)	(((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m)	(((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m)	(((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m)	(((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m)	(((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m)	(((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m)	(((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#endif
+
+#ifdef __KERNEL__
+#define S_IRWXUGO	(S_IRWXU|S_IRWXG|S_IRWXO)
+#define S_IALLUGO	(S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#define S_IRUGO		(S_IRUSR|S_IRGRP|S_IROTH)
+#define S_IWUGO		(S_IWUSR|S_IWGRP|S_IWOTH)
+#define S_IXUGO		(S_IXUSR|S_IXGRP|S_IXOTH)
+
+#include <linux/types.h>
+#include <sys/time.h>
+
+struct kstat {
+	unsigned long	ino;
+	dev_t		dev;
+	umode_t		mode;
+	unsigned int	nlink;
+	uid_t		uid;
+	gid_t		gid;
+	dev_t		rdev;
+	loff_t		size;
+	struct timespec  atime;
+	struct timespec	mtime;
+	struct timespec	ctime;
+	unsigned long	blksize;
+	unsigned long	blocks;
+};
+
+#endif
+
+#endif
diff -Naur aboot-0.9b.orig/include/linux/string.h aboot-0.9b/include/linux/string.h
--- aboot-0.9b.orig/include/linux/string.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/string.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,100 @@
+#ifndef _LINUX_STRING_H_
+#define _LINUX_STRING_H_
+
+/* We don't want strings.h stuff being used by user code by accident */
+
+#ifdef __KERNEL__
+
+typedef unsigned gfp_t;
+
+#include <linux/compiler.h>	/* for inline */
+#include <linux/types.h>	/* for size_t */
+#include <linux/stddef.h>	/* for NULL */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char * strpbrk(const char *,const char *);
+extern char * strsep(char **,const char *);
+extern __kernel_size_t strspn(const char *,const char *);
+extern __kernel_size_t strcspn(const char *,const char *);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifndef __HAVE_ARCH_STRCPY
+extern char * strcpy(char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCPY
+extern char * strncpy(char *,const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCPY
+size_t strlcpy(char *, const char *, size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCAT
+extern char * strcat(char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCAT
+extern char * strncat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCAT
+extern size_t strlcat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCMP
+extern int strcmp(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRNICMP
+extern int strnicmp(const char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCHR
+extern char * strchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRNCHR
+extern char * strnchr(const char *, size_t, int);
+#endif
+#ifndef __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRSTR
+extern char * strstr(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNLEN
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET
+extern void * memset(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMSCAN
+extern void * memscan(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *,int,__kernel_size_t);
+#endif
+
+extern char *kstrdup(const char *s, gfp_t gfp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif /* _LINUX_STRING_H_ */
diff -Naur aboot-0.9b.orig/include/linux/thread_info.h aboot-0.9b/include/linux/thread_info.h
--- aboot-0.9b.orig/include/linux/thread_info.h	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/include/linux/thread_info.h	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,71 @@
+/* thread_info.h: common low-level thread information accessors
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds
+ */
+
+#ifndef _LINUX_THREAD_INFO_H
+#define _LINUX_THREAD_INFO_H
+
+/*
+ * System call restart block. 
+ */
+struct restart_block {
+	long (*fn)(struct restart_block *);
+	unsigned long arg0, arg1, arg2, arg3;
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#include <linux/bitops.h>
+#include <asm/thread_info.h>
+
+#ifdef __KERNEL__
+
+/*
+ * flag set/clear/test wrappers
+ * - pass TIF_xxxx constants to these functions
+ */
+
+static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+	set_bit(flag,&ti->flags);
+}
+
+static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+	clear_bit(flag,&ti->flags);
+}
+
+static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+	return test_and_set_bit(flag,&ti->flags);
+}
+
+static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+	return test_and_clear_bit(flag,&ti->flags);
+}
+
+static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+{
+	return test_bit(flag,&ti->flags);
+}
+
+#define set_thread_flag(flag) \
+	set_ti_thread_flag(current_thread_info(), flag)
+#define clear_thread_flag(flag) \
+	clear_ti_thread_flag(current_thread_info(), flag)
+#define test_and_set_thread_flag(flag) \
+	test_and_set_ti_thread_flag(current_thread_info(), flag)
+#define test_and_clear_thread_flag(flag) \
+	test_and_clear_ti_thread_flag(current_thread_info(), flag)
+#define test_thread_flag(flag) \
+	test_ti_thread_flag(current_thread_info(), flag)
+
+#define set_need_resched()	set_thread_flag(TIF_NEED_RESCHED)
+#define clear_need_resched()	clear_thread_flag(TIF_NEED_RESCHED)
+
+#endif
+
+#endif /* _LINUX_THREAD_INFO_H */
diff -Naur aboot-0.9b.orig/lib/isolib.c aboot-0.9b/lib/isolib.c
--- aboot-0.9b.orig/lib/isolib.c	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/lib/isolib.c	2006-06-27 22:02:47.000000000 -0400
@@ -6,6 +6,7 @@
  * functionality to the Linux bootstrapper.  All we can do is
  * open and read files... but that's all we need 8-)
  */
+#define __KERNEL__
 #ifndef TESTING
 #  include <linux/string.h>
 #endif
@@ -1520,6 +1521,7 @@
 #ifdef DEBUG_ROCK
 	   printf("Symlink component flag not implemented (%d)\n",slen);
 #endif
+	   break;
 	 };
 	 slen -= slp->len + 2;
 	 oldslp = slp;
diff -Naur aboot-0.9b.orig/Makefile aboot-0.9b/Makefile
--- aboot-0.9b.orig/Makefile	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/Makefile	2006-06-27 22:02:47.000000000 -0400
@@ -23,7 +23,7 @@
 root		=
 bindir		= $(root)/sbin
 bootdir		= $(root)/boot
-mandir		= /usr/man
+mandir		= $(root)/usr/man
 
 #
 # There shouldn't be any need to change anything below this line.
@@ -82,10 +82,10 @@
 	tools/objstrip -vb aboot bootlx
 
 install-man: 
-	make -C doc/man install
+	$(MAKE) -C doc/man install
 
 install-man-gz:
-	make -C doc/man install-gz
+	$(MAKE) -C doc/man install-gz
 
 install: tools/abootconf tools/e2writeboot tools/isomarkboot \
 	sdisklabel/swriteboot install-man
@@ -128,13 +128,13 @@
 	find . -name \*~ | xargs rm -f
 
 lib/%:
-	make -C lib $* CPPFLAGS="$(CPPFLAGS)" TESTING="$(TESTING)"
+	$(MAKE) -C lib $* CPPFLAGS="$(CPPFLAGS)" TESTING="$(TESTING)"
 
 tools/%:
-	make -C tools $* CPPFLAGS="$(CPPFLAGS)"
+	$(MAKE) -C tools $* CPPFLAGS="$(CPPFLAGS)"
 
 sdisklabel/%:
-	make -C sdisklabel $* CPPFLAGS="$(CPPFLAGS)"
+	$(MAKE) -C sdisklabel $* CPPFLAGS="$(CPPFLAGS)"
 
 vmlinux.nh: $(VMLINUX) tools/objstrip
 	tools/objstrip -vb $(VMLINUX) vmlinux.nh
diff -Naur aboot-0.9b.orig/sdisklabel/Makefile aboot-0.9b/sdisklabel/Makefile
--- aboot-0.9b.orig/sdisklabel/Makefile	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/sdisklabel/Makefile	2006-06-27 22:02:47.000000000 -0400
@@ -1,13 +1,16 @@
-CC = gcc -O2
+HOST_CC = gcc -O2
 CFLAGS= -I../include $(CPPFLAGS) -Wall
 
 all:	sdisklabel swriteboot
 
+%.o: %.c
+	$(HOST_CC) $(CFLAGS) -c $< -o $@
+
 sdisklabel: sdisklabel.o library.o
-	$(CC) $(LDFLAGS) sdisklabel.o library.o -o sdisklabel
+	$(HOST_CC) $(LDFLAGS) sdisklabel.o library.o -o sdisklabel
 
 swriteboot: swriteboot.o library.o
-	$(CC) $(LDFLAGS) swriteboot.o library.o -o swriteboot
+	$(HOST_CC) $(LDFLAGS) swriteboot.o library.o -o swriteboot
 
 clean:
 	rm -f sdisklabel swriteboot *.o
diff -Naur aboot-0.9b.orig/tools/Makefile aboot-0.9b/tools/Makefile
--- aboot-0.9b.orig/tools/Makefile	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/tools/Makefile	2006-06-27 22:02:47.000000000 -0400
@@ -1,20 +1,32 @@
-CC	= gcc
+HOST_CC	= gcc
 CFLAGS	= -g -O2 -Wall -I. -I../include $(CPPFLAGS)
 LDFLAGS	= -g
 PGMS	= e2writeboot isomarkboot abootconf elfencap objstrip
 
-EXEC_PREFIX = /usr
+EXEC_PREFIX = $(root)/usr
 
-all:	$(PGMS)
+all: $(PGMS)
 
-install:	$(PGMS)
-	install -s -c -o root -g root -m 755 $(PGMS) $(EXEC_PREFIX)/bin
+install: $(PGMS)
+	install -s -c -m 755 $(PGMS) $(EXEC_PREFIX)/bin
 
 clean:
 	rm -f *~ *.o *.a core $(PGMS)
 
-isomarkboot:	isomarkboot.o ../lib/isolib.o
-e2writeboot:	e2writeboot.o e2lib.o bio.o
+isolib.o: ../lib/isolib.c
+	$(HOST_CC) $(CFLAGS) -c $< -o $@
 
-e2writeboot.o:	e2lib.h
+%.o: %.c
+	$(HOST_CC) $(CFLAGS) -c $< -o $@
+
+e2writeboot.o:  e2lib.h
 e2lib.o: e2lib.h
+
+e2writeboot: e2writeboot.o e2lib.o bio.o
+	$(HOST_CC) $(LDFLAGS) e2writeboot.o e2lib.o bio.o -o $@
+
+isomarkboot: isomarkboot.o isolib.o
+	$(HOST_CC) $(LDFLAGS) isomarkboot.o isolib.o -o $@
+
+abootconf elfencap objstrip: $(@:%=%.c)
+	$(HOST_CC) $(CFLAGS) $(LDFLAGS) $(@:%=%.c) -o $@
diff -Naur aboot-0.9b.orig/tools/Makefile.orig aboot-0.9b/tools/Makefile.orig
--- aboot-0.9b.orig/tools/Makefile.orig	1969-12-31 19:00:00.000000000 -0500
+++ aboot-0.9b/tools/Makefile.orig	2006-06-27 22:02:47.000000000 -0400
@@ -0,0 +1,20 @@
+CC	= gcc
+CFLAGS	= -g -O2 -Wall -I. -I../include $(CPPFLAGS)
+LDFLAGS	= -g
+PGMS	= e2writeboot isomarkboot abootconf elfencap objstrip
+
+EXEC_PREFIX = /usr
+
+all:	$(PGMS)
+
+install:	$(PGMS)
+	install -s -c -o root -g root -m 755 $(PGMS) $(EXEC_PREFIX)/bin
+
+clean:
+	rm -f *~ *.o *.a core $(PGMS)
+
+isomarkboot:	isomarkboot.o ../lib/isolib.o
+e2writeboot:	e2writeboot.o e2lib.o bio.o
+
+e2writeboot.o:	e2lib.h
+e2lib.o: e2lib.h
diff -Naur aboot-0.9b.orig/tools/objstrip.c aboot-0.9b/tools/objstrip.c
--- aboot-0.9b.orig/tools/objstrip.c	2006-06-27 22:01:25.000000000 -0400
+++ aboot-0.9b/tools/objstrip.c	2006-06-27 22:02:47.000000000 -0400
@@ -7,15 +7,17 @@
  */
 /*
  * Converts an ECOFF or ELF object file into a bootable file.  The
- * object file must be a OMAGIC file (i.e., data and bss follow immediatly
+ * object file must be a OMAGIC file (i.e., data and bss follow immediately
  * behind the text).  See DEC "Assembly Language Programmer's Guide"
  * documentation for details.  The SRM boot process is documented in
  * the Alpha AXP Architecture Reference Manual, Second Edition by
  * Richard L. Sites and Richard T. Witek.
  */
-#include <stdlib.h>
 #include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
 #include <unistd.h>
+#include <string.h>
 
 #include <sys/fcntl.h>
 #include <sys/stat.h>
@@ -24,17 +26,8 @@
 #include <linux/a.out.h>
 #include <linux/coff.h>
 #include <linux/param.h>
-#include <string.h>
-
 #ifdef __ELF__
-# include <asm/elf.h>
 # include <linux/elf.h>
-# include <linux/version.h>
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-#  define aboot_elf_check_arch(e)        elf_check_arch(e)
-# else
-#  define aboot_elf_check_arch(e)        elf_check_arch(e->e_machine)
-# endif
 #endif
 
 /* bootfile size must be multiple of BLOCK_SIZE: */
@@ -62,8 +55,8 @@
     struct exec * aout;		/* includes file & aout header */
     long offset;
 #ifdef __ELF__
-    struct elfhdr *elf;
-    struct elf_phdr *elf_phdr;	/* program header */
+    Elf64_Ehdr *elf;
+    Elf64_Phdr *elf_phdr;	/* program header */
     unsigned long long e_entry;
 #endif
 
@@ -150,19 +143,14 @@
     }
 
 #ifdef __ELF__
-    elf = (struct elfhdr *) buf;
+    elf = (Elf64_Ehdr *) buf;
 
-    if (elf->e_ident[0] == 0x7f && strncmp(elf->e_ident + 1, "ELF", 3) == 0) {
+    if (elf->e_ident[0] == 0x7f && strncmp((char *)elf->e_ident + 1, "ELF", 3) == 0) {
 	if (elf->e_type != ET_EXEC) {
 	    fprintf(stderr, "%s: %s is not an ELF executable\n",
 		    prog_name, inname);
 	    exit(1);
 	}
-	if (!aboot_elf_check_arch(elf)) {
-	    fprintf(stderr, "%s: is not for this processor (e_machine=%d)\n",
-		    prog_name, elf->e_machine);
-	    exit(1);
-	}
 	if (elf->e_phnum != 1) {
 	    fprintf(stderr,
 		    "%s: %d program headers (forgot to link with -N?)\n",
@@ -177,7 +165,7 @@
 	    exit(1);
 	}
 
-	elf_phdr = (struct elf_phdr *) buf;
+	elf_phdr = (Elf64_Phdr *) buf;
 	offset	 = elf_phdr->p_offset;
 	mem_size = elf_phdr->p_memsz;
 	fil_size = elf_phdr->p_filesz;
