Submitted By: Jim Gifford (patches at jg555 dot com)
Date: 2006-11-29
Initial Package Version: 2.6.19
Origin: Linux-MIPS
Upstream Status: http://www.linux-mips.org/pub/linux/mips/kernel/v2.6/
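Description: Fixes and updates for MIPS from the Linux-MIPS tree, including
             Kconfig and Alchemy Au1x00 cleanups, conversion of the 32-bit
             IPC emulation to the generic compat_sys_* routines, ptrace
             audit/seccomp and MT affinity security hook updates, removal of
             the MIPS-private iomap code, a copy_user_highpage()
             implementation, mv643xx ethernet platform devices for the
             Momentum Jaguar-ATX, VGA text console support for the QEMU
             machine, and an Alchemy Au1000 GPIO driver.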

diff -Naur linux-2.6.19/arch/mips/Kconfig linux-mips-2.6.19/arch/mips/Kconfig
--- linux-2.6.19/arch/mips/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -468,8 +468,6 @@
 config MACH_VR41XX
 	bool "NEC VR41XX-based machines"
 	select SYS_HAS_CPU_VR41XX
-	select SYS_SUPPORTS_32BIT_KERNEL
-	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
 
 config PMC_YOSEMITE
 	bool "PMC-Sierra Yosemite eval board"
@@ -1142,7 +1140,7 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	help
-	  The options selects support for the NEC VR4100 series of processors.
+	  This option selects support for the NEC VR41xx series of processors.
 	  Only choose this option if you have one of these processors as a
 	  kernel built with this option will not run on any other type of
 	  processor or vice versa.
@@ -1922,6 +1920,11 @@
 	depends on MIPS32_COMPAT
 	default y
 
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
 config MIPS32_O32
 	bool "Kernel support for o32 binaries"
 	depends on MIPS32_COMPAT
diff -Naur linux-2.6.19/arch/mips/au1000/common/pci.c linux-mips-2.6.19/arch/mips/au1000/common/pci.c
--- linux-2.6.19/arch/mips/au1000/common/pci.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/au1000/common/pci.c	2006-11-29 15:23:09.000000000 -0800
@@ -76,13 +76,17 @@
 	}
 
 #ifdef CONFIG_DMA_NONCOHERENT
-	/*
-         *  Set the NC bit in controller for Au1500 pre-AC silicon
-	 */
-	u32 prid = read_c0_prid();
-	if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
-	       au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG);
-	       printk("Non-coherent PCI accesses enabled\n");
+	{
+		/*
+		 *  Set the NC bit in controller for Au1500 pre-AC silicon
+		 */
+		u32 prid = read_c0_prid();
+
+		if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
+		       au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
+			         Au1500_PCI_CFG);
+		       printk("Non-coherent PCI accesses enabled\n");
+		}
 	}
 #endif
 
diff -Naur linux-2.6.19/arch/mips/au1000/common/setup.c linux-mips-2.6.19/arch/mips/au1000/common/setup.c
--- linux-2.6.19/arch/mips/au1000/common/setup.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/au1000/common/setup.c	2006-11-29 15:23:09.000000000 -0800
@@ -141,17 +141,20 @@
 /* This routine should be valid for all Au1x based boards */
 phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
 {
-	u32 start, end;
-
 	/* Don't fixup 36 bit addresses */
-	if ((phys_addr >> 32) != 0) return phys_addr;
+	if ((phys_addr >> 32) != 0)
+		return phys_addr;
 
 #ifdef CONFIG_PCI
-	start = (u32)Au1500_PCI_MEM_START;
-	end = (u32)Au1500_PCI_MEM_END;
-	/* check for pci memory window */
-	if ((phys_addr >= start) && ((phys_addr + size) < end)) {
-		return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START);
+	{
+		u32 start, end;
+
+		start = (u32)Au1500_PCI_MEM_START;
+		end = (u32)Au1500_PCI_MEM_END;
+		/* check for pci memory window */
+		if ((phys_addr >= start) && ((phys_addr + size) < end))
+			return (phys_t)
+			       ((phys_addr - start) + Au1500_PCI_MEM_START);
 	}
 #endif
 
diff -Naur linux-2.6.19/arch/mips/au1000/pb1100/board_setup.c linux-mips-2.6.19/arch/mips/au1000/pb1100/board_setup.c
--- linux-2.6.19/arch/mips/au1000/pb1100/board_setup.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/au1000/pb1100/board_setup.c	2006-11-29 15:23:09.000000000 -0800
@@ -47,8 +47,7 @@
 
 void __init board_setup(void)
 {
-	u32 pin_func;
-	u32 sys_freqctrl, sys_clksrc;
+	volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
 
 	// set AUX clock to 12MHz * 8 = 96 MHz
 	au_writel(8, SYS_AUXPLL);
@@ -56,58 +55,62 @@
 	udelay(100);
 
 #ifdef CONFIG_USB_OHCI
-	// configure pins GPIO[14:9] as GPIO
-	pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
+	{
+		u32 pin_func, sys_freqctrl, sys_clksrc;
 
-	/* zero and disable FREQ2 */
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-	au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-	/* zero and disable USBH/USBD/IrDA clock */
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~0x0000001F;
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~0x0000001F;
-
-	// FREQ2 = aux/2 = 48 MHz
-	sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
-	au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-	/*
-	 * Route 48MHz FREQ2 into USBH/USBD/IrDA
-	 */
-	sys_clksrc |= ((4<<2) | (0<<1) | 0 );
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	/* setup the static bus controller */
-	au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
-	au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
-	au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
-
-	// get USB Functionality pin state (device vs host drive pins)
-	pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
-	// 2nd USB port is USB host
-	pin_func |= 0x8000;
-	au_writel(pin_func, SYS_PINFUNC);
+		// configure pins GPIO[14:9] as GPIO
+		pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
+
+		/* zero and disable FREQ2 */
+		sys_freqctrl = au_readl(SYS_FREQCTRL0);
+		sys_freqctrl &= ~0xFFF00000;
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+		/* zero and disable USBH/USBD/IrDA clock */
+		sys_clksrc = au_readl(SYS_CLKSRC);
+		sys_clksrc &= ~0x0000001F;
+		au_writel(sys_clksrc, SYS_CLKSRC);
+
+		sys_freqctrl = au_readl(SYS_FREQCTRL0);
+		sys_freqctrl &= ~0xFFF00000;
+
+		sys_clksrc = au_readl(SYS_CLKSRC);
+		sys_clksrc &= ~0x0000001F;
+
+		// FREQ2 = aux/2 = 48 MHz
+		sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+		/*
+		 * Route 48MHz FREQ2 into USBH/USBD/IrDA
+		 */
+		sys_clksrc |= ((4<<2) | (0<<1) | 0 );
+		au_writel(sys_clksrc, SYS_CLKSRC);
+
+		/* setup the static bus controller */
+		au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
+		au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
+		au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
+
+		// get USB Functionality pin state (device vs host drive pins)
+		pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
+		// 2nd USB port is USB host
+		pin_func |= 0x8000;
+		au_writel(pin_func, SYS_PINFUNC);
+	}
 #endif // defined (CONFIG_USB_OHCI)
 
 	/* Enable sys bus clock divider when IDLE state or no bus activity. */
 	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
 
 	// Enable the RTC if not already enabled
-	if (!(readb(0xac000028) & 0x20)) {
-		writeb(readb(0xac000028) | 0x20, 0xac000028);
+	if (!(readb(base + 0x28) & 0x20)) {
+		writeb(readb(base + 0x28) | 0x20, base + 0x28);
 		au_sync();
 	}
 	// Put the clock in BCD mode
-	if (readb(0xac00002C) & 0x4) { /* reg B */
-		writeb(readb(0xac00002c) & ~0x4, 0xac00002c);
+	if (readb(base + 0x2C) & 0x4) { /* reg B */
+		writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
 		au_sync();
 	}
 }
diff -Naur linux-2.6.19/arch/mips/kernel/linux32.c linux-mips-2.6.19/arch/mips/kernel/linux32.c
--- linux-2.6.19/arch/mips/kernel/linux32.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/kernel/linux32.c	2006-11-29 15:23:09.000000000 -0800
@@ -382,531 +382,6 @@
 	return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-	key_t    	  key;
-        __compat_uid_t  uid;
-        __compat_gid_t  gid;
-        __compat_uid_t  cuid;
-        __compat_gid_t  cgid;
-        compat_mode_t	mode;
-        unsigned short  seq;
-};
-
-struct ipc64_perm32 {
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t	mode;
-	unsigned short	seq;
-	unsigned short __pad1;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct semid_ds32 {
-        struct ipc_perm32 sem_perm;               /* permissions .. see ipc.h */
-        compat_time_t   sem_otime;              /* last semop time */
-        compat_time_t   sem_ctime;              /* last change time */
-        u32 sem_base;              /* ptr to first semaphore in array */
-        u32 sem_pending;          /* pending operations to be processed */
-        u32 sem_pending_last;    /* last pending operation */
-        u32 undo;                  /* undo requests on this array */
-        unsigned short  sem_nsems;              /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-	struct ipc64_perm32	sem_perm;
-	compat_time_t	sem_otime;
-	compat_time_t	sem_ctime;
-	unsigned int		sem_nsems;
-	unsigned int		__unused1;
-	unsigned int		__unused2;
-};
-
-struct msqid_ds32
-{
-        struct ipc_perm32 msg_perm;
-        u32 msg_first;
-        u32 msg_last;
-        compat_time_t   msg_stime;
-        compat_time_t   msg_rtime;
-        compat_time_t   msg_ctime;
-        u32 wwait;
-        u32 rwait;
-        unsigned short msg_cbytes;
-        unsigned short msg_qnum;
-        unsigned short msg_qbytes;
-        compat_ipc_pid_t msg_lspid;
-        compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-	struct ipc64_perm32 msg_perm;
-	compat_time_t msg_stime;
-	unsigned int __unused1;
-	compat_time_t msg_rtime;
-	unsigned int __unused2;
-	compat_time_t msg_ctime;
-	unsigned int __unused3;
-	unsigned int msg_cbytes;
-	unsigned int msg_qnum;
-	unsigned int msg_qbytes;
-	compat_pid_t msg_lspid;
-	compat_pid_t msg_lrpid;
-	unsigned int __unused4;
-	unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-        struct ipc_perm32       shm_perm;
-        int                     shm_segsz;
-        compat_time_t		shm_atime;
-        compat_time_t		shm_dtime;
-        compat_time_t		shm_ctime;
-        compat_ipc_pid_t    shm_cpid;
-        compat_ipc_pid_t    shm_lpid;
-        unsigned short          shm_nattch;
-};
-
-struct shmid64_ds32 {
-	struct ipc64_perm32	shm_perm;
-	compat_size_t		shm_segsz;
-	compat_time_t		shm_atime;
-	compat_time_t		shm_dtime;
-	compat_time_t shm_ctime;
-	compat_pid_t shm_cpid;
-	compat_pid_t shm_lpid;
-	unsigned int shm_nattch;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-	u32 msgp;
-	s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-	union semun fourth;
-	u32 pad;
-	int err, err2;
-	struct semid64_ds s;
-	mm_segment_t old_fs;
-
-	if (!uptr)
-		return -EINVAL;
-	err = -EFAULT;
-	if (get_user (pad, (u32 __user *)uptr))
-		return err;
-	if ((third & ~IPC_64) == SETVAL)
-		fourth.val = (int)pad;
-	else
-		fourth.__pad = (void __user *)A(pad);
-	switch (third & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case IPC_SET:
-	case SEM_INFO:
-	case GETVAL:
-	case GETPID:
-	case GETNCNT:
-	case GETZCNT:
-	case GETALL:
-	case SETVAL:
-	case SETALL:
-		err = sys_semctl (first, second, third, fourth);
-		break;
-
-	case IPC_STAT:
-	case SEM_STAT:
-		fourth.__pad = (struct semid64_ds __user *)&s;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_semctl(first, second, third | IPC_64, fourth);
-		set_fs(old_fs);
-
-		if (third & IPC_64) {
-			struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-		} else {
-			struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = - EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-	struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (second < 0)
-		return -EINVAL;
-	p = kmalloc (second + sizeof (struct msgbuf)
-				    + 4, GFP_USER);
-	if (!p)
-		return -ENOMEM;
-	err = get_user (p->mtype, &up->mtype);
-	if (err)
-		goto out;
-	err |= __copy_from_user (p->mtext, &up->mtext, second);
-	if (err)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-	set_fs (old_fs);
-out:
-	kfree (p);
-
-	return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-		 int version, void __user *uptr)
-{
-	struct msgbuf32 __user *up;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (!version) {
-		struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-		struct ipc_kludge32 ipck;
-
-		err = -EINVAL;
-		if (!uptr)
-			goto out;
-		err = -EFAULT;
-		if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-			goto out;
-		uptr = (void __user *)AA(ipck.msgp);
-		msgtyp = ipck.msgtyp;
-	}
-
-	if (second < 0)
-		return -EINVAL;
-	err = -ENOMEM;
-	p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-	if (!p)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-	set_fs (old_fs);
-	if (err < 0)
-		goto free_then_out;
-	up = (struct msgbuf32 __user *)uptr;
-	if (put_user (p->mtype, &up->mtype) ||
-	    __copy_to_user (&up->mtext, p->mtext, err))
-		err = -EFAULT;
-free_then_out:
-	kfree (p);
-out:
-	return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-	int err = -EINVAL, err2;
-	struct msqid64_ds m;
-	struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-	struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-	mm_segment_t old_fs;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case MSG_INFO:
-		err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-		break;
-
-	case IPC_SET:
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-		} else {
-			if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case MSG_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up32->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		}
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
-	unsigned long raddr;
-	u32 __user *uaddr = (u32 __user *)A((u32)third);
-	int err = -EINVAL;
-
-	if (version == 1)
-		return err;
-	err = do_shmat (first, uptr, second, &raddr);
-	if (err)
-		return err;
-	err = put_user (raddr, uaddr);
-	return err;
-}
-
-struct shm_info32 {
-	int used_ids;
-	u32 shm_tot, shm_rss, shm_swp;
-	u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
-	struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
-	struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
-	struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
-	int err = -EFAULT, err2;
-	struct shmid64_ds s64;
-	mm_segment_t old_fs;
-	struct shm_info si;
-	struct shmid_ds s;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-		second = IPC_INFO; /* So that we don't have to translate it */
-	case IPC_RMID:
-	case SHM_LOCK:
-	case SHM_UNLOCK:
-		err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
-		break;
-	case IPC_SET:
-		if (second & IPC_64) {
-			err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
-		} else {
-			err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case SHM_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	case SHM_INFO:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second, (void __user *)&si);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		err2 = put_user(si.used_ids, &uip->used_ids);
-		err2 |= __put_user(si.shm_tot, &uip->shm_tot);
-		err2 |= __put_user(si.shm_rss, &uip->shm_rss);
-		err2 |= __put_user(si.shm_swp, &uip->shm_swp);
-		err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
-		err2 |= __put_user (si.swap_successes, &uip->swap_successes);
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
-                            const struct compat_timespec __user *timeout32)
-{
-	struct compat_timespec t32;
-	struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
-	if (copy_from_user(&t32, timeout32, sizeof(t32)))
-		return -EFAULT;
-
-	if (put_user(t32.tv_sec, &t64->tv_sec) ||
-	    put_user(t32.tv_nsec, &t64->tv_nsec))
-		return -EFAULT;
-
-	return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -918,48 +393,43 @@
 	switch (call) {
 	case SEMOP:
 		/* struct sembuf is the same on 32 and 64bit :)) */
-		err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-		                      NULL);
+		err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
 		break;
 	case SEMTIMEDOP:
-		err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-		                      (const struct compat_timespec __user *)AA(fifth));
+		err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
+					    compat_ptr(fifth));
 		break;
 	case SEMGET:
-		err = sys_semget (first, second, third);
+		err = sys_semget(first, second, third);
 		break;
 	case SEMCTL:
-		err = do_sys32_semctl (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
 		break;
-
 	case MSGSND:
-		err = do_sys32_msgsnd (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
 		break;
 	case MSGRCV:
-		err = do_sys32_msgrcv (first, second, fifth, third,
-				       version, (void __user *)AA(ptr));
+		err = compat_sys_msgrcv(first, second, fifth, third,
+					version, compat_ptr(ptr));
 		break;
 	case MSGGET:
-		err = sys_msgget ((key_t) first, second);
+		err = sys_msgget((key_t) first, second);
 		break;
 	case MSGCTL:
-		err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_msgctl(first, second, compat_ptr(ptr));
 		break;
-
 	case SHMAT:
-		err = do_sys32_shmat (first, second, third,
-				      version, (void __user *)AA(ptr));
+		err = compat_sys_shmat(first, second, third, version,
+				       compat_ptr(ptr));
 		break;
 	case SHMDT:
-		err = sys_shmdt ((char __user *)A(ptr));
+		err = sys_shmdt(compat_ptr(ptr));
 		break;
 	case SHMGET:
-		err = sys_shmget (first, (unsigned)second, third);
+		err = sys_shmget(first, (unsigned)second, third);
 		break;
 	case SHMCTL:
-		err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_shmctl(first, second, compat_ptr(ptr));
 		break;
 	default:
 		err = -EINVAL;
@@ -969,18 +439,16 @@
 	return err;
 }
 
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
-			  int shmflg, int32_t __user *addr)
+#ifdef CONFIG_MIPS32_N32
+asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
 {
-	unsigned long raddr;
-	int err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &raddr);
-	if (err)
-		return err;
-
-	return put_user(raddr, addr);
+	/* compat_sys_semctl expects a pointer to union semun */
+	u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
+	if (put_user(ptr_to_compat(arg.__pad), uptr))
+		return -EFAULT;
+	return compat_sys_semctl(semid, semnum, cmd, uptr);
 }
+#endif
 
 struct sysctl_args32
 {
diff -Naur linux-2.6.19/arch/mips/kernel/mips-mt.c linux-mips-2.6.19/arch/mips/kernel/mips-mt.c
--- linux-2.6.19/arch/mips/kernel/mips-mt.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/kernel/mips-mt.c	2006-11-29 15:23:09.000000000 -0800
@@ -96,6 +96,10 @@
 		goto out_unlock;
 	}
 
+	retval = security_task_setscheduler(p, 0, NULL);
+	if (retval)
+		goto out_unlock;
+
 	/* Record new user-specified CPU set for future reference */
 	p->thread.user_cpus_allowed = new_mask;
 
@@ -141,8 +145,9 @@
 	p = find_process_by_pid(pid);
 	if (!p)
 		goto out_unlock;
-
-	retval = 0;
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
 
 	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
 
diff -Naur linux-2.6.19/arch/mips/kernel/ptrace.c linux-mips-2.6.19/arch/mips/kernel/ptrace.c
--- linux-2.6.19/arch/mips/kernel/ptrace.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/kernel/ptrace.c	2006-11-29 15:23:09.000000000 -0800
@@ -20,12 +20,12 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
-#include <linux/audit.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/user.h>
 #include <linux/security.h>
-#include <linux/signal.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
@@ -473,12 +473,16 @@
  */
 asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
+	/* do the secure computing check first */
+	secure_computing(regs->orig_eax);
+
 	if (unlikely(current->audit_context) && entryexit)
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
 		                   regs->regs[2]);
 
 	if (!(current->ptrace & PT_PTRACED))
 		goto out;
+
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		goto out;
 
@@ -496,9 +500,14 @@
 		send_sig(current->exit_code, current, 1);
 		current->exit_code = 0;
 	}
- out:
+
+out:
+	/* There is no ->orig_eax and that's quite intentional for now; making
+	   this work will require some work in various other places before it's
+	   more than a placebo.  */
+
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(audit_arch(), regs->regs[2],
-				    regs->regs[4], regs->regs[5],
-				    regs->regs[6], regs->regs[7]);
+		audit_syscall_entry(audit_arch(), regs->orig_eax,
+		                    regs->regs[4], regs->regs[5],
+		                    regs->regs[6], regs->regs[7]);
 }
diff -Naur linux-2.6.19/arch/mips/kernel/scall64-n32.S linux-mips-2.6.19/arch/mips/kernel/scall64-n32.S
--- linux-2.6.19/arch/mips/kernel/scall64-n32.S	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/kernel/scall64-n32.S	2006-11-29 15:23:09.000000000 -0800
@@ -149,8 +149,8 @@
 	PTR	sys_mincore
 	PTR	sys_madvise
 	PTR	sys_shmget
-	PTR	sys32_shmat
-	PTR	sys_shmctl			/* 6030 */
+	PTR	sys_shmat
+	PTR	compat_sys_shmctl			/* 6030 */
 	PTR	sys_dup
 	PTR	sys_dup2
 	PTR	sys_pause
@@ -184,12 +184,12 @@
 	PTR	sys32_newuname
 	PTR	sys_semget
 	PTR	sys_semop
-	PTR	sys_semctl
+	PTR	sysn32_semctl
 	PTR	sys_shmdt			/* 6065 */
 	PTR	sys_msgget
-	PTR	sys_msgsnd
-	PTR	sys_msgrcv
-	PTR	sys_msgctl
+	PTR	compat_sys_msgsnd
+	PTR	compat_sys_msgrcv
+	PTR	compat_sys_msgctl
 	PTR	compat_sys_fcntl		/* 6070 */
 	PTR	sys_flock
 	PTR	sys_fsync
@@ -335,7 +335,7 @@
 	PTR	compat_sys_fcntl64
 	PTR	sys_set_tid_address
 	PTR	sys_restart_syscall
-	PTR	sys_semtimedop			/* 6215 */
+	PTR	compat_sys_semtimedop			/* 6215 */
 	PTR	sys_fadvise64_64
 	PTR	compat_sys_statfs64
 	PTR	compat_sys_fstatfs64
diff -Naur linux-2.6.19/arch/mips/lib/Makefile linux-mips-2.6.19/arch/mips/lib/Makefile
--- linux-2.6.19/arch/mips/lib/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/lib/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -5,8 +5,6 @@
 lib-y	+= csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
 	   strnlen_user.o uncached.o
 
-obj-y	+= iomap.o
-
 # libgcc-style stuff needed in the kernel
 lib-y += ashldi3.o ashrdi3.o lshrdi3.o
 
diff -Naur linux-2.6.19/arch/mips/lib/iomap.c linux-mips-2.6.19/arch/mips/lib/iomap.c
--- linux-2.6.19/arch/mips/lib/iomap.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/lib/iomap.c	1969-12-31 16:00:00.000000000 -0800
@@ -1,78 +0,0 @@
-/*
- *  iomap.c, Memory Mapped I/O routines for MIPS architecture.
- *
- *  This code is based on lib/iomap.c, by Linus Torvalds.
- *
- *  Copyright (C) 2004-2005  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-	unsigned long end;
-
-	end = port + nr - 1UL;
-	if (ioport_resource.start > port ||
-	    ioport_resource.end < end || port > end)
-		return NULL;
-
-	return (void __iomem *)(mips_io_port_base + port);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-	unsigned long start, len, flags;
-
-	if (dev == NULL)
-		return NULL;
-
-	start = pci_resource_start(dev, bar);
-	len = pci_resource_len(dev, bar);
-	if (!start || !len)
-		return NULL;
-
-	if (maxlen != 0 && len > maxlen)
-		len = maxlen;
-
-	flags = pci_resource_flags(dev, bar);
-	if (flags & IORESOURCE_IO)
-		return ioport_map(start, len);
-	if (flags & IORESOURCE_MEM) {
-		if (flags & IORESOURCE_CACHEABLE)
-			return ioremap_cachable(start, len);
-		return ioremap_nocache(start, len);
-	}
-
-	return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-	iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
diff -Naur linux-2.6.19/arch/mips/lib-32/dump_tlb.c linux-mips-2.6.19/arch/mips/lib-32/dump_tlb.c
--- linux-2.6.19/arch/mips/lib-32/dump_tlb.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/lib-32/dump_tlb.c	2006-11-29 15:23:09.000000000 -0800
@@ -40,8 +40,6 @@
 		return "256Mb";
 #endif
 	}
-
-	return "unknown";
 }
 
 #define BARRIER()					\
diff -Naur linux-2.6.19/arch/mips/lib-64/dump_tlb.c linux-mips-2.6.19/arch/mips/lib-64/dump_tlb.c
--- linux-2.6.19/arch/mips/lib-64/dump_tlb.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/lib-64/dump_tlb.c	2006-11-29 15:23:09.000000000 -0800
@@ -31,8 +31,6 @@
 	case PM_256M:	return "256Mb";
 #endif
 	}
-
-	return "unknown";
 }
 
 #define BARRIER()					\
diff -Naur linux-2.6.19/arch/mips/mm/init.c linux-mips-2.6.19/arch/mips/mm/init.c
--- linux-2.6.19/arch/mips/mm/init.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/mm/init.c	2006-11-29 15:23:09.000000000 -0800
@@ -203,6 +203,31 @@
 	preempt_check_resched();
 }
 
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+
+	vto = kmap_atomic(to, KM_USER1);
+	if (cpu_has_dc_aliases) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(from);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		flush_data_cache_page((unsigned long)vto);
+	kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is copied on other CPUs too before using it */
+	smp_wmb();
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
 void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
diff -Naur linux-2.6.19/arch/mips/momentum/jaguar_atx/Makefile linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/Makefile
--- linux-2.6.19/arch/mips/momentum/jaguar_atx/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -6,7 +6,7 @@
 # unless it's something special (ie not a .c file).
 #
 
-obj-y += irq.o prom.o reset.o setup.o
+obj-y += irq.o platform.o prom.o reset.o setup.o
 
 obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o
 obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o
diff -Naur linux-2.6.19/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
--- linux-2.6.19/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h	2006-11-29 15:23:09.000000000 -0800
@@ -46,7 +46,9 @@
 
 extern unsigned long ja_fpga_base;
 
-#define JAGUAR_FPGA_WRITE(x,y) writeb(x, ja_fpga_base + JAGUAR_ATX_REG_##y)
-#define JAGUAR_FPGA_READ(x) readb(ja_fpga_base + JAGUAR_ATX_REG_##x)
+#define __FPGA_REG_TO_ADDR(reg)						\
+	((void *) ja_fpga_base + JAGUAR_ATX_REG_##reg)
+#define JAGUAR_FPGA_WRITE(x, reg) writeb(x, __FPGA_REG_TO_ADDR(reg))
+#define JAGUAR_FPGA_READ(reg) readb(__FPGA_REG_TO_ADDR(reg))
 
 #endif
diff -Naur linux-2.6.19/arch/mips/momentum/jaguar_atx/platform.c linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/platform.c
--- linux-2.6.19/arch/mips/momentum/jaguar_atx/platform.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/platform.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,235 @@
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/mv643xx.h>
+#include <linux/platform_device.h>
+
+#include "jaguar_atx_fpga.h"
+
+#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
+
+static struct resource mv643xx_eth_shared_resources[] = {
+	[0] = {
+		.name   = "ethernet shared base",
+		.start  = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
+		.end    = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
+		                       MV643XX_ETH_SHARED_REGS_SIZE - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device mv643xx_eth_shared_device = {
+	.name		= MV643XX_ETH_SHARED_NAME,
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(mv643xx_eth_shared_resources),
+	.resource	= mv643xx_eth_shared_resources,
+};
+
+#define MV_SRAM_BASE			0xfe000000UL
+#define MV_SRAM_SIZE			(256 * 1024)
+
+#define MV_SRAM_RXRING_SIZE		(MV_SRAM_SIZE / 4)
+#define MV_SRAM_TXRING_SIZE		(MV_SRAM_SIZE / 4)
+
+#define MV_SRAM_BASE_ETH0		MV_SRAM_BASE
+#define MV_SRAM_BASE_ETH1		(MV_SRAM_BASE + (MV_SRAM_SIZE / 2))
+
+#define MV64x60_IRQ_ETH_0 48
+#define MV64x60_IRQ_ETH_1 49
+#define MV64x60_IRQ_ETH_2 50
+
+#ifdef CONFIG_MV643XX_ETH_0
+
+static struct resource mv64x60_eth0_resources[] = {
+	[0] = {
+		.name	= "eth0 irq",
+		.start	= MV64x60_IRQ_ETH_0,
+		.end	= MV64x60_IRQ_ETH_0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static char eth0_mac_addr[ETH_ALEN];
+
+static struct mv643xx_eth_platform_data eth0_pd = {
+	.mac_addr	= eth0_mac_addr,
+
+	.tx_sram_addr	= MV_SRAM_BASE_ETH0,
+	.tx_sram_size	= MV_SRAM_TXRING_SIZE,
+	.tx_queue_size	= MV_SRAM_TXRING_SIZE / 16,
+
+	.rx_sram_addr	= MV_SRAM_BASE_ETH0 + MV_SRAM_TXRING_SIZE,
+	.rx_sram_size	= MV_SRAM_RXRING_SIZE,
+	.rx_queue_size	= MV_SRAM_RXRING_SIZE / 16,
+};
+
+static struct platform_device eth0_device = {
+	.name		= MV643XX_ETH_NAME,
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(mv64x60_eth0_resources),
+	.resource	= mv64x60_eth0_resources,
+	.dev = {
+		.platform_data = &eth0_pd,
+	},
+};
+#endif /* CONFIG_MV643XX_ETH_0 */
+
+#ifdef CONFIG_MV643XX_ETH_1
+
+static struct resource mv64x60_eth1_resources[] = {
+	[0] = {
+		.name	= "eth1 irq",
+		.start	= MV64x60_IRQ_ETH_1,
+		.end	= MV64x60_IRQ_ETH_1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static char eth1_mac_addr[ETH_ALEN];
+
+static struct mv643xx_eth_platform_data eth1_pd = {
+	.mac_addr	= eth1_mac_addr,
+
+	.tx_sram_addr	= MV_SRAM_BASE_ETH1,
+	.tx_sram_size	= MV_SRAM_TXRING_SIZE,
+	.tx_queue_size	= MV_SRAM_TXRING_SIZE / 16,
+
+	.rx_sram_addr	= MV_SRAM_BASE_ETH1 + MV_SRAM_TXRING_SIZE,
+	.rx_sram_size	= MV_SRAM_RXRING_SIZE,
+	.rx_queue_size	= MV_SRAM_RXRING_SIZE / 16,
+};
+
+static struct platform_device eth1_device = {
+	.name		= MV643XX_ETH_NAME,
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(mv64x60_eth1_resources),
+	.resource	= mv64x60_eth1_resources,
+	.dev = {
+		.platform_data = &eth1_pd,
+	},
+};
+#endif /* CONFIG_MV643XX_ETH_1 */
+
+#ifdef CONFIG_MV643XX_ETH_2
+
+static struct resource mv64x60_eth2_resources[] = {
+	[0] = {
+		.name	= "eth2 irq",
+		.start	= MV64x60_IRQ_ETH_2,
+		.end	= MV64x60_IRQ_ETH_2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static char eth2_mac_addr[ETH_ALEN];
+
+static struct mv643xx_eth_platform_data eth2_pd = {
+	.mac_addr	= eth2_mac_addr,
+};
+
+static struct platform_device eth2_device = {
+	.name		= MV643XX_ETH_NAME,
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(mv64x60_eth2_resources),
+	.resource	= mv64x60_eth2_resources,
+	.dev = {
+		.platform_data = &eth2_pd,
+	},
+};
+#endif /* CONFIG_MV643XX_ETH_2 */
+
+static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
+	&mv643xx_eth_shared_device,
+#ifdef CONFIG_MV643XX_ETH_0
+	&eth0_device,
+#endif
+#ifdef CONFIG_MV643XX_ETH_1
+	&eth1_device,
+#endif
+#ifdef CONFIG_MV643XX_ETH_2
+	&eth2_device,
+#endif
+};
+
+static u8 __init exchange_bit(u8 val, u8 cs)
+{
+	/* place the data */
+	JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
+	udelay(1);
+
+	/* turn the clock on */
+	JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
+	udelay(1);
+
+	/* turn the clock off and read-strobe */
+	JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
+
+	/* return the data */
+	return (JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1;
+}
+
+static void __init get_mac(char dest[6])
+{
+	u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	int i,j;
+
+	for (i = 0; i < 12; i++)
+		exchange_bit(read_opcode[i], 1);
+
+	for (j = 0; j < 6; j++) {
+		dest[j] = 0;
+		for (i = 0; i < 8; i++) {
+			dest[j] <<= 1;
+			dest[j] |= exchange_bit(0, 1);
+		}
+	}
+
+	/* turn off CS */
+	exchange_bit(0,0);
+}
+
+/*
+ * Copy and increment ethernet MAC address by a small value.
+ *
+ * This is useful for systems where only one MAC address is stored in
+ * non-volatile memory for multiple ports.
+ */
+static inline void eth_mac_add(unsigned char *dst, unsigned char *src,
+	unsigned int add)
+{
+	int i;
+
+	BUG_ON(add >= 256);
+
+	for (i = ETH_ALEN - 1; i >= 0; i--) {
+		dst[i] = src[i] + add;
+		add = dst[i] < src[i];		/* compute carry */
+	}
+
+	WARN_ON(add);
+}
+
+static int __init mv643xx_eth_add_pds(void)
+{
+	unsigned char mac[ETH_ALEN];
+	int ret;
+
+	get_mac(mac);
+#ifdef CONFIG_MV643XX_ETH_0
+	eth_mac_add(eth0_mac_addr, mac, 0);
+#endif
+#ifdef CONFIG_MV643XX_ETH_1
+	eth_mac_add(eth1_mac_addr, mac, 1);
+#endif
+#ifdef CONFIG_MV643XX_ETH_2
+	eth_mac_add(eth2_mac_addr, mac, 2);
+#endif
+	ret = platform_add_devices(mv643xx_eth_pd_devs,
+			ARRAY_SIZE(mv643xx_eth_pd_devs));
+
+	return ret;
+}
+
+device_initcall(mv643xx_eth_add_pds);
+
+#endif /* defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE) */
diff -Naur linux-2.6.19/arch/mips/momentum/jaguar_atx/prom.c linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/prom.c
--- linux-2.6.19/arch/mips/momentum/jaguar_atx/prom.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/momentum/jaguar_atx/prom.c	2006-11-29 15:23:09.000000000 -0800
@@ -39,56 +39,6 @@
 	return "Momentum Jaguar-ATX";
 }
 
-#ifdef CONFIG_MV643XX_ETH
-extern unsigned char prom_mac_addr_base[6];
-
-static void burn_clocks(void)
-{
-	int i;
-
-	/* this loop should burn at least 1us -- this should be plenty */
-	for (i = 0; i < 0x10000; i++)
-		;
-}
-
-static u8 exchange_bit(u8 val, u8 cs)
-{
-	/* place the data */
-	JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
-	burn_clocks();
-
-	/* turn the clock on */
-	JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
-	burn_clocks();
-
-	/* turn the clock off and read-strobe */
-	JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
-
-	/* return the data */
-	return ((JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1);
-}
-
-void get_mac(char dest[6])
-{
-	u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-	int i,j;
-
-	for (i = 0; i < 12; i++)
-		exchange_bit(read_opcode[i], 1);
-
-	for (j = 0; j < 6; j++) {
-		dest[j] = 0;
-		for (i = 0; i < 8; i++) {
-			dest[j] <<= 1;
-			dest[j] |= exchange_bit(0, 1);
-		}
-	}
-
-	/* turn off CS */
-	exchange_bit(0,0);
-}
-#endif
-
 #ifdef CONFIG_64BIT
 
 unsigned long signext(unsigned long addr)
@@ -228,11 +178,6 @@
 #endif /* CONFIG_64BIT */
 	mips_machgroup = MACH_GROUP_MOMENCO;
 	mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
-
-#ifdef CONFIG_MV643XX_ETH
-	/* get the base MAC address for on-board ethernet ports */
-	get_mac(prom_mac_addr_base);
-#endif
 }
 
 unsigned long __init prom_free_prom_memory(void)
diff -Naur linux-2.6.19/arch/mips/pci/fixup-tb0219.c linux-mips-2.6.19/arch/mips/pci/fixup-tb0219.c
--- linux-2.6.19/arch/mips/pci/fixup-tb0219.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/pci/fixup-tb0219.c	2006-11-29 15:23:09.000000000 -0800
@@ -2,7 +2,7 @@
  *  fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
  *
  *  Copyright (C) 2003  Megasolution Inc. <matsu@megasolution.jp>
- *  Copyright (C) 2004  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *  Copyright (C) 2004-2005  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff -Naur linux-2.6.19/arch/mips/qemu/Makefile linux-mips-2.6.19/arch/mips/qemu/Makefile
--- linux-2.6.19/arch/mips/qemu/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/qemu/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -4,4 +4,5 @@
 
 obj-y		= q-firmware.o q-irq.o q-mem.o q-setup.o q-reset.o
 
+obj-$(CONFIG_VT) += q-vga.o
 obj-$(CONFIG_SMP) += q-smp.o
diff -Naur linux-2.6.19/arch/mips/qemu/q-setup.c linux-mips-2.6.19/arch/mips/qemu/q-setup.c
--- linux-2.6.19/arch/mips/qemu/q-setup.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/qemu/q-setup.c	2006-11-29 15:23:09.000000000 -0800
@@ -2,6 +2,7 @@
 #include <asm/io.h>
 #include <asm/time.h>
 
+extern void qvga_init(void);
 extern void qemu_reboot_setup(void);
 
 #define QEMU_PORT_BASE 0xb4000000
@@ -23,5 +24,9 @@
 void __init plat_mem_setup(void)
 {
 	set_io_port_base(QEMU_PORT_BASE);
+#ifdef CONFIG_VT
+	qvga_init();
+#endif
+
 	qemu_reboot_setup();
 }
diff -Naur linux-2.6.19/arch/mips/qemu/q-vga.c linux-mips-2.6.19/arch/mips/qemu/q-vga.c
--- linux-2.6.19/arch/mips/qemu/q-vga.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/arch/mips/qemu/q-vga.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,189 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This will eventually go into the qemu firmware.
+ */
+#include <linux/init.h>
+#include <linux/screen_info.h>
+#include <linux/tty.h>
+#include <asm/io.h>
+#include <video/vga.h>
+
+/*
+ * This will eventually be done by the firmware; right now Linux assumes it
+ * is running on uninitialized hardware.
+ */
+#undef LOAD_VGA_FONT
+
+static unsigned char sr[8] __initdata = {	/* Sequencer */
+	0x03, 0x00, 0x03, 0x04, 0x02, 0x00, 0x00, 0x00
+};
+
+static unsigned char gr[16] __initdata= {	/* Graphics Controller */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0e, 0x00,
+	0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static unsigned char ar[21] __initdata= {	/* Attribute Controller */
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+	0x0c, 0x01, 0x07, 0x13, 0x00
+};
+
+static unsigned char cr[32] __initdata= {	/* CRT Controller */
+	0x91, 0x4f, 0x4f, 0x95, 0x57, 0x4f, 0xc0, 0x1f,
+	0x00, 0x4f, 0x0d, 0x0e, 0x02, 0x30, 0x09, 0xb0,
+	0x90, 0x83, 0x8f, 0x28, 0x1f, 0x8f, 0xc1, 0xa3,
+	0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static struct rgb {
+	unsigned char r;
+	unsigned char g;
+	unsigned char b;
+} palette[16] __initdata= {
+	[ 0] = {0x00, 0x00, 0x00},
+	[ 1] = {0x00, 0x00, 0x2a},
+	[ 2] = {0x00, 0x2a, 0x00},
+	[ 3] = {0x00, 0x2a, 0x2a},
+	[ 4] = {0x2a, 0x00, 0x00},
+	[ 5] = {0x2a, 0x00, 0x2a},
+	[ 6] = {0x2a, 0x15, 0x00},
+	[ 7] = {0x2a, 0x2a, 0x2a},
+	[ 8] = {0x15, 0x15, 0x15},
+	[ 9] = {0x15, 0x15, 0x3f},
+	[10] = {0x15, 0x3f, 0x15},
+	[11] = {0x15, 0x3f, 0x3f},
+	[12] = {0x3f, 0x15, 0x15},
+	[13] = {0x3f, 0x15, 0x3f},
+	[14] = {0x3f, 0x3f, 0x15},
+	[15] = {0x3f, 0x3f, 0x3f}
+
+};
+
+void __init qvga_init_ibm(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {	/* Sequencer registers */
+		outb(i, 0x3c4);
+		outb(sr[i], 0x3c5);
+	}
+
+	for (i = 0; i < 16; i++) {	/* Graphics Controller registers */
+		outb(i, 0x3ce);
+		outb(gr[i], 0x3cf);
+	}
+
+	for (i = 0; i < 21; i++) {	/* Attribute Controller registers */
+		outb(i, 0x3c0);
+		outb(ar[i], 0x3c1);
+	}
+	outb(0x20, 0x3c0);		/* enable bit in *index* register */
+
+	for (i = 0; i < 32; i++) {	/* CRT Controller registers */
+		outb(i, 0x3d4);
+		outb(cr[i], 0x3d5);
+	}
+
+	for (i = 0; i < 16; i++) {	/* palette */
+		outb(i, 0x3c8);
+		outb(palette[i].r, 0x3c9);
+		outb(palette[i].g, 0x3c9);
+		outb(palette[i].b, 0x3c9);
+	}
+
+#if 1
+	 for (i = 0; i < 0x20000; i += 2)
+		*(volatile unsigned short *) (0xb00a0000 + i) = 0xaaaa;
+#endif
+}
+
+#ifdef LOAD_VGA_FONT
+#include "/home/ralf/src/qemu/qemu-mips/vgafont.h"
+
+static void __init
+qvga_load_font(unsigned char *def, unsigned int c)
+{
+	volatile void *w = (volatile void *) 0xb00a0000;
+
+	vga_wseq(NULL, 0, 1);
+	vga_wseq(NULL, 2, 4);
+	vga_wseq(NULL, 4, 7);
+	vga_wseq(NULL, 0, 3);
+	vga_wgfx(NULL, 4, 2);
+	vga_wgfx(NULL, 5, 0);
+	vga_wgfx(NULL, 6, 0);
+
+	memcpy(w, def, c);
+
+	vga_wseq(NULL, 0, 1);
+	vga_wseq(NULL, 2, 3);
+	vga_wseq(NULL, 4, 3);
+	vga_wseq(NULL, 0, 3);
+	vga_wgfx(NULL, 4, 0);
+	vga_wgfx(NULL, 5, 0x10);
+	vga_wgfx(NULL, 6, 0xe);
+}
+#endif
+
+void __init qvga_init(void)
+{
+	struct screen_info *si = &screen_info;
+	unsigned int h;
+	int i;
+
+#ifdef LOAD_VGA_FONT
+	qvga_load_font(vgafont16, 4096);
+#endif
+
+	vga_wgfx(NULL, 5, 0x10);	/* Set odd/even mode */
+	vga_wgfx(NULL, 6, 0x0c);	/* map to offset 0xb8000, text mode */
+	vga_wseq(NULL, 2, 3);		/* Planes 0 & 1 */
+	vga_wseq(NULL, 3, 4);		/* Font offset */
+	outb(1, VGA_MIS_W);		/* set msr to MSR_COLOR_EMULATION */
+	vga_wcrt(NULL, 1, 79);		/* 80 columns */
+	vga_wcrt(NULL, 9, 15);		/* 16 pixels per character */
+	vga_wcrt(NULL, 0x0c, 0);	/* start address high 8 bit */
+	vga_wcrt(NULL, 0x0d, 0);	/* start address low 8 bit */
+	vga_wcrt(NULL, 0x13, 0x28);	/* line offset */
+	vga_wcrt(NULL, 0x07, 0x1f);	/* line compare bit 8 */
+	vga_wcrt(NULL, 0x09, 0x4f);	/* line compare bit 9 */
+	vga_wcrt(NULL, 0x18, 0xff);	/* line compare low 8 bit */
+
+	h = (25 * 16);
+	vga_wcrt(NULL, 0x12, h);
+
+	outb(7, 0x3d4);
+	outb((inb(0x3d5) & ~0x42) | ((h >> 7) & 2) | ((h >> 3) & 0x40), 0x3d5);
+
+	for (i = 0; i < 21; i++)	/* Attribute Controller */
+		vga_wattr(NULL, i, ar[i]);
+	outb(0x20, 0x3c0);		/* Set bit 5 in Attribute Controller */
+					/* index ...  VGA is so stupid I want */
+					/* to cry all day ... */
+	outb(0, VGA_PEL_IW);
+	for (i = 0; i < 16; i++) {	/* palette */
+		outb(palette[i].r, VGA_PEL_D);
+		outb(palette[i].g, VGA_PEL_D);
+		outb(palette[i].b, VGA_PEL_D);
+	}
+
+	si->orig_x		= 0; 			/* Cursor x position */
+	si->orig_y		= 0;			/* Cursor y position */
+	si->orig_video_cols	= 80;			/* Columns */
+	si->orig_video_lines	= 25;			/* Lines */
+	si->orig_video_isVGA	= VIDEO_TYPE_VGAC;	/* Card type */
+	si->orig_video_points	= 16;
+
+#if 0
+	for (i = 0; i < 80; i += 2)
+		//*(volatile unsigned short *) (0xb00b8000 + i) = 0x0100 | 'A';
+		scr_writew(0x0100 | 'A', (volatile unsigned short *) (0xb00b8000 + i));
+	while (1);
+#endif
+}
diff -Naur linux-2.6.19/drivers/char/Kconfig linux-mips-2.6.19/drivers/char/Kconfig
--- linux-2.6.19/drivers/char/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -357,19 +357,56 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called istallion.
 
-config AU1000_UART
-	bool "Enable Au1000 UART Support"
-	depends on SERIAL_NONSTANDARD && MIPS
-	help
-	  If you have an Alchemy AU1000 processor (MIPS based) and you want
-	  to use serial ports, say Y.  Otherwise, say N.
-
-config AU1000_SERIAL_CONSOLE
-	bool "Enable Au1000 serial console"
-	depends on AU1000_UART
+config AU1X00_GPIO
+	tristate "Alchemy Au1000 GPIO device support"
+	depends on MIPS && SOC_AU1X00
+
+config TS_AU1X00_ADS7846
+	tristate "Au1000/ADS7846 touchscreen support"
+	depends on MIPS && SOC_AU1X00
+
+config SIBYTE_SB1250_DUART
+	bool "Support for BCM1xxx onchip DUART"
+	depends on MIPS && SIBYTE_SB1xxx_SOC=y
+
+config SIBYTE_SB1250_DUART_CONSOLE
+	bool "Console on BCM1xxx DUART"
+	depends on SIBYTE_SB1250_DUART
+
+config SERIAL_DEC
+	bool "DECstation serial support"
+	depends on MACH_DECSTATION
+	default y
+	help
+	  This selects whether you want to be asked about drivers for
+	  DECstation serial ports.
+
+	  Note that the answer to this question won't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about DECstation serial ports.
+
+	  If unsure, say Y.
+
+config SERIAL_DEC_CONSOLE
+	bool "Support for console on a DECstation serial port"
+	depends on SERIAL_DEC
+	default y
+	help
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode).  Note that the firmware uses ttyS0 as the serial console on
+	  the Maxine and ttyS2 on the others.
+
+	  If unsure, say Y.
+
+config ZS
+	bool "Z85C30 Serial Support"
+	depends on SERIAL_DEC
+	default y
 	help
-	  If you have an Alchemy AU1000 processor (MIPS based) and you want
-	  to use a console on a serial port, say Y.  Otherwise, say N.
+	  Documentation on the Zilog Z85C30 serial communications controller
+	  is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf>.
 
 config A2232
 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
diff -Naur linux-2.6.19/drivers/char/Makefile linux-mips-2.6.19/drivers/char/Makefile
--- linux-2.6.19/drivers/char/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -31,6 +31,7 @@
 obj-$(CONFIG_A2232)		+= ser_a2232.o generic_serial.o
 obj-$(CONFIG_ATARI_DSP56K)	+= dsp56k.o
 obj-$(CONFIG_MOXA_SMARTIO)	+= mxser.o
+obj-$(CONFIG_SIBYTE_SB1250_DUART) += sb1250_duart.o
 obj-$(CONFIG_COMPUTONE)		+= ip2/
 obj-$(CONFIG_RISCOM8)		+= riscom8.o
 obj-$(CONFIG_ISI)		+= isicom.o
@@ -53,6 +54,7 @@
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
 obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+obj-$(CONFIG_SERIAL_DEC)	+= decserial.o
 obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
 
 obj-$(CONFIG_PRINTER)		+= lp.o
@@ -80,6 +82,7 @@
 obj-$(CONFIG_HW_RANDOM)		+= hw_random/
 obj-$(CONFIG_FTAPE)		+= ftape/
 obj-$(CONFIG_COBALT_LCD)	+= lcd.o
+obj-$(CONFIG_AU1000_GPIO)	+= au1000_gpio.o
 obj-$(CONFIG_PPDEV)		+= ppdev.o
 obj-$(CONFIG_NWBUTTON)		+= nwbutton.o
 obj-$(CONFIG_NWFLASH)		+= nwflash.o
diff -Naur linux-2.6.19/drivers/char/au1000_gpio.c linux-mips-2.6.19/drivers/char/au1000_gpio.c
--- linux-2.6.19/drivers/char/au1000_gpio.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/au1000_gpio.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,262 @@
+/*
+ * FILE NAME au1000_gpio.c
+ *
+ * BRIEF MODULE DESCRIPTION
+ *  Driver for Alchemy Au1000 GPIO.
+ *
+ *  Author: MontaVista Software, Inc.  <source@mvista.com>
+ *          Steve Longerbeam <stevel@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE	LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/au1000.h>
+#include <asm/au1000_gpio.h>
+
+#define VERSION "0.01"
+
+static const struct {
+	u32 active_hi;
+	u32 avail_mask;
+} pinfunc_to_avail[15] = {
+	{1,  0x7<<16},   // 0  = SSI0     / GPIO[18:16]
+	{-1, 0},         // 1  = AC97     / SSI1
+	{1,  1<<19},     // 2  = IRDA     / GPIO19
+	{1,  1<<20},     // 3  = UART0    / GPIO20
+	{1,  0x1f<<24},  // 4  = NIC2     / GPIO[28:24]
+	{1,  0x7<<29},   // 5  = I2S      / GPIO[31:29]
+	{0,  1<<8},      // 6  = I2SDI    / GPIO8
+	{0,  0x3f<<9},   // 7  = UART3    / GPIO[14:9]
+	{0,  1<<15},     // 8  = IRFIRSEL / GPIO15
+	{0,  1<<2},      // 9  = EXTCLK0 or OSC / GPIO2
+	{0,  1<<3},      // 10 = EXTCLK1  / GPIO3
+	{0,  1<<6},      // 11 = SMROMCKE / GPIO6
+	{1,  1<<21},     // 12 = UART1    / GPIO21
+	{1,  1<<22},     // 13 = UART2    / GPIO22
+	{1,  1<<23}      // 14 = UART3    / GPIO23
+};
+
+	
+u32 get_au1000_avail_gpio_mask(void)
+{
+	int i;
+	u32 pinfunc = inl(SYS_PINFUNC);
+	u32 avail_mask = 0; // start with no gpio available
+
+	// first, check for GPIOs reprogrammed as peripheral pins
+	for (i=0; i<15; i++) {
+		if (pinfunc_to_avail[i].active_hi < 0)
+			continue;
+		if (!(pinfunc_to_avail[i].active_hi ^
+		      ((pinfunc & (1<<i)) ? 1:0)))
+			avail_mask |= pinfunc_to_avail[i].avail_mask;
+	}
+
+	// check for GPIOs used as interrupt sources
+	avail_mask &= ~(inl(IC1_MASKRD) &
+			(inl(IC1_CFG0RD) | inl(IC1_CFG1RD)));
+
+#ifdef CONFIG_USB_OHCI
+	avail_mask &= ~((1<<4) | (1<<11) | (1<<5) | (1<<13));
+#endif
+	
+	return avail_mask;
+}
+
+
+/*
+ * Tristate the requested GPIO pins specified in data.
+ * Only available GPIOs will be tristated.
+ */
+int au1000gpio_tristate(u32 data)
+{
+	data &= get_au1000_avail_gpio_mask();
+
+	if (data)
+		outl(data, SYS_TRIOUTCLR);
+
+	return 0;
+}
+
+
+/*
+ * Return the pin state. Pins configured as outputs will return
+ * the output state, and pins configured as inputs (tri-stated)
+ * will return input pin state.
+ */
+int au1000gpio_in(u32 *data)
+{
+	*data = inl(SYS_PINSTATERD);
+	return 0;
+}
+
+
+/*
+ * Set/clear GPIO pins. Only available GPIOs will be affected.
+ */
+int au1000gpio_set(u32 data)
+{
+	data &= get_au1000_avail_gpio_mask();
+
+	if (data)
+		outl(data, SYS_OUTPUTSET);
+	return 0;
+}
+
+int au1000gpio_clear(u32 data)
+{
+	data &= get_au1000_avail_gpio_mask();
+
+	if (data)
+		outl(data, SYS_OUTPUTCLR);
+	return 0;
+}
+
+/*
+ * Output data to GPIO pins. Only available GPIOs will be affected.
+ */
+int au1000gpio_out(u32 data)
+{
+	au1000gpio_set(data);
+	au1000gpio_clear(~data);
+	return 0;
+}
+
+
+EXPORT_SYMBOL(get_au1000_avail_gpio_mask);
+EXPORT_SYMBOL(au1000gpio_tristate);
+EXPORT_SYMBOL(au1000gpio_in);
+EXPORT_SYMBOL(au1000gpio_set);
+EXPORT_SYMBOL(au1000gpio_clear);
+EXPORT_SYMBOL(au1000gpio_out);
+
+
+static int au1000gpio_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+
+static int au1000gpio_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+
+static int au1000gpio_ioctl(struct inode *inode, struct file *file,
+			    unsigned int cmd, unsigned long arg)
+{
+	int status;
+	u32 val;
+	
+	switch(cmd) {
+	case AU1000GPIO_IN:
+		
+		status = au1000gpio_in(&val);
+		if (status != 0)
+			return status;
+
+		return put_user(val, (u32 *)arg);
+
+	case AU1000GPIO_OUT:
+
+		if (get_user(val, (u32 *)arg)) 
+			return -EFAULT;
+
+		return au1000gpio_out(val);
+
+	case AU1000GPIO_SET:
+
+		if (get_user(val, (u32 *)arg)) 
+			return -EFAULT;
+
+		return au1000gpio_set(val);
+		
+	case AU1000GPIO_CLEAR:
+
+		if (get_user(val, (u32 *)arg)) 
+			return -EFAULT;
+
+		return au1000gpio_clear(val);
+
+	case AU1000GPIO_TRISTATE:
+
+		if (get_user(val, (u32 *)arg)) 
+			return -EFAULT;
+
+		return au1000gpio_tristate(val);
+
+	case AU1000GPIO_AVAIL_MASK:
+		
+		return put_user(get_au1000_avail_gpio_mask(),
+				(u32 *)arg);
+		
+	default:
+		return -ENOIOCTLCMD;
+
+	}
+
+	return 0;
+}
+
+
+static struct file_operations au1000gpio_fops =
+{
+	.owner		= THIS_MODULE,
+	.ioctl		= au1000gpio_ioctl,
+	.open		= au1000gpio_open,
+	.release	= au1000gpio_release,
+};
+
+
+static struct miscdevice au1000gpio_miscdev =
+{
+	MISC_DYNAMIC_MINOR,
+	"au1000_gpio",
+	&au1000gpio_fops
+};
+
+
+int __init au1000gpio_init(void)
+{
+	misc_register(&au1000gpio_miscdev);
+	printk("Au1000 gpio driver, version %s\n", VERSION);
+	return 0;
+}	
+
+
+void __exit au1000gpio_exit(void)
+{
+	misc_deregister(&au1000gpio_miscdev);
+}
+
+
+module_init(au1000gpio_init);
+module_exit(au1000gpio_exit);
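
For reference, the misc device registered above exposes the GPIO calls through ioctl. The fragment below is a minimal userspace sketch of how that interface could be exercised; the /dev/au1000_gpio node name is an assumption (it depends on how device nodes are created on the target), the AU1000GPIO_* request numbers are those handled by au1000gpio_ioctl() in <asm/au1000_gpio.h>, and the sketch is illustrative only, not part of the patch.

/*
 * Hypothetical userspace consumer of the au1000_gpio misc device.
 * The device node path is an assumption; the ioctl numbers are the
 * AU1000GPIO_* requests handled by au1000gpio_ioctl() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/au1000_gpio.h>

int main(void)
{
	uint32_t avail, state;
	int fd = open("/dev/au1000_gpio", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver which GPIO lines are actually free to use. */
	if (ioctl(fd, AU1000GPIO_AVAIL_MASK, &avail) == 0)
		printf("available GPIO mask: 0x%08x\n", (unsigned int)avail);

	/* Drive GPIO16 high (if available), then read the pin state back. */
	state = 1u << 16;
	ioctl(fd, AU1000GPIO_SET, &state);
	if (ioctl(fd, AU1000GPIO_IN, &state) == 0)
		printf("pin state: 0x%08x\n", (unsigned int)state);

	close(fd);
	return 0;
}
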
diff -Naur linux-2.6.19/drivers/char/au1000_ts.c linux-mips-2.6.19/drivers/char/au1000_ts.c
--- linux-2.6.19/drivers/char/au1000_ts.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/au1000_ts.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,677 @@
+/*
+ *      au1000_ts.c  --  Touch screen driver for the Alchemy Au1000's
+ *                       SSI Port 0 talking to the ADS7846 touch screen
+ *                       controller.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ *         	stevel@mvista.com or source@mvista.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Notes:
+ *
+ *  Revision history
+ *    06.27.2001  Initial version
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ioport.h>       /* request_region */
+#include <linux/interrupt.h>    /* mark_bh */
+#include <asm/uaccess.h>        /* get_user,copy_to_user */
+#include <asm/io.h>
+#include <asm/au1000.h>
+
+#define TS_NAME "au1000-ts"
+#define TS_MAJOR 11
+
+#define PFX TS_NAME
+#define AU1000_TS_DEBUG 1
+
+#ifdef AU1000_TS_DEBUG
+#define dbg(format, arg...) printk(KERN_DEBUG PFX ": " format "\n" , ## arg)
+#else
+#define dbg(format, arg...) do {} while (0)
+#endif
+#define err(format, arg...) printk(KERN_ERR PFX ": " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO PFX ": " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING PFX ": " format "\n" , ## arg)
+
+
+// SSI Status register bit defines
+#define SSISTAT_BF    (1<<4)
+#define SSISTAT_OF    (1<<3)
+#define SSISTAT_UF    (1<<2)
+#define SSISTAT_DONE  (1<<1)
+#define SSISTAT_BUSY  (1<<0)
+
+// SSI Interrupt Pending and Enable register bit defines
+#define SSIINT_OI     (1<<3)
+#define SSIINT_UI     (1<<2)
+#define SSIINT_DI     (1<<1)
+
+// SSI Address/Data register bit defines
+#define SSIADAT_D         (1<<24)
+#define SSIADAT_ADDR_BIT  16
+#define SSIADAT_ADDR_MASK (0xff<<SSIADAT_ADDR_BIT)
+#define SSIADAT_DATA_BIT  0
+#define SSIADAT_DATA_MASK (0xfff<<SSIADAT_DATA_BIT)
+
+// SSI Enable register bit defines
+#define SSIEN_CD (1<<1)
+#define SSIEN_E  (1<<0)
+
+// SSI Config register bit defines
+#define SSICFG_AO (1<<24)
+#define SSICFG_DO (1<<23)
+#define SSICFG_ALEN_BIT 20
+#define SSICFG_ALEN_MASK (0x7<<SSICFG_ALEN_BIT)
+#define SSICFG_DLEN_BIT 16
+#define SSICFG_DLEN_MASK (0xf<<SSICFG_DLEN_BIT)
+#define SSICFG_DD (1<<11)
+#define SSICFG_AD (1<<10)
+#define SSICFG_BM_BIT 8
+#define SSICFG_BM_MASK (0x3<<SSICFG_BM_BIT)
+#define SSICFG_CE (1<<7)
+#define SSICFG_DP (1<<6)
+#define SSICFG_DL (1<<5)
+#define SSICFG_EP (1<<4)
+
+// Bus Turnaround Selection
+#define SCLK_HOLD_HIGH 0
+#define SCLK_HOLD_LOW  1
+#define SCLK_CYCLE     2
+
+/*
+ * Default config for SSI0:
+ *
+ *   - transmit MSBit first
+ *   - expect MSBit first on data receive
+ *   - address length 7 bits
+ *   - expect data length 12 bits
+ *   - do not disable Direction bit
+ *   - do not disable Address bits
+ *   - SCLK held low during bus turnaround
+ *   - Address and Data bits clocked out on falling edge of SCLK
+ *   - Direction bit high is a read, low is a write
+ *   - Direction bit precedes Address bits
+ *   - Active low enable signal
+ */
+
+#define DEFAULT_SSI_CONFIG \
+    (SSICFG_AO | SSICFG_DO | (6<<SSICFG_ALEN_BIT) | (11<<SSICFG_DLEN_BIT) |\
+    (SCLK_HOLD_LOW<<SSICFG_BM_BIT) | SSICFG_DP | SSICFG_EP)
+
+
+// ADS7846 Control Byte bit defines
+#define ADS7846_ADDR_BIT  4
+#define ADS7846_ADDR_MASK (0x7<<ADS7846_ADDR_BIT)
+#define   ADS7846_MEASURE_X  (0x5<<ADS7846_ADDR_BIT)
+#define   ADS7846_MEASURE_Y  (0x1<<ADS7846_ADDR_BIT)
+#define   ADS7846_MEASURE_Z1 (0x3<<ADS7846_ADDR_BIT)
+#define   ADS7846_MEASURE_Z2 (0x4<<ADS7846_ADDR_BIT)
+#define ADS7846_8BITS     (1<<3)
+#define ADS7846_12BITS    0
+#define ADS7846_SER       (1<<2)
+#define ADS7846_DFR       0
+#define ADS7846_PWR_BIT   0
+#define   ADS7846_PD      0
+#define   ADS7846_ADC_ON  (0x1<<ADS7846_PWR_BIT)
+#define   ADS7846_REF_ON  (0x2<<ADS7846_PWR_BIT)
+#define   ADS7846_REF_ADC_ON (0x3<<ADS7846_PWR_BIT)
+
+#define MEASURE_12BIT_X \
+    (ADS7846_MEASURE_X | ADS7846_12BITS | ADS7846_DFR | ADS7846_PD)
+#define MEASURE_12BIT_Y \
+    (ADS7846_MEASURE_Y | ADS7846_12BITS | ADS7846_DFR | ADS7846_PD)
+#define MEASURE_12BIT_Z1 \
+    (ADS7846_MEASURE_Z1 | ADS7846_12BITS | ADS7846_DFR | ADS7846_PD)
+#define MEASURE_12BIT_Z2 \
+    (ADS7846_MEASURE_Z2 | ADS7846_12BITS | ADS7846_DFR | ADS7846_PD)
+
+typedef enum {
+	IDLE = 0,
+	ACQ_X,
+	ACQ_Y,
+	ACQ_Z1,
+	ACQ_Z2
+} acq_state_t;
+
+/* +++++++++++++ Lifted from include/linux/h3600_ts.h ++++++++++++++*/
+typedef struct {
+	unsigned short pressure;  // touch pressure
+	unsigned short x;         // calibrated X
+	unsigned short y;         // calibrated Y
+	unsigned short millisecs; // timestamp of this event
+} TS_EVENT;
+
+typedef struct {
+	int xscale;
+	int xtrans;
+	int yscale;
+	int ytrans;
+	int xyswap;
+} TS_CAL;
+
+/* Use 'f' as magic number */
+#define IOC_MAGIC  'f'
+
+#define TS_GET_RATE             _IO(IOC_MAGIC, 8)
+#define TS_SET_RATE             _IO(IOC_MAGIC, 9)
+#define TS_GET_CAL              _IOR(IOC_MAGIC, 10, TS_CAL)
+#define TS_SET_CAL              _IOW(IOC_MAGIC, 11, TS_CAL)
+
+/* +++++++++++++ Done lifted from include/linux/h3600_ts.h +++++++++*/
+
+
+#define EVENT_BUFSIZE 128
+
+/*
+ * Which pressure equation to use from ADS7846 datasheet.
+ * The first equation requires knowing only the X plate
+ * resistance, but needs 4 measurements (X, Y, Z1, Z2).
+ * The second equation requires knowing both X and Y plate
+ * resistance, but only needs 3 measurements (X, Y, Z1).
+ * The second equation is preferred because of the shorter
+ * acquisition time required.
+ */
+enum {
+	PRESSURE_EQN_1 = 0,
+	PRESSURE_EQN_2
+};
+
+
+/*
+ * The touch screen's X and Y plate resistances, used by
+ * pressure equations.
+ */
+#define DEFAULT_X_PLATE_OHMS 580
+#define DEFAULT_Y_PLATE_OHMS 580
+
+/*
+ * Pen up/down pressure resistance thresholds.
+ *
+ * FIXME: these are bogus and will have to be found empirically.
+ *
+ * These are hysteresis points. If pen state is up and pressure
+ * is greater than pen-down threshold, pen transitions to down.
+ * If pen state is down and pressure is less than pen-up threshold,
+ * pen transitions to up. If pressure is in-between, pen status
+ * doesn't change.
+ *
+ * This wouldn't be needed if PENIRQ* from the ADS7846 were
+ * routed to an interrupt line on the Au1000. This would issue
+ * an interrupt when the panel is touched.
+ */
+#define DEFAULT_PENDOWN_THRESH_OHMS 100
+#define DEFAULT_PENUP_THRESH_OHMS    80
+
+typedef struct {
+	int baudrate;
+	u32 clkdiv;
+	acq_state_t acq_state;            // State of acquisition state machine
+	int x_raw, y_raw, z1_raw, z2_raw; // The current raw acquisition values
+	TS_CAL cal;                       // Calibration values
+	// The X and Y plate resistance, needed to calculate pressure
+	int x_plate_ohms, y_plate_ohms;
+	// pressure resistance at which pen is considered down/up
+	int pendown_thresh_ohms;
+	int penup_thresh_ohms;
+	int pressure_eqn;                 // eqn to use for pressure calc
+	int pendown;                      // 1 = pen is down, 0 = pen is up
+	TS_EVENT event_buf[EVENT_BUFSIZE];// The event queue
+	int nextIn, nextOut;
+	int event_count;
+	struct fasync_struct *fasync;     // asynch notification
+	struct timer_list acq_timer;      // Timer for triggering acquisitions
+	wait_queue_head_t wait;           // read wait queue
+	spinlock_t lock;
+	struct tq_struct chug_tq;
+} au1000_ts_t;
+
+static au1000_ts_t au1000_ts;
+
+
+static inline u32
+calc_clkdiv(int baud)
+{
+	u32 sys_busclk =
+		(get_au1000_speed() / (int)(inl(PM_POWERUP_CONTROL)&0x03) + 2);
+	return (sys_busclk / (2 * baud)) - 1;
+}
+
+static inline int
+calc_baudrate(u32 clkdiv)
+{
+	u32 sys_busclk =
+		(get_au1000_speed() / (int)(inl(PM_POWERUP_CONTROL)&0x03) + 2);
+	return sys_busclk / (2 * (clkdiv + 1));
+}
+
+
+/*
+ * This is a bottom-half handler that is scheduled after
+ * raw X,Y,Z1,Z2 coordinates have been acquired, and does
+ * the following:
+ *
+ *   - computes touch screen pressure resistance
+ *   - if pressure is above a threshold considered to be pen-down:
+ *         - compute calibrated X and Y coordinates
+ *         - queue a new TS_EVENT
+ *         - signal asynchronously and wake up any read
+ */
+static void
+chug_raw_data(void* private)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)private;
+	TS_EVENT event;
+	int Rt, Xcal, Ycal;
+	unsigned long flags;
+
+	// timestamp this new event.
+	event.millisecs = jiffies;
+
+	// Calculate touch pressure resistance
+	if (ts->pressure_eqn == PRESSURE_EQN_2) {
+		Rt = (ts->x_plate_ohms * ts->x_raw *
+		      (4096 - ts->z1_raw)) / ts->z1_raw;
+		Rt -= (ts->y_plate_ohms * ts->y_raw);
+		Rt = (Rt + 2048) >> 12; // round to nearest ohm
+	} else {
+		Rt = (ts->x_plate_ohms * ts->x_raw *
+		      (ts->z2_raw - ts->z1_raw)) / ts->z1_raw;
+		Rt = (Rt + 2048) >> 12; // round to nearest ohm
+	}
+
+	// hysteresis
+	if (!ts->pendown && Rt > ts->pendown_thresh_ohms)
+		ts->pendown = 1;
+	else if (ts->pendown && Rt < ts->penup_thresh_ohms)
+		ts->pendown = 0;
+
+	if (ts->pendown) {
+		// Pen is down
+		// Calculate calibrated X,Y
+		Xcal = ((ts->cal.xscale * ts->x_raw) >> 8) + ts->cal.xtrans;
+		Ycal = ((ts->cal.yscale * ts->y_raw) >> 8) + ts->cal.ytrans;
+
+		event.x = (unsigned short)Xcal;
+		event.y = (unsigned short)Ycal;
+		event.pressure = (unsigned short)Rt;
+
+		// add this event to the event queue
+		spin_lock_irqsave(&ts->lock, flags);
+		ts->event_buf[ts->nextIn++] = event;
+		if (ts->nextIn == EVENT_BUFSIZE)
+			ts->nextIn = 0;
+		if (ts->event_count < EVENT_BUFSIZE) {
+			ts->event_count++;
+		} else {
+			// throw out the oldest event
+			if (++ts->nextOut == EVENT_BUFSIZE)
+				ts->nextOut = 0;
+		}
+		spin_unlock_irqrestore(&ts->lock, flags);
+
+		// async notify
+		if (ts->fasync)
+			kill_fasync(&ts->fasync, SIGIO, POLL_IN);
+		// wake up any read call
+		if (waitqueue_active(&ts->wait))
+			wake_up_interruptible(&ts->wait);
+	}
+}
+
+
+/*
+ * Raw X,Y,pressure acquisition timer function. This triggers
+ * the start of a new acquisition. The interval between calls
+ * is the touch screen polling rate.
+ */
+static void
+au1000_acq_timer(unsigned long data)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ts->lock, flags);
+
+	// start acquisition with X coordinate
+	ts->acq_state = ACQ_X;
+	// start me up
+	outl(SSIADAT_D | (MEASURE_12BIT_X << SSIADAT_ADDR_BIT), SSI0_ADATA);
+
+	// schedule next acquire
+	ts->acq_timer.expires = jiffies + HZ / 100;
+	add_timer(&ts->acq_timer);
+
+	spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static void
+ssi0_interrupt(int irq, void *dev_id)
+{
+	au1000_ts_t *ts = (au1000_ts_t*)dev_id;
+	u32 stat, int_stat, data;
+
+	spin_lock(&ts->lock);
+
+	stat = inl(SSI0_STATUS);
+	// clear sticky status bits
+	outl(stat & (SSISTAT_OF|SSISTAT_UF|SSISTAT_DONE), SSI0_STATUS);
+
+	int_stat = inl(SSI0_INT);
+	// clear sticky intr status bits
+	outl(int_stat & (SSIINT_OI|SSIINT_UI|SSIINT_DI), SSI0_INT);
+
+	if ((int_stat & (SSIINT_OI|SSIINT_UI|SSIINT_DI)) != SSIINT_DI) {
+		if (int_stat & SSIINT_OI)
+			err("overflow");
+		if (int_stat & SSIINT_UI)
+			err("underflow");
+		spin_unlock(&ts->lock);
+		return;
+	}
+
+	data = inl(SSI0_ADATA) & SSIADAT_DATA_MASK;
+
+	switch (ts->acq_state) {
+	case IDLE:
+		break;
+	case ACQ_X:
+		ts->x_raw = data;
+		ts->acq_state = ACQ_Y;
+		// trigger Y acq
+		outl(SSIADAT_D | (MEASURE_12BIT_Y << SSIADAT_ADDR_BIT),
+		     SSI0_ADATA);
+		break;
+	case ACQ_Y:
+		ts->y_raw = data;
+		ts->acq_state = ACQ_Z1;
+		// trigger Z1 acq
+		outl(SSIADAT_D | (MEASURE_12BIT_Z1 << SSIADAT_ADDR_BIT),
+		     SSI0_ADATA);
+		break;
+	case ACQ_Z1:
+		ts->z1_raw = data;
+		if (ts->pressure_eqn == PRESSURE_EQN_2) {
+			// don't acq Z2, using 2nd eqn for touch pressure
+			ts->acq_state = IDLE;
+			// got the raw stuff, now mark BH
+			queue_task(&ts->chug_tq, &tq_immediate);
+			mark_bh(IMMEDIATE_BH);
+		} else {
+			ts->acq_state = ACQ_Z2;
+			// trigger Z2 acq
+			outl(SSIADAT_D | (MEASURE_12BIT_Z2<<SSIADAT_ADDR_BIT),
+			     SSI0_ADATA);
+		}
+		break;
+	case ACQ_Z2:
+		ts->z2_raw = data;
+		ts->acq_state = IDLE;
+		// got the raw stuff, now mark BH
+		queue_task(&ts->chug_tq, &tq_immediate);
+		mark_bh(IMMEDIATE_BH);
+		break;
+	}
+
+	spin_unlock(&ts->lock);
+}
+
+
+/* +++++++++++++ File operations ++++++++++++++*/
+
+static int
+au1000_fasync(int fd, struct file *filp, int mode)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)filp->private_data;
+	return fasync_helper(fd, filp, mode, &ts->fasync);
+}
+
+static int
+au1000_ioctl(struct inode * inode, struct file *filp,
+	     unsigned int cmd, unsigned long arg)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)filp->private_data;
+
+	switch(cmd) {
+	case TS_GET_RATE:       /* TODO: what is this? */
+		break;
+	case TS_SET_RATE:       /* TODO: what is this? */
+		break;
+	case TS_GET_CAL:
+		copy_to_user((char *)arg, (char *)&ts->cal, sizeof(TS_CAL));
+		break;
+	case TS_SET_CAL:
+		copy_from_user((char *)&ts->cal, (char *)arg, sizeof(TS_CAL));
+		break;
+	default:
+		err("unknown cmd %04x", cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned int
+au1000_poll(struct file * filp, poll_table * wait)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)filp->private_data;
+	poll_wait(filp, &ts->wait, wait);
+	if (ts->event_count)
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+static ssize_t
+au1000_read(struct file * filp, char * buf, size_t count, loff_t * l)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)filp->private_data;
+	unsigned long flags;
+	TS_EVENT event;
+	int i;
+
+	if (ts->event_count == 0) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		interruptible_sleep_on(&ts->wait);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+
+	for (i = count;
+	     i >= sizeof(TS_EVENT);
+	     i -= sizeof(TS_EVENT), buf += sizeof(TS_EVENT)) {
+		if (ts->event_count == 0)
+			break;
+		spin_lock_irqsave(&ts->lock, flags);
+		event = ts->event_buf[ts->nextOut++];
+		if (ts->nextOut == EVENT_BUFSIZE)
+			ts->nextOut = 0;
+		if (ts->event_count)
+			ts->event_count--;
+		spin_unlock_irqrestore(&ts->lock, flags);
+		copy_to_user(buf, &event, sizeof(TS_EVENT));
+	}
+
+	return count - i;
+}
+
+
+static int
+au1000_open(struct inode * inode, struct file * filp)
+{
+	au1000_ts_t* ts;
+	unsigned long flags;
+
+	filp->private_data = ts = &au1000_ts;
+
+	spin_lock_irqsave(&ts->lock, flags);
+
+	// setup SSI0 config
+	outl(DEFAULT_SSI_CONFIG, SSI0_CONFIG);
+
+	// clear out SSI0 status bits
+	outl(SSISTAT_OF|SSISTAT_UF|SSISTAT_DONE, SSI0_STATUS);
+	// clear out SSI0 interrupt pending bits
+	outl(SSIINT_OI|SSIINT_UI|SSIINT_DI, SSI0_INT);
+
+	// enable SSI0 interrupts
+	outl(SSIINT_OI|SSIINT_UI|SSIINT_DI, SSI0_INT_ENABLE);
+
+	/*
+	 * init bh handler that chugs the raw data (calibrates and
+	 * calculates touch pressure).
+	 */
+	ts->chug_tq.routine = chug_raw_data;
+	ts->chug_tq.data = ts;
+	ts->pendown = 0; // pen up
+	
+	// flush event queue
+	ts->nextIn = ts->nextOut = ts->event_count = 0;
+	
+	// Start acquisition timer function
+	init_timer(&ts->acq_timer);
+	ts->acq_timer.function = au1000_acq_timer;
+	ts->acq_timer.data = (unsigned long)ts;
+	ts->acq_timer.expires = jiffies + HZ / 100;
+	add_timer(&ts->acq_timer);
+
+	spin_unlock_irqrestore(&ts->lock, flags);
+
+	return 0;
+}
+
+static int
+au1000_release(struct inode * inode, struct file * filp)
+{
+	au1000_ts_t* ts = (au1000_ts_t*)filp->private_data;
+	unsigned long flags;
+	
+	au1000_fasync(-1, filp, 0);
+	del_timer_sync(&ts->acq_timer);
+
+	spin_lock_irqsave(&ts->lock, flags);
+	// disable SSI0 interrupts
+	outl(0, SSI0_INT_ENABLE);
+	spin_unlock_irqrestore(&ts->lock, flags);
+
+	return 0;
+}
+
+
+static struct file_operations ts_fops = {
+	.read           = au1000_read,
+	.poll           = au1000_poll,
+	.ioctl		= au1000_ioctl,
+	.fasync         = au1000_fasync,
+	.open		= au1000_open,
+	.release	= au1000_release,
+};
+
+/* +++++++++++++ End File operations ++++++++++++++*/
+
+
+int __init
+au1000ts_init_module(void)
+{
+	au1000_ts_t* ts = &au1000_ts;
+	int ret;
+
+	/* register our character device */
+	if ((ret = register_chrdev(TS_MAJOR, TS_NAME, &ts_fops)) < 0) {
+		err("can't get major number");
+		return ret;
+	}
+	info("registered");
+
+	memset(ts, 0, sizeof(au1000_ts_t));
+	init_waitqueue_head(&ts->wait);
+	spin_lock_init(&ts->lock);
+
+	if (!request_region(virt_to_phys((void*)SSI0_STATUS), 0x100, TS_NAME)) {
+		err("SSI0 ports in use");
+		return -ENXIO;
+	}
+
+	if ((ret = request_irq(AU1000_SSI0_INT, ssi0_interrupt,
+			       SA_SHIRQ | SA_INTERRUPT, TS_NAME, ts))) {
+		err("could not get IRQ");
+		return ret;
+	}
+
+	// initial calibration values
+	ts->cal.xscale = -93;
+	ts->cal.xtrans = 346;
+	ts->cal.yscale = -64;
+	ts->cal.ytrans = 251;
+
+	// init pen up/down hysteresis points
+	ts->pendown_thresh_ohms = DEFAULT_PENDOWN_THRESH_OHMS;
+	ts->penup_thresh_ohms = DEFAULT_PENUP_THRESH_OHMS;
+	ts->pressure_eqn = PRESSURE_EQN_2;
+	// init X and Y plate resistances
+	ts->x_plate_ohms = DEFAULT_X_PLATE_OHMS;
+	ts->y_plate_ohms = DEFAULT_Y_PLATE_OHMS;
+
+	// set GPIO to SSI0 function
+	outl(inl(PIN_FUNCTION) & ~1, PIN_FUNCTION);
+	
+	// enable SSI0 clock and bring SSI0 out of reset
+	outl(0, SSI0_CONTROL);
+	udelay(1000);
+	outl(SSIEN_E, SSI0_CONTROL);
+	udelay(100);
+	
+	// FIXME: is this a working baudrate?
+	ts->clkdiv = 0;
+	ts->baudrate = calc_baudrate(ts->clkdiv);
+	outl(ts->clkdiv, SSI0_CLKDIV);
+
+	info("baudrate = %d Hz", ts->baudrate);
+	
+	return 0;
+}
+
+void
+au1000ts_cleanup_module(void)
+{
+	// disable clocks and hold in reset
+	outl(SSIEN_CD, SSI0_CONTROL);
+	free_irq(AU1000_SSI0_INT, &au1000_ts);
+	release_region(virt_to_phys((void*)SSI0_STATUS), 0x100);
+	unregister_chrdev(TS_MAJOR, TS_NAME);
+}
+
+/* Module information */
+MODULE_AUTHOR("Steve Longerbeam, stevel@mvista.com, www.mvista.com");
+MODULE_DESCRIPTION("Au1000/ADS7846 Touch Screen Driver");
+
+module_init(au1000ts_init_module);
+module_exit(au1000ts_cleanup_module);
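
The pressure math in chug_raw_data() above is easier to follow with the 12-bit scaling written out. The following is a standalone sketch of the same arithmetic (the PRESSURE_EQN_2 branch) in plain floating point, using the driver's default 580-ohm plate resistances and hypothetical raw samples; it restates what the driver computes, not the ADS7846 datasheet directly, and is not part of the patch.

#include <stdio.h>

/*
 * Same expression as the driver's PRESSURE_EQN_2 branch; the driver's
 * (Rt + 2048) >> 12 step is a rounded divide by 4096, done here as a
 * plain division.
 */
static double touch_resistance(int x_raw, int y_raw, int z1_raw,
			       int x_plate_ohms, int y_plate_ohms)
{
	double rt = (double)x_plate_ohms * x_raw * (4096 - z1_raw) / z1_raw;

	rt -= (double)y_plate_ohms * y_raw;
	return rt / 4096.0;
}

int main(void)
{
	/* Hypothetical raw ADC samples for a press near mid-panel. */
	double rt = touch_resistance(2000, 2100, 900, 580, 580);

	printf("touch resistance ~ %.0f ohms\n", rt);
	/* The driver treats Rt above the 100-ohm threshold as pen down and
	   below the 80-ohm threshold as pen up (hysteresis in between). */
	printf("pen state: %s\n", rt > 100 ? "down" : "up");
	return 0;
}
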
diff -Naur linux-2.6.19/drivers/char/decserial.c linux-mips-2.6.19/drivers/char/decserial.c
--- linux-2.6.19/drivers/char/decserial.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/decserial.c	2006-11-29 15:23:09.000000000 -0800
@@ -14,86 +14,84 @@
  *      device. Added support for PROM console in drivers/char/tty_io.c
  *      instead. Although it may work to enable more than one 
  *      console device I strongly recommend to use only one.
+ *
+ *	Copyright (C) 2004  Maciej W. Rozycki
  */
 
+#include <linux/errno.h>
 #include <linux/init.h>
-#include <asm/dec/machtype.h>
-
-#ifdef CONFIG_ZS
-extern int zs_init(void);
-#endif
 
-#ifdef CONFIG_DZ
-extern int dz_init(void);
-#endif
+#include <asm/dec/machtype.h>
+#include <asm/dec/serial.h>
 
-#ifdef CONFIG_SERIAL_CONSOLE
+extern int register_zs_hook(unsigned int channel,
+			    struct dec_serial_hook *hook);
+extern int unregister_zs_hook(unsigned int channel);
 
+int register_dec_serial_hook(unsigned int channel,
+			     struct dec_serial_hook *hook)
+{
 #ifdef CONFIG_ZS
-extern void zs_serial_console_init(void);
-#endif
-
-#ifdef CONFIG_DZ
-extern void dz_serial_console_init(void);
+	if (IOASIC)
+		return register_zs_hook(channel, hook);
 #endif
+	return 0;
+}
 
+int unregister_dec_serial_hook(unsigned int channel)
+{
+#ifdef CONFIG_ZS
+	if (IOASIC)
+		return unregister_zs_hook(channel);
 #endif
+	return 0;
+}
 
-/* rs_init - starts up the serial interface -
-   handle normal case of starting up the serial interface */
 
-#ifdef CONFIG_SERIAL
+extern int zs_init(void);
+extern int dz_init(void);
 
+/*
+ * rs_init - starts up the serial interface -
+ * handle normal case of starting up the serial interface
+ */
 int __init rs_init(void)
 {
-
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
-    if (IOASIC)
-	return zs_init();
-    else
-	return dz_init();
-#else
-
 #ifdef CONFIG_ZS
-    return zs_init();
+	if (IOASIC)
+		return zs_init();
 #endif
-
 #ifdef CONFIG_DZ
-    return dz_init();
-#endif
-
+	if (!IOASIC)
+		return dz_init();
 #endif
+	return -ENXIO;
 }
 
 __initcall(rs_init);
 
-#endif
 
-#ifdef CONFIG_SERIAL_CONSOLE
+#ifdef CONFIG_SERIAL_DEC_CONSOLE
+
+extern void zs_serial_console_init(void);
+extern void dz_serial_console_init(void);
 
-/* serial_console_init handles the special case of starting
- *   up the console on the serial port
+/*
+ * dec_serial_console_init handles the special case of starting
+ * up the console on the serial port
  */
-static int __init decserial_console_init(void)
+static int __init dec_serial_console_init(void)
 {
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
-    if (IOASIC)
-	zs_serial_console_init();
-    else
-	dz_serial_console_init();
-#else
-
 #ifdef CONFIG_ZS
-    zs_serial_console_init();
+	if (IOASIC)
+		zs_serial_console_init();
 #endif
-
 #ifdef CONFIG_DZ
-    dz_serial_console_init();
-#endif
-
+	if (!IOASIC)
+		dz_serial_console_init();
 #endif
     return 0;
 }
-console_initcall(decserial_console_init);
+console_initcall(dec_serial_console_init);
 
 #endif
diff -Naur linux-2.6.19/drivers/char/sb1250_duart.c linux-mips-2.6.19/drivers/char/sb1250_duart.c
--- linux-2.6.19/drivers/char/sb1250_duart.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/char/sb1250_duart.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,909 @@
+/*
+ * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/* 
+ * Driver support for the on-chip sb1250 dual-channel serial port,
+ * running in asynchronous mode.  Also, support for doing a serial console
+ * on one of those ports
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/termios.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+#include <linux/tty_flip.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/sibyte/swarm.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte UART configuration
+#endif
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/war.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#define UNIT_CHANREG(n,reg)	A_BCM1480_DUART_CHANREG((n),(reg))
+#define UNIT_IMRREG(n)		A_BCM1480_DUART_IMRREG(n)
+#define UNIT_INT(n)		(K_BCM1480_INT_UART_0 + (n))
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#define UNIT_CHANREG(n,reg)	A_DUART_CHANREG((n),(reg))
+#define UNIT_IMRREG(n)		A_DUART_IMRREG(n)
+#define UNIT_INT(n)		(K_INT_UART_0 + (n))
+#else
+#error invalid SiByte UART configuration
+#endif
+
+/* Toggle spewing of debugging output */
+#undef DEBUG
+
+#define DEFAULT_CFLAGS          (CS8 | B115200)
+
+#define TX_INTEN          1
+#define DUART_INITIALIZED 2
+
+#define DUART_MAX_LINE 4
+char sb1250_duart_present[DUART_MAX_LINE];
+EXPORT_SYMBOL(sb1250_duart_present);
+
+/*
+ * Still not sure what the termios structures set up here are for, 
+ *  but we have to supply pointers to them to register the tty driver
+ */
+static struct tty_driver *sb1250_duart_driver; //, sb1250_duart_callout_driver;
+
+/*
+ * This lock protects both the open flags for all the uart states as 
+ * well as the reference count for the module
+ */
+static DEFINE_SPINLOCK(open_lock);
+
+typedef struct { 
+	unsigned char       outp_buf[SERIAL_XMIT_SIZE];
+	unsigned int        outp_head;
+	unsigned int        outp_tail;
+	unsigned int        outp_count;
+	spinlock_t          outp_lock;
+	unsigned int        open;
+	unsigned int        line;
+	unsigned int        last_cflags;
+	unsigned long       flags;
+	struct tty_struct   *tty;
+	/* CSR addresses */
+	volatile u32	    *status;
+	volatile u32	    *imr;
+	volatile u32	    *tx_hold;
+	volatile u32	    *rx_hold;
+	volatile u32	    *mode_1;
+	volatile u32	    *mode_2;
+	volatile u32	    *clk_sel;
+	volatile u32	    *cmd;
+} uart_state_t;
+
+static uart_state_t uart_states[DUART_MAX_LINE];
+
+/*
+ * Inline functions local to this module 
+ */
+
+/*
+ * In bug 1956, we get glitches that can mess up uart registers.  This
+ * "write-mode-1 after any register access" is the accepted
+ * workaround.
+ */
+#if SIBYTE_1956_WAR
+static unsigned int last_mode1[DUART_MAX_LINE];
+#endif
+
+static inline u32 READ_SERCSR(volatile u32 *addr, int line)
+{
+	u32 val = csr_in32(addr);
+#if SIBYTE_1956_WAR
+	csr_out32(last_mode1[line], uart_states[line].mode_1);
+#endif
+	return val;
+}
+
+static inline void WRITE_SERCSR(u32 val, volatile u32 *addr, int line)
+{
+	csr_out32(val, addr);
+#if SIBYTE_1956_WAR
+	csr_out32(last_mode1[line], uart_states[line].mode_1);
+#endif
+}
+
+static void init_duart_port(uart_state_t *port, int line)
+{
+	if (!(port->flags & DUART_INITIALIZED)) {
+		port->line = line;
+		port->status = IOADDR(UNIT_CHANREG(line, R_DUART_STATUS));
+		port->imr = IOADDR(UNIT_IMRREG(line));
+		port->tx_hold = IOADDR(UNIT_CHANREG(line, R_DUART_TX_HOLD));
+		port->rx_hold = IOADDR(UNIT_CHANREG(line, R_DUART_RX_HOLD));
+		port->mode_1 = IOADDR(UNIT_CHANREG(line, R_DUART_MODE_REG_1));
+		port->mode_2 = IOADDR(UNIT_CHANREG(line, R_DUART_MODE_REG_2));
+		port->clk_sel = IOADDR(UNIT_CHANREG(line, R_DUART_CLK_SEL));
+		port->cmd = IOADDR(UNIT_CHANREG(line, R_DUART_CMD));
+		port->flags |= DUART_INITIALIZED;
+	}
+}
+
+/*
+ * Mask out the passed interrupt lines at the duart level.  This should be
+ * called while holding the associated outp_lock.
+ */
+static inline void duart_mask_ints(unsigned int line, unsigned int mask)
+{
+	uart_state_t *port = uart_states + line;
+	u64 tmp = READ_SERCSR(port->imr, line);
+	WRITE_SERCSR(tmp & ~mask, port->imr, line);
+}
+
+	
+/* Unmask the passed interrupt lines at the duart level */
+static inline void duart_unmask_ints(unsigned int line, unsigned int mask)
+{
+	uart_state_t *port = uart_states + line;
+	u64 tmp = READ_SERCSR(port->imr, line);
+	WRITE_SERCSR(tmp | mask, port->imr, line);
+}
+
+static inline void transmit_char_pio(uart_state_t *us)
+{
+	struct tty_struct *tty = us->tty;
+	int blocked = 0;
+
+	if (spin_trylock(&us->outp_lock)) {
+		for (;;) {
+			if (!(READ_SERCSR(us->status, us->line) & M_DUART_TX_RDY))
+				break;
+			if (us->outp_count <= 0 || tty->stopped || tty->hw_stopped) {
+				break;
+			} else {
+				WRITE_SERCSR(us->outp_buf[us->outp_head],
+					     us->tx_hold, us->line);
+				us->outp_head = (us->outp_head + 1) & (SERIAL_XMIT_SIZE-1);
+				if (--us->outp_count <= 0)
+					break;
+			}
+			udelay(10);
+		}
+		spin_unlock(&us->outp_lock);
+	} else {
+		blocked = 1;
+	}
+
+	if (!us->outp_count || tty->stopped ||
+	    tty->hw_stopped || blocked) {
+		us->flags &= ~TX_INTEN;
+		duart_mask_ints(us->line, M_DUART_IMR_TX);
+	}
+
+      	if (us->open &&
+	    (us->outp_count < (SERIAL_XMIT_SIZE/2))) {
+		/*
+		 * We told the discipline at one point that we had no
+		 * space, so it went to sleep.  Wake it up when we hit
+		 * half empty
+		 */
+		if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+		    tty->ldisc.write_wakeup)
+			tty->ldisc.write_wakeup(tty);
+		wake_up_interruptible(&tty->write_wait);
+	}
+}
+
+/* 
+ * Generic interrupt handler for both channels.  dev_id is a pointer
+ * to the proper uart_states structure, so from that we can derive 
+ * which port interrupted
+ */
+
+static irqreturn_t duart_int(int irq, void *dev_id)
+{
+	uart_state_t *us = (uart_state_t *)dev_id;
+	struct tty_struct *tty = us->tty;
+	unsigned int status = READ_SERCSR(us->status, us->line);
+
+	pr_debug("DUART INT\n");
+
+	if (status & M_DUART_RX_RDY) {
+		int counter = 2048;
+		unsigned int ch;
+
+		if (status & M_DUART_OVRUN_ERR)
+			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		if (status & M_DUART_PARITY_ERR) {
+			printk("Parity error!\n");
+		} else if (status & M_DUART_FRM_ERR) {
+			printk("Frame error!\n");
+		}
+
+		while (counter > 0) {
+			if (!(READ_SERCSR(us->status, us->line) & M_DUART_RX_RDY))
+				break;
+			ch = READ_SERCSR(us->rx_hold, us->line);
+			tty_insert_flip_char(tty, ch, 0);
+			udelay(1);
+			counter--;
+		}
+		tty_flip_buffer_push(tty);
+	}
+
+	if (status & M_DUART_TX_RDY) {
+		transmit_char_pio(us);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ *  Actual driver functions
+ */
+
+/* Return the number of characters we can accommodate in a write at this instant */
+static int duart_write_room(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	int retval;
+
+	retval = SERIAL_XMIT_SIZE - us->outp_count;
+
+	pr_debug("duart_write_room called, returning %i\n", retval);
+
+	return retval;
+}
+
+/* memcpy the data from src to destination, but take extra care if the
+   data is coming from user space */
+static inline int copy_buf(char *dest, const char *src, int size, int from_user) 
+{
+	if (from_user) {
+		(void) copy_from_user(dest, src, size); 
+	} else {
+		memcpy(dest, src, size);
+	}
+	return size;
+}
+
+/*
+ * Buffer up to count characters from buf to be written.  If we don't have
+ * other characters buffered, enable the tx interrupt to start sending
+ */
+static int duart_write(struct tty_struct *tty, const unsigned char *buf,
+		       int count)
+{
+	uart_state_t *us;
+	int c, t, total = 0;
+	unsigned long flags;
+
+	if (!tty) return 0;
+
+	us = tty->driver_data;
+	if (!us) return 0;
+
+	pr_debug("duart_write called for %i chars by %i (%s)\n", count, current->pid, current->comm);
+
+	spin_lock_irqsave(&us->outp_lock, flags);
+
+	for (;;) {
+		c = count;
+
+		t = SERIAL_XMIT_SIZE - us->outp_tail;
+		if (t < c) c = t;
+
+		t = SERIAL_XMIT_SIZE - 1 - us->outp_count;
+		if (t < c) c = t;
+
+		if (c <= 0) break;
+
+		memcpy(us->outp_buf + us->outp_tail, buf, c);
+
+		us->outp_count += c;
+		us->outp_tail = (us->outp_tail + c) & (SERIAL_XMIT_SIZE - 1);
+		buf += c;
+		count -= c;
+		total += c;
+	}
+
+	spin_unlock_irqrestore(&us->outp_lock, flags);
+
+	if (us->outp_count && !tty->stopped && 
+	    !tty->hw_stopped && !(us->flags & TX_INTEN)) {
+		us->flags |= TX_INTEN;
+		duart_unmask_ints(us->line, M_DUART_IMR_TX);
+	}
+
+	return total;
+}
+
+
+/* Buffer one character to be written.  If there's not room for it, just drop
+   it on the floor.  This is used for echo, among other things */
+static void duart_put_char(struct tty_struct *tty, u_char ch)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	unsigned long flags;
+
+	pr_debug("duart_put_char called.  Char is %x (%c)\n", (int)ch, ch);
+
+	spin_lock_irqsave(&us->outp_lock, flags);
+
+	if (us->outp_count == SERIAL_XMIT_SIZE) {
+		spin_unlock_irqrestore(&us->outp_lock, flags);
+		return;
+	}
+
+	us->outp_buf[us->outp_tail] = ch;
+	us->outp_tail = (us->outp_tail + 1) &(SERIAL_XMIT_SIZE-1);
+	us->outp_count++;
+
+	spin_unlock_irqrestore(&us->outp_lock, flags);
+}
+
+static void duart_flush_chars(struct tty_struct * tty)
+{
+	uart_state_t *port;
+
+	if (!tty) return;
+
+	port = tty->driver_data;
+
+	if (!port) return;
+
+	if (port->outp_count <= 0 || tty->stopped || tty->hw_stopped) {
+		return;
+	}
+
+	port->flags |= TX_INTEN;
+	duart_unmask_ints(port->line, M_DUART_IMR_TX);
+}
+
+/* Return the number of characters in the output buffer that have yet to be 
+   written */
+static int duart_chars_in_buffer(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	int retval;
+
+	retval = us->outp_count;
+
+	pr_debug("duart_chars_in_buffer returning %i\n", retval);
+
+	return retval;
+}
+
+/* Kill everything we haven't yet shoved into the FIFO.  Turn off the
+   transmit interrupt since we've nothing more to transmit */
+static void duart_flush_buffer(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	unsigned long flags;
+
+	pr_debug("duart_flush_buffer called\n");
+	spin_lock_irqsave(&us->outp_lock, flags);
+	us->outp_head = us->outp_tail = us->outp_count = 0;
+	spin_unlock_irqrestore(&us->outp_lock, flags);
+
+	wake_up_interruptible(&us->tty->write_wait);
+	if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+	    tty->ldisc.write_wakeup)
+		tty->ldisc.write_wakeup(tty);
+}
+
+
+/* See sb1250 user manual for details on these registers */
+static inline void duart_set_cflag(unsigned int line, unsigned int cflag)
+{
+	unsigned int mode_reg1 = 0, mode_reg2 = 0;
+	unsigned int clk_divisor;
+	uart_state_t *port = uart_states + line;
+
+	switch (cflag & CSIZE) {
+	case CS7:
+		mode_reg1 |= V_DUART_BITS_PER_CHAR_7;
+		
+	default:
+		/* We don't handle CS5 or CS6; is there a way we're supposed to flag this?
+		   Right now we just force them to CS8. */
+		mode_reg1 |= 0x0;
+		break;
+	}
+	if (cflag & CSTOPB) {
+	        mode_reg2 |= M_DUART_STOP_BIT_LEN_2;
+	}
+	if (!(cflag & PARENB)) {
+	        mode_reg1 |= V_DUART_PARITY_MODE_NONE;
+	}
+	if (cflag & PARODD) {
+		mode_reg1 |= M_DUART_PARITY_TYPE_ODD;
+	}
+	
+	/* Formula for this is (5000000/baud)-1, but we saturate
+	   at 12 bits, which means we can't actually do anything less
+	   than 1200 baud */
+	switch (cflag & CBAUD) {
+	case B200:	
+	case B300:	
+	case B1200:	clk_divisor = 4095;		break;
+	case B1800:	clk_divisor = 2776;		break;
+	case B2400:	clk_divisor = 2082;		break;
+	case B4800:	clk_divisor = 1040;		break;
+	default:
+	case B9600:	clk_divisor = 519;		break;
+	case B19200:	clk_divisor = 259;		break;
+	case B38400:	clk_divisor = 129;		break;
+	case B57600:	clk_divisor = 85;		break;
+	case B115200:	clk_divisor = 42;		break;
+	}
+	WRITE_SERCSR(mode_reg1, port->mode_1, port->line);
+	WRITE_SERCSR(mode_reg2, port->mode_2, port->line);
+	WRITE_SERCSR(clk_divisor, port->clk_sel, port->line);
+	port->last_cflags = cflag;
+}
+
+
+/* Handle notification of a termios change.  */
+static void duart_set_termios(struct tty_struct *tty, struct termios *old)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+
+	pr_debug("duart_set_termios called by %i (%s)\n", current->pid, current->comm);
+	if (old && tty->termios->c_cflag == old->c_cflag)
+		return;
+	duart_set_cflag(us->line, tty->termios->c_cflag);
+}
+
+static int get_serial_info(uart_state_t *us, struct serial_struct * retinfo) {
+
+	struct serial_struct tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.type=PORT_SB1250;
+	tmp.line=us->line;
+	tmp.port=UNIT_CHANREG(tmp.line,0);
+	tmp.irq=UNIT_INT(tmp.line);
+	tmp.xmit_fifo_size=16; /* fixed by hw */
+	tmp.baud_base=5000000;
+	tmp.io_type=SERIAL_IO_MEM;
+
+	if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int duart_ioctl(struct tty_struct *tty, struct file * file,
+		       unsigned int cmd, unsigned long arg)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+
+/*	if (serial_paranoia_check(info, tty->device, "rs_ioctl"))
+	return -ENODEV;*/
+	switch (cmd) {
+	case TIOCMGET:
+		printk("Ignoring TIOCMGET\n");
+		break;
+	case TIOCMBIS:
+		printk("Ignoring TIOCMBIS\n");
+		break;
+	case TIOCMBIC:
+		printk("Ignoring TIOCMBIC\n");
+		break;
+	case TIOCMSET:
+		printk("Ignoring TIOCMSET\n");
+		break;
+	case TIOCGSERIAL:
+		return get_serial_info(us,(struct serial_struct *) arg);
+	case TIOCSSERIAL:
+		printk("Ignoring TIOCSSERIAL\n");
+		break;
+	case TIOCSERCONFIG:
+		printk("Ignoring TIOCSERCONFIG\n");
+		break;
+	case TIOCSERGETLSR: /* Get line status register */
+		printk("Ignoring TIOCSERGETLSR\n");
+		break;
+	case TIOCSERGSTRUCT:
+		printk("Ignoring TIOCSERGSTRUCT\n");
+		break;
+	case TIOCMIWAIT:
+		printk("Ignoring TIOCMIWAIT\n");
+		break;
+	case TIOCGICOUNT:
+		printk("Ignoring TIOCGICOUNT\n");
+		break;
+	case TIOCSERGWILD:
+		printk("Ignoring TIOCSERGWILD\n");
+		break;
+	case TIOCSERSWILD:
+		printk("Ignoring TIOCSERSWILD\n");
+		break;
+	default:
+		break;
+	}
+//	printk("Ignoring IOCTL %x from pid %i (%s)\n", cmd, current->pid, current->comm);
+	return -ENOIOCTLCMD;
+}
+
+/* XXXKW locking? */
+static void duart_start(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+
+	pr_debug("duart_start called\n");
+
+	if (us->outp_count && !(us->flags & TX_INTEN)) {
+		us->flags |= TX_INTEN;
+		duart_unmask_ints(us->line, M_DUART_IMR_TX);
+	}
+}
+
+/* XXXKW locking? */
+static void duart_stop(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+
+	pr_debug("duart_stop called\n");
+
+	if (us->outp_count && (us->flags & TX_INTEN)) {
+		us->flags &= ~TX_INTEN;
+		duart_mask_ints(us->line, M_DUART_IMR_TX);
+	}
+}
+
+/* Not sure on the semantics of this; are we supposed to wait until the stuff
+   already in the hardware FIFO drains, or are we supposed to wait until 
+   we've drained the output buffer, too?  I'm assuming the former, because that's
+   what the other drivers seem to assume 
+*/
+
+static void duart_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	unsigned long orig_jiffies;
+
+	orig_jiffies = jiffies;
+	pr_debug("duart_wait_until_sent(%d)+\n", timeout);
+	while (!(READ_SERCSR(us->status, us->line) & M_DUART_TX_EMT)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+	 	schedule_timeout(1);
+		if (signal_pending(current))
+			break;
+		if (timeout && time_after(jiffies, orig_jiffies + timeout))
+			break;
+	}
+	pr_debug("duart_wait_until_sent()-\n");
+}
+
+/*
+ * duart_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void duart_hangup(struct tty_struct *tty)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+
+	duart_flush_buffer(tty);
+	us->open = 0;
+	us->tty = 0;
+}
+
+/*
+ * Open a tty line.  Note that this can be called multiple times, so ->open can
+ * be >1.  Only set up the tty struct if this is a "new" open, e.g. ->open was
+ * zero
+ */
+static int duart_open(struct tty_struct *tty, struct file *filp)
+{
+	uart_state_t *us;
+	unsigned int line = tty->index;
+	unsigned long flags;
+
+	if ((line >= tty->driver->num) || !sb1250_duart_present[line])
+		return -ENODEV;
+
+	pr_debug("duart_open called by %i (%s), tty is %p, rw is %p, ww is %p\n",
+	       current->pid, current->comm, tty, tty->read_wait,
+	       tty->write_wait);
+
+	us = uart_states + line;
+	tty->driver_data = us;
+
+	spin_lock_irqsave(&open_lock, flags);
+	if (!us->open) {
+		us->tty = tty;
+		us->tty->termios->c_cflag = us->last_cflags;
+	}
+	us->open++;
+	us->flags &= ~TX_INTEN;
+	duart_unmask_ints(line, M_DUART_IMR_RX);
+	spin_unlock_irqrestore(&open_lock, flags);
+
+	return 0;
+}
+
+
+/*
+ * Close a reference count out.  If reference count hits zero, null the
+ * tty, kill the interrupts.  The tty_io driver is responsible for making
+ * sure we've cleared out our internal buffers before calling close()
+ */
+static void duart_close(struct tty_struct *tty, struct file *filp)
+{
+	uart_state_t *us = (uart_state_t *) tty->driver_data;
+	unsigned long flags;
+
+	pr_debug("duart_close called by %i (%s)\n", current->pid, current->comm);
+
+	if (!us || !us->open)
+		return;
+
+	spin_lock_irqsave(&open_lock, flags);
+	if (tty_hung_up_p(filp)) {
+		spin_unlock_irqrestore(&open_lock, flags);
+		return;
+	}
+
+	if (--us->open < 0) {
+		us->open = 0;
+		printk(KERN_ERR "duart: bad open count: %d\n", us->open);
+	}
+	if (us->open) {
+		spin_unlock_irqrestore(&open_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&open_lock, flags);
+
+	tty->closing = 1;
+
+	/* Stop accepting input */
+	duart_mask_ints(us->line, M_DUART_IMR_RX);
+	/* Wait for FIFO to drain */
+	while (!(READ_SERCSR(us->status, us->line) & M_DUART_TX_EMT))
+		;
+
+	if (tty->driver->flush_buffer)
+		tty->driver->flush_buffer(tty);
+	if (tty->ldisc.flush_buffer)
+		tty->ldisc.flush_buffer(tty);
+	tty->closing = 0;
+}
+
+
+static struct tty_operations duart_ops = {
+        .open   = duart_open,
+        .close = duart_close,
+        .write = duart_write,
+        .put_char = duart_put_char,
+        .flush_chars = duart_flush_chars,
+        .write_room = duart_write_room,
+        .chars_in_buffer = duart_chars_in_buffer,
+        .flush_buffer = duart_flush_buffer,
+        .ioctl = duart_ioctl,
+//        .throttle = duart_throttle,
+//        .unthrottle = duart_unthrottle,
+        .set_termios = duart_set_termios,
+        .stop = duart_stop,
+        .start = duart_start,
+        .hangup = duart_hangup,
+	.wait_until_sent = duart_wait_until_sent,
+};
+
+/* Initialize the sb1250_duart_present array based on SOC type.  */
+static void __init sb1250_duart_init_present_lines(void)
+{
+	int i, max_lines;
+
+	/* Set the number of available units based on the SOC type.  */
+	switch (soc_type) {
+	case K_SYS_SOC_TYPE_BCM1x55:
+	case K_SYS_SOC_TYPE_BCM1x80:
+		max_lines = 4;
+		break;
+	default:
+		/* Assume at least two serial ports at the normal address.  */
+		max_lines = 2;
+		break;
+	}
+	if (max_lines > DUART_MAX_LINE)
+		max_lines = DUART_MAX_LINE;
+
+	for (i = 0; i < max_lines; i++)
+		sb1250_duart_present[i] = 1;
+}
+
+/* Set up the driver and register it, register the UART interrupts.  This
+   is called from tty_init, or as a part of the module init */
+static int __init sb1250_duart_init(void) 
+{
+	int i;
+
+	sb1250_duart_init_present_lines();
+
+	sb1250_duart_driver = alloc_tty_driver(DUART_MAX_LINE);
+	if (!sb1250_duart_driver)
+		return -ENOMEM;
+
+	sb1250_duart_driver->owner = THIS_MODULE;
+	sb1250_duart_driver->name = "duart";
+	sb1250_duart_driver->major = TTY_MAJOR;
+	sb1250_duart_driver->minor_start = SB1250_DUART_MINOR_BASE;
+	sb1250_duart_driver->type            = TTY_DRIVER_TYPE_SERIAL;
+	sb1250_duart_driver->subtype         = SERIAL_TYPE_NORMAL;
+	sb1250_duart_driver->init_termios    = tty_std_termios;
+	sb1250_duart_driver->flags           = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(sb1250_duart_driver, &duart_ops);
+
+	for (i=0; i<DUART_MAX_LINE; i++) {
+		uart_state_t *port = uart_states + i;
+
+		if (!sb1250_duart_present[i])
+			continue;
+
+		init_duart_port(port, i);
+		spin_lock_init(&port->outp_lock);
+		duart_mask_ints(i, M_DUART_IMR_ALL);
+		if (request_irq(UNIT_INT(i), duart_int, 0, "uart", port)) {
+			panic("Couldn't get uart0 interrupt line");
+		}
+		__raw_writeq(M_DUART_RX_EN|M_DUART_TX_EN,
+			     IOADDR(UNIT_CHANREG(i, R_DUART_CMD)));
+		duart_set_cflag(i, DEFAULT_CFLAGS);
+	}
+
+	/* Interrupts are now active, our ISR can be called. */
+
+	if (tty_register_driver(sb1250_duart_driver)) {
+		printk(KERN_ERR "Couldn't register sb1250 duart serial driver\n");
+		put_tty_driver(sb1250_duart_driver);
+		return 1;
+	}
+	return 0;
+}
+
+/* Unload the driver.  Unregister stuff, get ready to go away */
+static void __exit sb1250_duart_fini(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	tty_unregister_driver(sb1250_duart_driver);
+	put_tty_driver(sb1250_duart_driver);
+
+	for (i=0; i<DUART_MAX_LINE; i++) {
+		if (!sb1250_duart_present[i])
+			continue;
+		free_irq(UNIT_INT(i), &uart_states[i]);
+		disable_irq(UNIT_INT(i));
+	}
+	local_irq_restore(flags);
+}
+
+module_init(sb1250_duart_init);
+module_exit(sb1250_duart_fini);
+MODULE_DESCRIPTION("SB1250 Duart serial driver");
+MODULE_AUTHOR("Broadcom Corp.");
+
+#ifdef CONFIG_SIBYTE_SB1250_DUART_CONSOLE
+
+/*
+ * Serial console stuff.  Very basic, polling driver for doing serial
+ * console output.  The console_sem is held by the caller, so we
+ * shouldn't be interrupted for more console activity.
+ * XXXKW What about getting interrupted by uart driver activity?
+ */
+
+void serial_outc(unsigned char c, int line)
+{
+	uart_state_t *port = uart_states + line;
+	while (!(READ_SERCSR(port->status, line) & M_DUART_TX_RDY)) ;
+	WRITE_SERCSR(c, port->tx_hold, line);
+	while (!(READ_SERCSR(port->status, port->line) & M_DUART_TX_EMT)) ;
+}
+
+static void ser_console_write(struct console *cons, const char *s,
+	unsigned int count)
+{
+	int line = cons->index;
+	uart_state_t *port = uart_states + line;
+	u32 imr;
+
+	imr = READ_SERCSR(port->imr, line);
+	WRITE_SERCSR(0, port->imr, line);
+	while (count--) {
+		if (*s == '\n')
+			serial_outc('\r', line);
+		serial_outc(*s++, line);
+    	}
+	WRITE_SERCSR(imr, port->imr, line);
+}
+
+static struct tty_driver *ser_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return sb1250_duart_driver;
+}
+
+static int ser_console_setup(struct console *cons, char *str)
+{
+	int i;
+
+	sb1250_duart_init_present_lines();
+
+	for (i=0; i<DUART_MAX_LINE; i++) {
+		uart_state_t *port = uart_states + i;
+
+		if (!sb1250_duart_present[i])
+			continue;
+
+		init_duart_port(port, i);
+#if SIBYTE_1956_WAR
+		last_mode1[i] = V_DUART_PARITY_MODE_NONE|V_DUART_BITS_PER_CHAR_8;
+#endif
+		WRITE_SERCSR(V_DUART_PARITY_MODE_NONE|V_DUART_BITS_PER_CHAR_8,
+			     port->mode_1, i);
+		WRITE_SERCSR(M_DUART_STOP_BIT_LEN_1,
+			     port->mode_2, i);
+		WRITE_SERCSR(V_DUART_BAUD_RATE(115200),
+			     port->clk_sel, i);
+		WRITE_SERCSR(M_DUART_RX_EN|M_DUART_TX_EN,
+			     port->cmd, i);
+	}
+	return 0;
+}
+
+static struct console sb1250_ser_cons = {
+	.name		= "duart",
+	.write		= ser_console_write,
+	.device		= ser_console_device,
+	.setup		= ser_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+};
+
+static int __init sb1250_serial_console_init(void)
+{
+	register_console(&sb1250_ser_cons);
+	return 0;
+}
+
+console_initcall(sb1250_serial_console_init);
+
+#endif /* CONFIG_SIBYTE_SB1250_DUART_CONSOLE */
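
As a cross-check of the baud-rate table in duart_set_cflag() above, the rule quoted in its comment, divisor = (5000000 / baud) - 1 saturated to the 12-bit CLK_SEL field, reproduces the table's entries. A small standalone sketch, illustrative only and not part of the patch:

#include <stdio.h>

/* divisor = (5000000 / baud) - 1, clamped to the 12-bit CLK_SEL field;
   this is why the driver's table bottoms out at 1200 baud (4095). */
static unsigned int duart_clk_divisor(unsigned int baud)
{
	unsigned int div = 5000000 / baud - 1;

	return div > 4095 ? 4095 : div;
}

int main(void)
{
	static const unsigned int rates[] =
		{ 1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u baud -> divisor %4u\n",
		       rates[i], duart_clk_divisor(rates[i]));
	return 0;
}
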
diff -Naur linux-2.6.19/drivers/ide/mips/Makefile linux-mips-2.6.19/drivers/ide/mips/Makefile
--- linux-2.6.19/drivers/ide/mips/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/ide/mips/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -1,4 +1,4 @@
 obj-$(CONFIG_BLK_DEV_IDE_SWARM)		+= swarm.o
 obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)	+= au1xxx-ide.o
 
-EXTRA_CFLAGS    := -Idrivers/ide
+CFLAGS_au1xxx-ide.o := -Idrivers/ide
diff -Naur linux-2.6.19/drivers/mtd/devices/docprobe.c linux-mips-2.6.19/drivers/mtd/devices/docprobe.c
--- linux-2.6.19/drivers/mtd/devices/docprobe.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/mtd/devices/docprobe.c	2006-11-29 15:23:09.000000000 -0800
@@ -83,10 +83,10 @@
 	0xe4000000,
 #elif defined(CONFIG_MOMENCO_OCELOT)
 	0x2f000000,
-        0xff000000,
+	0xff000000,
 #elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
-        0xff000000,
-##else
+	0xff000000,
+#else
 #warning Unknown architecture for DiskOnChip. No default probe locations defined
 #endif
 	0xffffffff };
diff -Naur linux-2.6.19/drivers/mtd/maps/lasat.c linux-mips-2.6.19/drivers/mtd/maps/lasat.c
--- linux-2.6.19/drivers/mtd/maps/lasat.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/mtd/maps/lasat.c	2006-11-29 15:23:09.000000000 -0800
@@ -7,7 +7,7 @@
  * modify it under the terms of the GNU General Public License version
  * 2 as published by the Free Software Foundation.
  *
- * $Id: lasat.c,v 1.9 2004/11/04 13:24:15 gleixner Exp $
+ * $Id: lasat.c,v 1.7 2004/07/12 21:59:44 dwmw2 Exp $
  *
  */
 
@@ -49,7 +49,7 @@
 	ENABLE_VPP((&lasat_map));
 
 	lasat_map.phys = lasat_flash_partition_start(LASAT_MTD_BOOTLOADER);
-	lasat_map.virt = ioremap_nocache(
+	lasat_map.virt = (unsigned long)ioremap_nocache(
 		        lasat_map.phys, lasat_board_info.li_flash_size);
 	lasat_map.size = lasat_board_info.li_flash_size;
 
diff -Naur linux-2.6.19/drivers/net/Kconfig linux-mips-2.6.19/drivers/net/Kconfig
--- linux-2.6.19/drivers/net/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -441,6 +441,14 @@
 	  This is the driver for the onboard card of MIPS Magnum 4000,
 	  Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
 
+config GALILEO_64240_ETH
+	tristate "Galileo GT64240 Ethernet support"
+	depends on NET_ETHERNET && MOMENCO_OCELOT_G
+	select MII
+	help
+	  This is the driver for the ethernet interfaces integrated into
+	  the Galileo (now Marvell) GT64240 chipset.
+
 config MIPS_AU1X00_ENET
 	bool "MIPS AU1000 Ethernet support"
 	depends on NET_ETHERNET && SOC_AU1X00
@@ -450,10 +458,6 @@
 	  If you have an Alchemy Semi AU1X00 based system
 	  say Y.  Otherwise, say N.
 
-config NET_SB1250_MAC
-	tristate "SB1250 Ethernet support"
-	depends on NET_ETHERNET && SIBYTE_SB1xxx_SOC
-
 config SGI_IOC3_ETH
 	bool "SGI IOC3 Ethernet"
 	depends on NET_ETHERNET && PCI && SGI_IP27
@@ -464,25 +468,13 @@
 	  the Ethernet-HOWTO, available from
 	  <http://www.tldp.org/docs.html#howto>.
 
-config SGI_IOC3_ETH_HW_RX_CSUM
-	bool "Receive hardware checksums"
-	depends on SGI_IOC3_ETH && INET
-	default y
-	help
-	  The SGI IOC3 network adapter supports TCP and UDP checksums in
-	  hardware to offload processing of these checksums from the CPU.  At
-	  the moment only acceleration of IPv4 is supported.  This option
-	  enables offloading for checksums on receive.  If unsure, say Y.
-
-config SGI_IOC3_ETH_HW_TX_CSUM
-	bool "Transmit hardware checksums"
-	depends on SGI_IOC3_ETH && INET
-	default y
+config MIPS_SIM_NET
+	tristate "MIPS simulator Network device (EXPERIMENTAL)"
+	depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
 	help
-	  The SGI IOC3 network adapter supports TCP and UDP checksums in
-	  hardware to offload processing of these checksums from the CPU.  At
-	  the moment only acceleration of IPv4 is supported.  This option
-	  enables offloading for checksums on transmit.  If unsure, say Y.
+	  The MIPSNET device is a simple Ethernet network device which is
+	  emulated by the MIPS Simulator.
+	  If you are not using a MIPSsim or are unsure, say N.
 
 config MIPS_SIM_NET
 	tristate "MIPS simulator Network device (EXPERIMENTAL)"
@@ -2065,6 +2057,10 @@
 
 	  If in doubt, say N.
 
+config NET_SB1250_MAC
+	tristate "SB1250 Ethernet support"
+	depends on SIBYTE_SB1xxx_SOC
+
 config R8169_VLAN
 	bool "VLAN support"
 	depends on R8169 && VLAN_8021Q
@@ -2296,8 +2292,8 @@
 	select MII
 	help
 	  This driver supports the gigabit Ethernet on the Marvell MV643XX
-	  chipset which is used in the Momenco Ocelot C and Jaguar ATX and
-	  Pegasos II, amongst other PPC and MIPS boards.
+	  chipset which is used in the Momenco Ocelot C and Ocelot, Jaguar ATX
+	  and Pegasos II, amongst other PPC and MIPS boards.
 
 config MV643XX_ETH_0
 	bool "MV-643XX Port 0"
@@ -2320,6 +2316,20 @@
 	  This enables support for Port 2 of the Marvell MV643XX Gigabit
 	  Ethernet.
 
+config BIG_SUR_FE
+	bool "PMC-Sierra TITAN Fast Ethernet Support"
+	depends on NET_ETHERNET && PMC_BIG_SUR
+	help
+	  This enables support for the integrated Ethernet of
+	  PMC-Sierra's Big Sur SoC.
+
+config TITAN_GE
+	bool "PMC-Sierra TITAN Gigabit Ethernet Support"
+	depends on PMC_YOSEMITE
+	help
+	  This enables support for the integrated Ethernet of
+	  PMC-Sierra's Titan SoC.
+
 config QLA3XXX
 	tristate "QLogic QLA3XXX Network Driver Support"
 	depends on PCI
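
As a rough illustration only (not part of the diff itself), a board
configuration that already selects the required platform symbols would
end up with entries like the following in its .config:

	CONFIG_GALILEO_64240_ETH=y
	CONFIG_BIG_SUR_FE=y
	CONFIG_TITAN_GE=y

The values are just an example; GALILEO_64240_ETH is a tristate and may
also be built as a module, while BIG_SUR_FE and TITAN_GE are bool.
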
diff -Naur linux-2.6.19/drivers/net/Makefile linux-mips-2.6.19/drivers/net/Makefile
--- linux-2.6.19/drivers/net/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -112,6 +112,10 @@
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
+obj-$(CONFIG_GALILEO_64240_ETH) += gt64240eth.o
+obj-$(CONFIG_BIG_SUR_FE) += big_sur_ge.o
+obj-$(CONFIG_TITAN_GE) += titan_mdio.o titan_ge.o
+
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
diff -Naur linux-2.6.19/drivers/net/big_sur_ge.c linux-mips-2.6.19/drivers/net/big_sur_ge.c
--- linux-2.6.19/drivers/net/big_sur_ge.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/big_sur_ge.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,2004 @@
+/*
+ * drivers/net/big_sur_ge.c - Driver for PMC-Sierra Big Sur ethernet ports
+ *
+ * Copyright (C) 2003 PMC-Sierra Inc.
+ * Author : Manish Lachwani (lachwani@pmc-sierra.com)
+ * Copyright (C) 2003 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+/*************************************************************************
+ * Description :
+ *
+ * The driver has three modes of operation: FIFO non-DMA, Simple DMA
+ * and SG DMA. There is also a Polled mode and an Interrupt mode of
+ * operation. SG DMA should do zerocopy and checksum offload. Zerocopy
+ * on the Rx will probably also work. Simple DMA is the non-zerocopy
+ * case on both the Tx and the Rx.
+ *
+ * We turn on Simple DMA and interrupt mode. Support has also been added
+ * for the SG mode, but not for the polled mode. This is a Fast Ethernet
+ * driver, although there will be support for Gigabit soon.
+ *
+ * The driver is divided into two parts: a hardware-dependent part and a
+ * hardware-independent part. There is currently no support for checksum
+ * offload, zerocopy or Rx NAPI. There is support for interrupt mitigation.
+ ****************************************************************************/
+
+/*************************************************************
+ * Hardware Independent Part of the driver
+ *************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "big_sur_ge.h"
+
+#define TX_TIMEOUT (60*HZ)	/* Transmission timeout is 60 seconds. */
+
+static struct net_device *dev_list = NULL;
+static DEFINE_SPINLOCK(dev_lock);
+
+typedef enum DUPLEX { UNKNOWN, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+/* Big Sur Ethernet MAC structure */
+struct big_sur_ge_enet {
+	struct net_device_stats stats;	/* Statistics for this device */
+	struct net_device *next_dev;	/* The next device in dev_list */
+	struct timer_list phy_timer;	/* PHY monitoring timer */
+	u32 index;		/* Which interface is this */
+	u32 save_base_address;	/* Saved physical base address */
+	struct sk_buff *saved_skb;	/* skb being transmitted */
+	spinlock_t lock;	/* For atomic access to saved_skb */
+	u8 mii_addr;		/* The MII address of the PHY */
+	big_sur_ge emac;	/* GE driver structure */
+};
+
+/* Manish : For testing purposes only */
+static unsigned char big_sur_mac_addr_base[6] = {
+	0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+};
+
+/*********************************************************************
+ * Function Prototypes (whole bunch of them)
+ *********************************************************************/
+static unsigned long big_sur_ge_dma_control(xdma_channel *);
+static void big_sur_ge_dma_reset(xdma_channel *);
+static void handle_fifo_intr(big_sur_ge *);
+static void big_sur_ge_check_fifo_recv_error(big_sur_ge *);
+static void big_sur_ge_check_fifo_send_error(big_sur_ge *);
+static int big_sur_ge_config_fifo(big_sur_ge *);
+static big_sur_ge_config *big_sur_ge_lookup_config(unsigned int);
+static int big_sur_ge_config_dma(big_sur_ge *);
+static void big_sur_ge_enet_reset(big_sur_ge *);
+static void big_sur_ge_check_mac_error(big_sur_ge *, unsigned long);
+
+/*********************************************************************
+ * DMA Channel Initialization
+ **********************************************************************/
+static int big_sur_ge_dma_init(xdma_channel * dma, unsigned long base_address)
+{
+	dma->reg_base_address = base_address;
+	dma->get_ptr = NULL;
+	dma->put_ptr = NULL;
+	dma->commit_ptr = NULL;
+	dma->last_ptr = NULL;
+	dma->total_desc_count = 0;
+	dma->active_desc_count = 0;
+	dma->ready = 1;		/* DMA channel is ready */
+
+	big_sur_ge_dma_reset(dma);
+
+	return 0;
+}
+
+/*********************************************************************
+ * Is the DMA channel ready yet ?
+ **********************************************************************/
+static int big_sur_ge_dma_ready(xdma_channel * dma)
+{
+	return dma->ready == 1;
+}
+
+/*********************************************************************
+ * Perform the self test on the DMA channel
+ **********************************************************************/
+#define BIG_SUR_GE_CONTROL_REG_RESET_MASK	0x98000000
+
+static int big_sur_ge_dma_self_test(xdma_channel * dma)
+{
+	unsigned long reg_data;
+
+	big_sur_ge_dma_reset(dma);
+
+	reg_data = big_sur_ge_dma_control(dma);
+	if (reg_data != BIG_SUR_GE_CONTROL_REG_RESET_MASK) {
+		printk(KERN_ERR "DMA Channel Self Test Failed \n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*********************************************************************
+ * Reset the DMA channel
+ **********************************************************************/
+static void big_sur_ge_dma_reset(xdma_channel * dma)
+{
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_RST_REG_OFFSET,
+			 BIG_SUR_GE_RESET_MASK);
+}
+
+/*********************************************************************
+ * Get control of the DMA channel
+ **********************************************************************/
+static unsigned long big_sur_ge_dma_control(xdma_channel * dma)
+{
+	return BIG_SUR_GE_READ(dma->reg_base_address +
+			       BIG_SUR_GE_DMAC_REG_OFFSET);
+}
+
+/*********************************************************************
+ * Set control of the DMA channel
+ **********************************************************************/
+static void big_sur_ge_set_dma_control(xdma_channel * dma, unsigned long control)
+{
+	BIG_SUR_GE_WRITE(dma->reg_base_address +
+			 BIG_SUR_GE_DMAC_REG_OFFSET, control);
+}
+
+/*********************************************************************
+ * Get the status of the DMA channel
+ *********************************************************************/
+static unsigned long big_sur_ge_dma_status(xdma_channel * dma)
+{
+	return BIG_SUR_GE_READ(dma->reg_base_address +
+			       BIG_SUR_GE_DMAS_REG_OFFSET);
+}
+
+/*********************************************************************
+ * Set the interrupt status of the DMA channel
+ *********************************************************************/
+static void big_sur_ge_set_intr_status(xdma_channel * dma, unsigned long status)
+{
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_IS_REG_OFFSET,
+			 status);
+}
+
+/*********************************************************************
+ * Get the interrupt status of the DMA channel
+ *********************************************************************/
+static unsigned long big_sur_ge_get_intr_status(xdma_channel * dma)
+{
+	return BIG_SUR_GE_READ(dma->reg_base_address +
+			       BIG_SUR_GE_IS_REG_OFFSET);
+}
+
+/*********************************************************************
+ * Set the Interrupt Enable
+ *********************************************************************/
+static void big_sur_ge_set_intr_enable(xdma_channel * dma, unsigned long enable)
+{
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_IE_REG_OFFSET,
+			 enable);
+}
+
+/*********************************************************************
+ * Get the Interrupt Enable field to make a check
+ *********************************************************************/
+static unsigned long big_sur_ge_get_intr_enable(xdma_channel * dma)
+{
+	return BIG_SUR_GE_READ(dma->reg_base_address +
+			       BIG_SUR_GE_IE_REG_OFFSET);
+}
+
+/*********************************************************************
+ * Transfer the data over the DMA channel
+ *********************************************************************/
+static void big_sur_ge_dma_transfer(xdma_channel * dma, unsigned long *source,
+			     unsigned long *dest, unsigned long length)
+{
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_SA_REG_OFFSET,
+			 (unsigned long) source);
+
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_DA_REG_OFFSET,
+			 (unsigned long) dest);
+
+	BIG_SUR_GE_WRITE(dma->reg_base_address + BIG_SUR_GE_LEN_REG_OFFSET,
+			 length);
+}
+
+/*********************************************************************
+ * Get the DMA descriptor
+ *********************************************************************/
+static int big_sur_ge_get_descriptor(xdma_channel * dma,
+			      xbuf_descriptor ** buffer_desc)
+{
+	unsigned long reg_data;
+
+	reg_data = xbuf_descriptor_GetControl(dma->get_ptr);
+	xbuf_descriptor_SetControl(dma->get_ptr, reg_data |
+				   BIG_SUR_GE_DMACR_SG_DISABLE_MASK);
+
+	*buffer_desc = dma->get_ptr;
+
+	dma->get_ptr = xbuf_descriptor_GetNextPtr(dma->get_ptr);
+	dma->active_desc_count--;
+
+	return 0;
+}
+
+/*********************************************************************
+ * Get the packet count
+ *********************************************************************/
+static int big_sur_ge_get_packet_count(xdma_channel * dma)
+{
+	return (BIG_SUR_GE_READ
+		(dma->reg_base_address + BIG_SUR_GE_UPC_REG_OFFSET));
+}
+
+/*********************************************************************
+ * Decrement the packet count
+ *********************************************************************/
+static void big_sur_ge_decr_packet_count(xdma_channel * dma)
+{
+	unsigned long reg_data;
+
+	reg_data =
+	    BIG_SUR_GE_READ(dma->reg_base_address +
+			    BIG_SUR_GE_UPC_REG_OFFSET);
+	if (reg_data > 0)
+		BIG_SUR_GE_WRITE(dma->reg_base_address +
+				 BIG_SUR_GE_UPC_REG_OFFSET, 1);
+}
+
+/****************************************************************************
+ * Start of the code that deals with the Packet Fifo
+ *****************************************************************************/
+
+/****************************************************************************
+ * Init the packet fifo
+ ****************************************************************************/
+static int packet_fifo_init(packet_fifo * fifo, u32 reg, u32 data)
+{
+	fifo->reg_base_addr = reg;
+	fifo->data_base_address = data;
+	fifo->ready_status = 1;
+
+	BIG_SUR_GE_FIFO_RESET(fifo);
+
+	return 0;
+}
+
+/****************************************************************************
+ * Packet fifo self test
+ ****************************************************************************/
+static int packet_fifo_self_test(packet_fifo * fifo, unsigned long type)
+{
+	unsigned long reg_data;
+
+	BIG_SUR_GE_FIFO_RESET(fifo);
+	reg_data =
+	    BIG_SUR_GE_READ(fifo->reg_base_addr +
+			    BIG_SUR_GE_COUNT_STATUS_REG_OFFSET);
+
+	if (type == BIG_SUR_GE_READ_FIFO_TYPE) {
+		if (reg_data != BIG_SUR_GE_EMPTY_FULL_MASK) {
+			printk(KERN_ERR "Read FIFO not empty \n");
+			return -1;
+		}
+	} else if (!(reg_data & BIG_SUR_GE_EMPTY_FULL_MASK)) {
+		printk(KERN_ERR "Write FIFO is full \n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/****************************************************************************
+ * Packet FIFO read
+ ****************************************************************************/
+static int packet_fifo_read(packet_fifo * fifo, u8 * buffer, unsigned int len)
+{
+	unsigned long fifo_count, word_count, extra_byte;
+	unsigned long *buffer_data = (unsigned long *) buffer;
+
+	fifo_count =
+	    BIG_SUR_GE_READ(fifo->reg_base_addr +
+			    BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT);
+	fifo_count &= BIG_SUR_GE_COUNT_MASK;
+
+	if ((fifo_count * BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT) < len)
+		return -1;
+
+	word_count = len / BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT;
+	extra_byte = len % BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT;
+
+	for (fifo_count = 0; fifo_count < word_count; fifo_count++)
+		buffer_data[fifo_count] =
+		    BIG_SUR_GE_READ(fifo->data_base_address);
+
+	if (extra_byte > 0) {
+		unsigned long last_word;
+		int *extra_buffer_data =
+		    (int *) (buffer_data + word_count);
+
+		last_word = BIG_SUR_GE_READ(fifo->data_base_address);
+		if (extra_byte == 1)
+			extra_buffer_data[0] = (int) (last_word << 24);
+		else if (extra_byte == 2) {
+			extra_buffer_data[0] = (int) (last_word << 24);
+			extra_buffer_data[1] = (int) (last_word << 16);
+		} else if (extra_byte == 3) {
+			extra_buffer_data[0] = (int) (last_word << 24);
+			extra_buffer_data[1] = (int) (last_word << 16);
+			extra_buffer_data[2] = (int) (last_word << 8);
+		}
+	}
+
+	return 0;
+}
+
+/*****************************************************************************
+ * Write the data into the packet fifo
+ *****************************************************************************/
+static int packet_fifo_write(packet_fifo * fifo, int *buffer, int len)
+{
+	unsigned long fifo_count, word_count, extra_byte;
+	unsigned long *buffer_data = (unsigned long *) buffer;
+
+	fifo_count =
+	    BIG_SUR_GE_READ(fifo->reg_base_addr +
+			    BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT);
+	fifo_count &= BIG_SUR_GE_COUNT_MASK;
+
+	word_count = len / BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT;
+	extra_byte = len % BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT;
+
+	/* You should see what the ppc driver does here. It just slobbers */
+	if (extra_byte > 0)
+		if (fifo_count > (word_count + 1)) {
+			printk(KERN_ERR
+			       "No room in the packet send fifo \n");
+			return -1;
+		}
+
+	for (fifo_count = 0; fifo_count < word_count; fifo_count++)
+		BIG_SUR_GE_WRITE(fifo->data_base_address,
+				 buffer_data[fifo_count]);
+
+
+	if (extra_byte > 0) {
+		unsigned long last_word = 0;
+		int *extra_buffer_data =
+		    (int *) (buffer_data + word_count);
+
+		if (extra_byte == 1)
+			last_word = extra_buffer_data[0] << 24;
+		else if (extra_byte == 2)
+			last_word = (extra_buffer_data[0] << 24 |
+				     extra_buffer_data[1] << 16);
+
+		else if (extra_byte == 3)
+			last_word = (extra_buffer_data[0] << 24 |
+				     extra_buffer_data[1] << 16 |
+				     extra_buffer_data[2] << 8);
+
+
+		BIG_SUR_GE_WRITE(fifo->data_base_address, last_word);
+	}
+
+	return 0;
+}
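+
+/*
+ * Worked example of the word/extra-byte split used by the two FIFO
+ * routines above, assuming BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT is 4
+ * (i.e. a 32-bit wide FIFO):
+ *
+ *	len = 10  ->  word_count = 10 / 4 = 2, extra_byte = 10 % 4 = 2
+ *
+ * Two full 32-bit words go through the data port and the remaining two
+ * bytes are packed into the upper bytes of one last word.
+ */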
+
+
+/*****************************************************************************
+ * Interrupt handlers: We handle any errors associated with the FIFO.
+ * The FIFO is for the simple DMA case and we do want to handle the simple
+ * DMA case. We don't handle Scatter Gather DMA for now since it is not working.
+ ******************************************************************************/
+
+/*********************************************************************************
+ * FIFO send for Simple DMA with Interrupts
+ **********************************************************************************/
+static int big_sur_ge_enet_fifo_send(big_sur_ge * emac, u8 * buffer,
+			      unsigned long byte_cnt)
+{
+	unsigned long int_status, reg_data;
+
+	/* Silly checks here that we really don't need */
+	if (!emac->started)
+		return -1;
+
+	if (emac->polled)
+		return -1;
+
+	if (emac->dma_sg)
+		return -1;
+
+	int_status =
+	    BIG_SUR_GE_READ(emac->base_address + XIIF_V123B_IISR_OFFSET);
+	if (int_status & BIG_SUR_GE_EIR_XMIT_LFIFO_FULL_MASK) {
+		printk(KERN_ERR "Tx FIFO error: Queue is Full \n");
+		return -1;
+	}
+
+	/*
+	 * Write the data to the FIFO in the hardware
+	 */
+	if ((BIG_SUR_GE_GET_COUNT(&emac->send_fifo) *
+	     sizeof(unsigned long)) < byte_cnt) {
+		printk(KERN_ERR "Send FIFO on chip is full \n");
+		return -1;
+	}
+
+	if (big_sur_ge_dma_status(&emac->send_channel) &
+	    BIG_SUR_GE_DMASR_BUSY_MASK) {
+		printk(KERN_ERR "Send channel FIFO engine busy \n");
+		return -1;
+	}
+
+	big_sur_ge_set_dma_control(&emac->send_channel,
+				   BIG_SUR_GE_DMACR_SOURCE_INCR_MASK |
+				   BIG_SUR_GE_DMACR_DEST_LOCAL_MASK |
+				   BIG_SUR_GE_DMACR_SG_DISABLE_MASK);
+
+	big_sur_ge_dma_transfer(&emac->send_channel,
+				(unsigned long *) buffer,
+				(unsigned long *) (emac->base_address +
+						   BIG_SUR_GE_PFIFO_TXDATA_OFFSET),
+				byte_cnt);
+
+	reg_data = big_sur_ge_dma_status(&emac->send_channel);
+	while (reg_data & BIG_SUR_GE_DMASR_BUSY_MASK) {
+		reg_data = big_sur_ge_dma_status(&emac->send_channel);
+		if (!(reg_data & BIG_SUR_GE_DMASR_BUSY_MASK))
+			break;
+	}
+
+	if ((reg_data & BIG_SUR_GE_DMASR_BUS_ERROR_MASK) ||
+	    (reg_data & BIG_SUR_GE_DMASR_BUS_TIMEOUT_MASK)) {
+		printk(KERN_ERR "Send side DMA error \n");
+		return -1;
+	}
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_TPLR_OFFSET,
+			 byte_cnt);
+
+	return 0;
+}
+
+/*************************************************************************
+ * FIFO receive for Simple DMA case
+ *************************************************************************/
+static int big_sur_ge_enet_fifo_recv(big_sur_ge * emac, u8 * buffer,
+			      unsigned long *byte_cnt)
+{
+	unsigned long int_status, reg_data;
+
+	/* Silly checks here that we really don't need */
+	if (!emac->started)
+		return -1;
+
+	if (emac->polled)
+		return -1;
+
+	if (emac->dma_sg)
+		return -1;
+
+	if (*byte_cnt < BIG_SUR_GE_MAX_FRAME_SIZE)
+		return -1;
+
+	int_status =
+	    BIG_SUR_GE_READ(emac->base_address + XIIF_V123B_IISR_OFFSET);
+	if (int_status & BIG_SUR_GE_EIR_RECV_LFIFO_EMPTY_MASK) {
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_IISR_OFFSET,
+				 BIG_SUR_GE_EIR_RECV_LFIFO_EMPTY_MASK);
+		return -1;
+	}
+
+	if (big_sur_ge_dma_status(&emac->recv_channel) &
+	    BIG_SUR_GE_DMASR_BUSY_MASK) {
+		printk(KERN_ERR "Rx side DMA Engine busy \n");
+		return -1;
+	}
+
+	if (BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_RPLR_OFFSET) ==
+	    0) {
+		printk(KERN_ERR "MAC has the FIFO packet length 0 \n");
+		return -1;
+	}
+
+	/* For the simple DMA case only */
+	big_sur_ge_set_dma_control(&emac->recv_channel,
+				   BIG_SUR_GE_DMACR_DEST_INCR_MASK |
+				   BIG_SUR_GE_DMACR_SOURCE_LOCAL_MASK |
+				   BIG_SUR_GE_DMACR_SG_DISABLE_MASK);
+
+	if (packet_fifo_read(&emac->recv_fifo, buffer,
+			     BIG_SUR_GE_READ(emac->base_address +
+					     BIG_SUR_GE_RPLR_OFFSET)) ==
+	    -1) {
+		printk(KERN_ERR "Not enough space in the FIFO \n");
+		return -1;
+	}
+
+	big_sur_ge_dma_transfer(&emac->recv_channel,
+				(unsigned long *) (emac->base_address +
+						   BIG_SUR_GE_PFIFO_RXDATA_OFFSET),
+				(unsigned long *)
+				buffer,
+				BIG_SUR_GE_READ(emac->base_address +
+						BIG_SUR_GE_RPLR_OFFSET));
+
+	reg_data = big_sur_ge_dma_status(&emac->recv_channel);
+	while (reg_data & BIG_SUR_GE_DMASR_BUSY_MASK) {
+		reg_data = big_sur_ge_dma_status(&emac->recv_channel);
+		if (!(reg_data & BIG_SUR_GE_DMASR_BUSY_MASK))
+			break;
+	}
+
+	if ((reg_data & BIG_SUR_GE_DMASR_BUS_ERROR_MASK) ||
+	    (reg_data & BIG_SUR_GE_DMASR_BUS_TIMEOUT_MASK)) {
+		printk(KERN_ERR "DMA Bus Error \n");
+		return -1;
+	}
+
+	*byte_cnt =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_RPLR_OFFSET);
+
+	return 0;
+}
+
+static irqreturn_t big_sur_ge_int_handler(int irq, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct big_sur_ge_enet *lp = netdev->priv;
+	big_sur_ge *emac = &lp->emac;
+	unsigned long int_status;
+
+	int_status =
+	    BIG_SUR_GE_READ(emac->base_address + XIIF_V123B_DIPR_OFFSET);
+	if (int_status & BIG_SUR_GE_IPIF_EMAC_MASK)
+		handle_fifo_intr(emac);
+
+	if (int_status & BIG_SUR_GE_IPIF_RECV_FIFO_MASK)
+		big_sur_ge_check_fifo_recv_error(emac);
+
+	if (int_status & BIG_SUR_GE_IPIF_SEND_FIFO_MASK)
+		big_sur_ge_check_fifo_send_error(emac);
+
+	if (int_status & XIIF_V123B_ERROR_MASK)
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_DISR_OFFSET,
+				 XIIF_V123B_ERROR_MASK);
+
+	return IRQ_HANDLED;
+}
+
+/****************************************************************************
+ * Set the FIFO send handler
+ ***************************************************************************/
+static void big_sur_ge_set_fifo_send_handler(big_sur_ge * emac, void *call_back,
+				      big_sur_fifo_handler function)
+{
+	emac->big_sur_ge_fifo_send_handler = function;
+	emac->fifo_send_ref = call_back;
+}
+
+/****************************************************************************
+ * Set the FIFO recv handler
+ ***************************************************************************/
+static void big_sur_ge_set_fifo_recv_handler(big_sur_ge * emac, void *call_back,
+				      big_sur_fifo_handler function)
+{
+	emac->big_sur_ge_fifo_recv_handler = function;
+	emac->fifo_recv_ref = call_back;
+}
+
+/****************************************************************************
+ * Main Fifo intr handler
+ ***************************************************************************/
+static void handle_fifo_intr(big_sur_ge * emac)
+{
+	unsigned long int_status;
+
+	/* Ack the interrupts asap */
+	int_status =
+	    BIG_SUR_GE_READ(emac->base_address + XIIF_V123B_IISR_OFFSET);
+	BIG_SUR_GE_WRITE(emac->base_address + XIIF_V123B_IISR_OFFSET,
+			 int_status);
+
+	/* Process the Rx side */
+	if (int_status & BIG_SUR_GE_EIR_RECV_DONE_MASK) {
+		emac->big_sur_ge_fifo_recv_handler(emac->fifo_recv_ref);
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_IISR_OFFSET,
+				 BIG_SUR_GE_EIR_RECV_DONE_MASK);
+	}
+
+	if (int_status & BIG_SUR_GE_EIR_XMIT_DONE_MASK) {
+		/* We don't collect stats and hence we don't need to get status */
+
+		emac->big_sur_ge_fifo_send_handler(emac->fifo_send_ref);
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_IISR_OFFSET,
+				 BIG_SUR_GE_EIR_XMIT_DONE_MASK);
+	}
+
+	big_sur_ge_check_mac_error(emac, int_status);
+}
+
+/******************************************************************
+ * Handle the Receive side DMA interrupts. The PPC driver has
+ * callbacks all over the place. This has been eliminated here by
+ * using the following approach:
+ *
+ * The ISR is set to the main interrupt handler. This will handle
+ * all the interrupts including the ones for DMA. In this main ISR,
+ * we determine if we need to call the recv or send side intr functions.
+ * Pretty complex, but that's the way it is now.
+ *******************************************************************/
+static void big_sur_ge_handle_recv_intr(big_sur_ge * emac)
+{
+	unsigned long int_status;
+
+	int_status = big_sur_ge_get_intr_status(&emac->recv_channel);
+	if (int_status & (BIG_SUR_GE_IXR_PKT_THRESHOLD_MASK |
+			  BIG_SUR_GE_IXR_PKT_WAIT_BOUND_MASK)) {
+		u32 num_packets;
+		u32 num_processed;
+		u32 num_buffers = 0;
+		u32 num_bytes = 0;
+		xbuf_descriptor *first_desc_ptr = NULL;
+		xbuf_descriptor *buffer_desc;
+		int is_last = 0;
+
+		/* The number of packets we need to process on the Rx */
+		num_packets =
+		    big_sur_ge_get_packet_count(&emac->recv_channel);
+
+		for (num_processed = 0; num_processed < num_packets;
+		     num_processed++) {
+			while (!is_last) {
+				if (big_sur_ge_get_descriptor
+				    (&emac->recv_channel,
+				     &buffer_desc) == -1)
+					break;
+
+				if (first_desc_ptr == NULL)
+					first_desc_ptr = buffer_desc;
+
+				num_bytes +=
+				    xbuf_descriptor_GetLength(buffer_desc);
+
+				if (xbuf_descriptor_IsLastStatus
+				    (buffer_desc)) {
+					is_last = 1;
+				}
+
+				num_buffers++;
+			}
+
+			/* Number of buffers is always 1 since we don't do SG */
+
+			/*
+			 * Only for SG DMA which is currently not supported. In the
+			 * future, as we have SG channel working, we will code this
+			 * receive side routine. For now, do nothing. This is never
+			 * called from FIFO mode - Manish
+			 */
+			big_sur_ge_decr_packet_count(&emac->recv_channel);
+		}
+	}
+
+	/* Ack the interrupts */
+	big_sur_ge_set_intr_status(&emac->recv_channel, int_status);
+
+	if (int_status & BIG_SUR_GE_IXR_DMA_ERROR_MASK) {
+		/* We need a reset here */
+	}
+
+	big_sur_ge_set_intr_status(&emac->recv_channel, int_status);
+}
+
+/****************************************************************
+ * Handle the send side DMA interrupt
+ ****************************************************************/
+static void big_sur_ge_handle_send_intr(big_sur_ge * emac)
+{
+	unsigned long int_status;
+
+	int_status = big_sur_ge_get_intr_status(&emac->send_channel);
+
+	if (int_status & (BIG_SUR_GE_IXR_PKT_THRESHOLD_MASK |
+			  BIG_SUR_GE_IXR_PKT_WAIT_BOUND_MASK)) {
+		unsigned long num_frames = 0;
+		unsigned long num_processed = 0;
+		unsigned long num_buffers = 0;
+		unsigned long num_bytes = 0;
+		unsigned long is_last = 0;
+		xbuf_descriptor *first_desc_ptr = NULL;
+		xbuf_descriptor *buffer_desc;
+
+		num_frames =
+		    big_sur_ge_get_packet_count(&emac->send_channel);
+
+		for (num_processed = 0; num_processed < num_frames;
+		     num_processed++) {
+			while (!is_last) {
+				if (big_sur_ge_get_descriptor
+				    (&emac->send_channel, &buffer_desc)
+				    == -1) {
+					break;
+				}
+
+				if (first_desc_ptr == NULL)
+					first_desc_ptr = buffer_desc;
+
+				num_bytes +=
+				    xbuf_descriptor_GetLength(buffer_desc);
+				if (xbuf_descriptor_IsLastControl
+				    (buffer_desc))
+					is_last = 1;
+
+				num_buffers++;
+			}
+
+			/*
+			 * Only for SG DMA which is currently not supported. In the
+			 * future, as we have SG channel working, we will code this
+			 * receive side routine. For now, do nothing. This is never
+			 * called from FIFO mode - Manish
+			 */
+			big_sur_ge_decr_packet_count(&emac->send_channel);
+		}
+	}
+
+	/* Ack the interrupts and reset DMA channel if necessary */
+	big_sur_ge_set_intr_status(&emac->send_channel, int_status);
+	if (int_status & BIG_SUR_GE_IXR_DMA_ERROR_MASK) {
+		/* Manish : need reset */
+	}
+
+	big_sur_ge_set_intr_status(&emac->send_channel, int_status);
+}
+
+/*****************************************************************
+ * For now, the MAC address errors don't trigger an update of the
+ * stats. There is no stats framework in place. Hence, we just
+ * check for the errors below and do a reset if needed.
+ *****************************************************************/
+static void big_sur_ge_check_mac_error(big_sur_ge * emac,
+				unsigned long int_status)
+{
+	if (int_status & (BIG_SUR_GE_EIR_RECV_DFIFO_OVER_MASK |
+			  BIG_SUR_GE_EIR_RECV_LFIFO_OVER_MASK |
+			  BIG_SUR_GE_EIR_RECV_LFIFO_UNDER_MASK |
+			  BIG_SUR_GE_EIR_RECV_ERROR_MASK |
+			  BIG_SUR_GE_EIR_RECV_MISSED_FRAME_MASK |
+			  BIG_SUR_GE_EIR_RECV_COLLISION_MASK |
+			  BIG_SUR_GE_EIR_RECV_FCS_ERROR_MASK |
+			  BIG_SUR_GE_EIR_RECV_LEN_ERROR_MASK |
+			  BIG_SUR_GE_EIR_RECV_SHORT_ERROR_MASK |
+			  BIG_SUR_GE_EIR_RECV_LONG_ERROR_MASK |
+			  BIG_SUR_GE_EIR_RECV_ALIGN_ERROR_MASK |
+			  BIG_SUR_GE_EIR_XMIT_SFIFO_OVER_MASK |
+			  BIG_SUR_GE_EIR_XMIT_LFIFO_OVER_MASK |
+			  BIG_SUR_GE_EIR_XMIT_SFIFO_UNDER_MASK |
+			  BIG_SUR_GE_EIR_XMIT_LFIFO_UNDER_MASK)) {
+
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_IIER_OFFSET, 0);
+		/*
+		 * Manish Reset the MAC here
+		 */
+	}
+}
+
+/*****************************************************************
+ * Check for FIFO Recv errors
+ *****************************************************************/
+static void big_sur_ge_check_fifo_recv_error(big_sur_ge * emac)
+{
+	if (BIG_SUR_GE_IS_DEADLOCKED(&emac->recv_fifo)) {
+		unsigned long intr_enable;
+
+		intr_enable =
+		    BIG_SUR_GE_READ(emac->base_address +
+				    XIIF_V123B_DIER_OFFSET);
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_DIER_OFFSET,
+				 intr_enable &
+				 ~(BIG_SUR_GE_IPIF_RECV_FIFO_MASK));
+
+	}
+}
+
+/*****************************************************************
+ * Check for FIFO Send errors
+ *****************************************************************/
+static void big_sur_ge_check_fifo_send_error(big_sur_ge * emac)
+{
+	if (BIG_SUR_GE_IS_DEADLOCKED(&emac->send_fifo)) {
+		unsigned long intr_enable;
+
+		intr_enable =
+		    BIG_SUR_GE_READ(emac->base_address +
+				    XIIF_V123B_DIER_OFFSET);
+		BIG_SUR_GE_WRITE(emac->base_address +
+				 XIIF_V123B_DIER_OFFSET,
+				 intr_enable &
+				 ~(BIG_SUR_GE_IPIF_SEND_FIFO_MASK));
+	}
+}
+
+/*****************************************************************
+ * GE unit init
+ ****************************************************************/
+static int big_sur_ge_enet_init(big_sur_ge * emac, unsigned int device_id)
+{
+	unsigned long reg_data;
+	big_sur_ge_config *config;
+	int err;
+
+	/* Assume that the device has been stopped */
+
+	config = big_sur_ge_lookup_config(device_id);
+	if (config == NULL)
+		return -1;
+
+	emac->ready = 0;
+	emac->started = 0;
+	emac->dma_sg = 0;	/* This MAC has no support for Scatter Gather DMA */
+	emac->has_mii = config->has_mii;
+	emac->has_mcast_hash_table = 0;
+	emac->dma_config = config->dma_config;
+
+	/*
+	 * Initialize the FIFO send and recv handlers to the stub handlers.
+	 * We only deal with the FIFO mode of operation since SG is not supported.
+	 * Also, there is no error handler. We try to handle as many errors as
+	 * possible and then return. There are no error codes either.
+	 */
+
+	emac->base_address = config->base_address;
+
+	if (big_sur_ge_config_dma(emac) == -1)
+		return -1;
+
+	err = big_sur_ge_config_fifo(emac);
+	if (err == -1)
+		return err;
+
+	/* Now, we know that the FIFO initialized successfully. So, set the ready flag */
+	emac->ready = 1;
+
+	/* Do we need a PHY reset here as well? It did cause problems on some boards */
+	big_sur_ge_enet_reset(emac);
+
+	/* PHY reset code. Remove if causes a problem on the board */
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+	reg_data &= ~(BIG_SUR_GE_ECR_PHY_ENABLE_MASK);
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+	reg_data |= BIG_SUR_GE_ECR_PHY_ENABLE_MASK;
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+
+	return 0;
+}
+
+/*******************************************************************
+ * Start the GE unit for Tx, Rx and Interrupts
+ *******************************************************************/
+static int big_sur_ge_start(big_sur_ge * emac)
+{
+	unsigned long reg_data;
+
+	/*
+	 * The basic modes of operation are polled and interrupt mode. We disable
+	 * the polled mode for good. We may use the polled mode for Rx NAPI, but
+	 * that does not require all the interrupts to be disabled.
+	 */
+
+	emac->polled = 0;
+
+	/*
+	 * DMA: Three modes of operation - simple, FIFO, SG. SG is surely not working
+	 * and so is kept off using the dma_sg flag. Simple and FIFO work. But, we may
+	 * not use FIFO at all. So, we enable the interrupts below
+	 */
+
+	BIG_SUR_GE_WRITE(emac->base_address + XIIF_V123B_DIER_OFFSET,
+			 BIG_SUR_GE_IPIF_FIFO_DFT_MASK |
+			 XIIF_V123B_ERROR_MASK);
+
+	BIG_SUR_GE_WRITE(emac->base_address + XIIF_V123B_IIER_OFFSET,
+			 BIG_SUR_GE_EIR_DFT_FIFO_MASK);
+
+	/* Toggle the started flag */
+	emac->started = 1;
+
+	/* Start the Tx and Rx units respectively */
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+	reg_data &=
+	    ~(BIG_SUR_GE_ECR_XMIT_RESET_MASK |
+	      BIG_SUR_GE_ECR_RECV_RESET_MASK);
+	reg_data |=
+	    (BIG_SUR_GE_ECR_XMIT_ENABLE_MASK |
+	     BIG_SUR_GE_ECR_RECV_ENABLE_MASK);
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+
+	return 0;
+}
+
+/**************************************************************************
+ * Stop the GE unit
+ **************************************************************************/
+static int big_sur_ge_stop(big_sur_ge * emac)
+{
+	unsigned long reg_data;
+
+	/* We assume that the device is not already stopped */
+	if (!emac->started)
+		return 0;
+
+	/* Disable the Tx and Rx unit respectively */
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+	reg_data &=
+	    ~(BIG_SUR_GE_ECR_XMIT_ENABLE_MASK |
+	      BIG_SUR_GE_ECR_RECV_ENABLE_MASK);
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+
+	/* Disable the interrupts */
+	BIG_SUR_GE_WRITE(emac->base_address + XIIF_V123B_DGIER_OFFSET, 0);
+
+	/* Toggle the started flag */
+	emac->started = 0;
+
+	return 0;
+}
+
+/************************************************************************
+ * Reset the GE MAC unit
+ *************************************************************************/
+static void big_sur_ge_enet_reset(big_sur_ge * emac)
+{
+	unsigned long reg_data;
+
+	(void) big_sur_ge_stop(emac);
+
+	BIG_SUR_GE_WRITE(emac->base_address + XIIF_V123B_RESETR_OFFSET,
+			 XIIF_V123B_RESET_MASK);
+
+	/*
+	 * For now, configure the receiver to not strip off FCS and padding since
+	 * this is not currently supported. In the future, just take the default
+	 * and provide the option for the user to change this behavior.
+	 */
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+	reg_data &=
+	    ~(BIG_SUR_GE_ECR_RECV_PAD_ENABLE_MASK |
+	      BIG_SUR_GE_ECR_RECV_FCS_ENABLE_MASK);
+	reg_data &= ~(BIG_SUR_GE_ECR_RECV_STRIP_ENABLE_MASK);
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+}
+
+/*************************************************************************
+ * Set the MAC address of the GE mac unit
+ *************************************************************************/
+static int big_sur_ge_set_mac_address(big_sur_ge * emac, unsigned char *addr)
+{
+	unsigned long mac_addr = 0;
+
+	/* The MAC address cannot be changed once the device is started */
+	if (emac->started == 1)
+		return 0;
+
+	mac_addr = ((addr[0] << 8) | addr[1]);
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_SAH_OFFSET,
+			 mac_addr);
+
+	mac_addr = ((addr[2] << 24) | (addr[3] << 16) |
+		     (addr[4] << 8) | addr[5]);
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_SAL_OFFSET,
+			 mac_addr);
+
+	return 0;
+}
+
+/****************************************************************************
+ * Get the MAC address of the GE MAC unit
+ ***************************************************************************/
+static void big_sur_ge_get_mac_unit(big_sur_ge * emac, unsigned int *addr)
+{
+	unsigned long mac_addr_hi, mac_addr_lo;
+
+	mac_addr_hi =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_SAH_OFFSET);
+	mac_addr_lo =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_SAL_OFFSET);
+
+	addr[0] = (mac_addr_hi >> 8);
+	addr[1] = mac_addr_hi;
+
+	addr[2] = (mac_addr_lo >> 24);
+	addr[3] = (mac_addr_lo >> 16);
+	addr[4] = (mac_addr_lo >> 8);
+	addr[5] = mac_addr_lo;
+}
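+
+/*
+ * Example of the station address packing implied by the two routines
+ * above, for the purely illustrative address 00:11:22:33:44:55:
+ *
+ *	SAH = 0x0011		(addr[0] in bits 15:8, addr[1] in bits 7:0)
+ *	SAL = 0x22334455	(addr[2] in bits 31:24 ... addr[5] in bits 7:0)
+ */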
+
+/*********************************************************************************
+ * Configure the GE MAC for DMA capabilities. Not for Scatter Gather, only Simple
+ *********************************************************************************/
+static int big_sur_ge_config_dma(big_sur_ge * emac)
+{
+	if (big_sur_ge_dma_init(&emac->recv_channel, emac->base_address +
+				BIG_SUR_GE_DMA_RECV_OFFSET) == -1) {
+		printk(KERN_ERR "Could not initialize the DMA unit  \n");
+		return -1;
+	}
+
+	if (big_sur_ge_dma_init(&emac->send_channel, emac->base_address +
+				BIG_SUR_GE_DMA_SEND_OFFSET) == -1) {
+		printk(KERN_ERR "Could not initialize the DMA unit  \n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * Configure the FIFO for simple DMA
+ ******************************************************************************/
+static int big_sur_ge_config_fifo(big_sur_ge * emac)
+{
+	int err = 0;
+
+	err = packet_fifo_init(&emac->recv_fifo, emac->base_address +
+			       BIG_SUR_GE_PFIFO_RXREG_OFFSET,
+			       emac->base_address +
+			       BIG_SUR_GE_PFIFO_RXDATA_OFFSET);
+
+	if (err == -1) {
+		printk(KERN_ERR
+		       "Could not initialize Rx packet FIFO for Simple DMA \n");
+		return err;
+	}
+
+	err = packet_fifo_init(&emac->send_fifo, emac->base_address +
+			       BIG_SUR_GE_PFIFO_TXREG_OFFSET,
+			       emac->base_address +
+			       BIG_SUR_GE_PFIFO_TXDATA_OFFSET);
+
+	if (err == -1) {
+		printk(KERN_ERR
+		       "Could not initialize Tx packet FIFO for Simple DMA \n");
+	}
+
+	return err;
+}
+
+#define BIG_SUR_GE_NUM_INSTANCES	2
+
+
+/**********************************************************************************
+ * Look up the config of the MAC
+ **********************************************************************************/
+static big_sur_ge_config *big_sur_ge_lookup_config(unsigned int device_id)
+{
+	big_sur_ge_config *config = NULL;
+	int i = 0;
+
+	for (i = 0; i < BIG_SUR_GE_NUM_INSTANCES; i++) {
+		/* Manish : Init the config here */
+		break;
+	}
+
+	return config;
+}
+
+typedef struct {
+	unsigned long option;
+	unsigned long mask;
+} option_map;
+
+static option_map option_table[] = {
+	{BIG_SUR_GE_UNICAST_OPTION, BIG_SUR_GE_ECR_UNICAST_ENABLE_MASK},
+	{BIG_SUR_GE_BROADCAST_OPTION, BIG_SUR_GE_ECR_BROAD_ENABLE_MASK},
+	{BIG_SUR_GE_PROMISC_OPTION, BIG_SUR_GE_ECR_PROMISC_ENABLE_MASK},
+	{BIG_SUR_GE_FDUPLEX_OPTION, BIG_SUR_GE_ECR_FULL_DUPLEX_MASK},
+	{BIG_SUR_GE_LOOPBACK_OPTION, BIG_SUR_GE_ECR_LOOPBACK_MASK},
+	{BIG_SUR_GE_MULTICAST_OPTION, BIG_SUR_GE_ECR_MULTI_ENABLE_MASK},
+	{BIG_SUR_GE_FLOW_CONTROL_OPTION, BIG_SUR_GE_ECR_PAUSE_FRAME_MASK},
+	{BIG_SUR_GE_INSERT_PAD_OPTION,
+	 BIG_SUR_GE_ECR_XMIT_PAD_ENABLE_MASK},
+	{BIG_SUR_GE_INSERT_FCS_OPTION,
+	 BIG_SUR_GE_ECR_XMIT_FCS_ENABLE_MASK},
+	{BIG_SUR_GE_INSERT_ADDR_OPTION,
+	 BIG_SUR_GE_ECR_XMIT_ADDR_INSERT_MASK},
+	{BIG_SUR_GE_OVWRT_ADDR_OPTION,
+	 BIG_SUR_GE_ECR_XMIT_ADDR_OVWRT_MASK},
+	{BIG_SUR_GE_STRIP_PAD_OPTION, BIG_SUR_GE_ECR_RECV_PAD_ENABLE_MASK},
+	{BIG_SUR_GE_STRIP_FCS_OPTION, BIG_SUR_GE_ECR_RECV_FCS_ENABLE_MASK},
+	{BIG_SUR_GE_STRIP_PAD_FCS_OPTION,
+	 BIG_SUR_GE_ECR_RECV_STRIP_ENABLE_MASK}
+};
+
+#define BIG_SUR_GE_NUM_OPTIONS		(sizeof(option_table) / sizeof(option_map))
+
+/**********************************************************************
+ * Set the options for the GE
+ **********************************************************************/
+static int big_sur_ge_set_options(big_sur_ge * emac, unsigned long option_flag)
+{
+	unsigned long reg_data;
+	unsigned int index;
+
+	/* Assume that the device is stopped before calling this function */
+
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+	for (index = 0; index < BIG_SUR_GE_NUM_OPTIONS; index++) {
+		if (option_flag & option_table[index].option)
+			reg_data |= option_table[index].mask;
+		else
+			reg_data &= ~(option_table[index].mask);
+
+	}
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_ECR_OFFSET,
+			 reg_data);
+
+	/* No polled option */
+	emac->polled = 0;
+
+	return 0;
+}
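+
+/*
+ * Illustrative call only (not taken from any board code): with the
+ * device stopped, unicast, broadcast and full duplex can be enabled in
+ * one go with
+ *
+ *	big_sur_ge_set_options(emac, BIG_SUR_GE_UNICAST_OPTION |
+ *				     BIG_SUR_GE_BROADCAST_OPTION |
+ *				     BIG_SUR_GE_FDUPLEX_OPTION);
+ *
+ * Any option not passed in is cleared in the ECR by the loop above.
+ */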
+
+/*******************************************************
+ * Get the options from the GE
+ *******************************************************/
+static unsigned long big_sur_ge_get_options(big_sur_ge * emac)
+{
+	unsigned long option_flag = 0, reg_data;
+	unsigned int index;
+
+	reg_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_ECR_OFFSET);
+
+	for (index = 0; index < BIG_SUR_GE_NUM_OPTIONS; index++) {
+		if (reg_data & option_table[index].mask)
+			option_flag |= option_table[index].option;
+	}
+
+	/* No polled mode */
+
+	return option_flag;
+}
+
+/********************************************************
+ * Set the Inter frame gap
+ ********************************************************/
+static int big_sur_ge_set_frame_gap(big_sur_ge * emac, int part1, int part2)
+{
+	unsigned long config;
+
+	/* Assume that the device is stopped before calling this */
+
+	config = ((part1 << BIG_SUR_GE_IFGP_PART1_SHIFT) |
+		  (part2 << BIG_SUR_GE_IFGP_PART2_SHIFT));
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_IFGP_OFFSET,
+			 config);
+
+	return 0;
+}
+
+/********************************************************
+ * Get the Inter frame gap
+ ********************************************************/
+static void big_sur_ge_get_frame_gap(big_sur_ge * emac, int *part1, int *part2)
+{
+	unsigned long config;
+
+	config =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_IFGP_OFFSET);
+	*part1 =
+	    ((config & BIG_SUR_GE_IFGP_PART1_SHIFT) >>
+	     BIG_SUR_GE_IFGP_PART1_SHIFT);
+	*part2 =
+	    ((config & BIG_SUR_GE_IFGP_PART2_SHIFT) >>
+	     BIG_SUR_GE_IFGP_PART2_SHIFT);
+}
+
+/*******************************************************************
+ * PHY specific functions for the MAC
+ *******************************************************************/
+#define BIG_SUR_GE_MAX_PHY_ADDR		32
+#define BIG_SUR_GE_MAX_PHY_REG		32
+
+/*******************************************************************
+ * Read the PHY reg
+ *******************************************************************/
+static int big_sur_ge_phy_read(big_sur_ge * emac, unsigned long addr,
+			unsigned long reg_num, unsigned int *data)
+{
+	unsigned long mii_control, mii_data;
+
+	if (!emac->has_mii)
+		return -1;
+
+	mii_control =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_MGTCR_OFFSET);
+	if (mii_control & BIG_SUR_GE_MGTCR_START_MASK) {
+		printk(KERN_ERR "PHY busy \n");
+		return -1;
+	}
+
+	mii_control = (addr << BIG_SUR_GE_MGTCR_PHY_ADDR_SHIFT);
+	mii_control |= (reg_num << BIG_SUR_GE_MGTCR_REG_ADDR_SHIFT);
+	mii_control |=
+	    (BIG_SUR_GE_MGTCR_RW_NOT_MASK | BIG_SUR_GE_MGTCR_START_MASK |
+	     BIG_SUR_GE_MGTCR_MII_ENABLE_MASK);
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_MGTCR_OFFSET,
+			 mii_control);
+
+	/* Wait for the hardware to clear the START bit */
+	do {
+		mii_control = BIG_SUR_GE_READ(emac->base_address +
+					      BIG_SUR_GE_MGTCR_OFFSET);
+	} while (mii_control & BIG_SUR_GE_MGTCR_START_MASK);
+
+	mii_data =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_MGTDR_OFFSET);
+	*data = (unsigned int) mii_data;
+
+	return 0;
+}
+
+/**********************************************************************
+ * Write to the PHY register
+ **********************************************************************/
+static int big_sur_ge_phy_write(big_sur_ge * emac, unsigned long addr,
+			 unsigned long reg_num, unsigned int data)
+{
+	unsigned long mii_control;
+
+	if (!emac->has_mii)
+		return -1;
+
+	mii_control =
+	    BIG_SUR_GE_READ(emac->base_address + BIG_SUR_GE_MGTCR_OFFSET);
+	if (mii_control & BIG_SUR_GE_MGTCR_START_MASK) {
+		printk(KERN_ERR "PHY busy \n");
+		return -1;
+	}
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_MGTDR_OFFSET,
+			 (unsigned long) data);
+
+	mii_control = (addr << BIG_SUR_GE_MGTCR_PHY_ADDR_SHIFT);
+	mii_control |= (reg_num << BIG_SUR_GE_MGTCR_REG_ADDR_SHIFT);
+	mii_control |=
+	    (BIG_SUR_GE_MGTCR_START_MASK |
+	     BIG_SUR_GE_MGTCR_MII_ENABLE_MASK);
+
+	BIG_SUR_GE_WRITE(emac->base_address + BIG_SUR_GE_MGTCR_OFFSET,
+			 mii_control);
+
+	/* Wait for the hardware to clear the START bit */
+	do {
+		mii_control = BIG_SUR_GE_READ(emac->base_address +
+					      BIG_SUR_GE_MGTCR_OFFSET);
+	} while (mii_control & BIG_SUR_GE_MGTCR_START_MASK);
+
+	return 0;
+}
+
+
+
+
+
+
+/********************************************************************
+ * The hardware dependent part of the driver begins here
+ ********************************************************************/
+
+
+/*******************************************************************
+ * Reset the GE system
+ *******************************************************************/
+static void big_sur_ge_reset(struct net_device *netdev, DUPLEX duplex)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	struct sk_buff *skb;
+	unsigned long options;
+	int ifcfg1, ifcfg2;
+
+	/* Stop the queue */
+	netif_stop_queue(netdev);
+
+	big_sur_ge_get_frame_gap(&lp->emac, &ifcfg1, &ifcfg2);
+	options = big_sur_ge_get_options(&lp->emac);
+	switch (duplex) {
+	case HALF_DUPLEX:
+		options &= ~(BIG_SUR_GE_FDUPLEX_OPTION);
+		break;
+
+	case FULL_DUPLEX:
+		options |= BIG_SUR_GE_FDUPLEX_OPTION;
+		break;
+
+	case UNKNOWN:
+		break;
+	}
+
+	/* There is no support for SG DMA in a 100 Mbps NIC */
+
+	big_sur_ge_enet_reset(&lp->emac);
+
+	/* Set the necessary options for the MAC unit */
+	big_sur_ge_set_mac_address(&lp->emac, netdev->dev_addr);
+	big_sur_ge_set_frame_gap(&lp->emac, ifcfg1, ifcfg2);
+	big_sur_ge_set_options(&lp->emac, options);
+
+	(void) big_sur_ge_start(&lp->emac);
+
+	spin_lock_irq(&lp->lock);
+	skb = lp->saved_skb;
+	lp->saved_skb = NULL;
+	spin_unlock_irq(&lp->lock);
+
+	if (skb)
+		dev_kfree_skb(skb);
+
+	/* Wake the queue */
+	netif_wake_queue(netdev);
+}
+
+/********************************************************************
+ * Get the PHY status
+ *******************************************************************/
+static int big_sur_ge_get_phy_status(struct net_device *netdev,
+				     DUPLEX * duplex, int *linkup)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	unsigned int reg_data;
+	int err = 0;
+
+	err =
+	    big_sur_ge_phy_read(&lp->emac, lp->mii_addr, MII_BMCR,
+				&reg_data);
+	if (err == -1) {
+		printk(KERN_ERR "%s: Could not read PHY control register",
+		       netdev->name);
+		return err;
+	}
+
+	if (!(reg_data & BMCR_ANENABLE)) {
+		if (reg_data & BMCR_FULLDPLX)
+			*duplex = FULL_DUPLEX;
+		else
+			*duplex = HALF_DUPLEX;
+	} else {
+		unsigned int advertise, partner, neg;
+
+		err =
+		    big_sur_ge_phy_read(&lp->emac, lp->mii_addr,
+					MII_ADVERTISE, &advertise);
+		if (err == -1) {
+			printk(KERN_ERR
+			       "%s: Could not read PHY control register",
+			       netdev->name);
+			return err;
+		}
+
+		err =
+		    big_sur_ge_phy_read(&lp->emac, lp->mii_addr, MII_LPA,
+					&partner);
+		if (err == -1) {
+			printk(KERN_ERR
+			       "%s: Could not read PHY control register",
+			       netdev->name);
+			return err;
+		}
+
+		neg = advertise & partner & ADVERTISE_ALL;
+		if (neg & ADVERTISE_100FULL)
+			*duplex = FULL_DUPLEX;
+		else if (neg & ADVERTISE_100HALF)
+			*duplex = HALF_DUPLEX;
+		else if (neg & ADVERTISE_10FULL)
+			*duplex = FULL_DUPLEX;
+		else
+			*duplex = HALF_DUPLEX;
+
+		err =
+		    big_sur_ge_phy_read(&lp->emac, lp->mii_addr, MII_BMSR,
+					&reg_data);
+		if (err == -1) {
+			printk(KERN_ERR
+			       "%s: Could not read PHY control register",
+			       netdev->name);
+			return err;
+		}
+
+		*linkup = (reg_data & BMSR_LSTATUS) != 0;
+
+	}
+	return 0;
+}
+
+/************************************************************
+ * Poll the MII for duplex and link status
+ ***********************************************************/
+static void big_sur_ge_poll_mii(unsigned long data)
+{
+	struct net_device *netdev = (struct net_device *) data;
+	struct big_sur_ge_enet *lp = netdev->priv;
+	unsigned long options;
+	DUPLEX mac_duplex, phy_duplex;
+	int phy_carrier, netif_carrier;
+
+	if (big_sur_ge_get_phy_status(netdev, &phy_duplex, &phy_carrier) ==
+	    -1) {
+		printk(KERN_ERR "%s: Terminating link monitoring.\n",
+		       netdev->name);
+		return;
+	}
+
+	options = big_sur_ge_get_options(&lp->emac);
+	if (options & BIG_SUR_GE_FDUPLEX_OPTION)
+		mac_duplex = FULL_DUPLEX;
+	else
+		mac_duplex = HALF_DUPLEX;
+
+	if (mac_duplex != phy_duplex) {
+		disable_irq(netdev->irq);
+		big_sur_ge_reset(netdev, phy_duplex);
+		enable_irq(netdev->irq);
+	}
+
+	netif_carrier = netif_carrier_ok(netdev) != 0;
+
+	if (phy_carrier != netif_carrier) {
+		if (phy_carrier) {
+			printk(KERN_INFO "%s: Link carrier restored.\n",
+			       netdev->name);
+			netif_carrier_on(netdev);
+		} else {
+			printk(KERN_INFO "%s: Link carrier lost.\n",
+			       netdev->name);
+			netif_carrier_off(netdev);
+		}
+	}
+
+	/* Set up the timer so we'll get called again in 2 seconds. */
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	add_timer(&lp->phy_timer);
+}
+
+/**************************************************************
+ * Open the network interface
+ *************************************************************/
+static int big_sur_ge_open(struct net_device *netdev)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	unsigned long options;
+	DUPLEX phy_duplex, mac_duplex;
+	int phy_carrier, retval;
+
+	(void) big_sur_ge_stop(&lp->emac);
+
+	if (big_sur_ge_set_mac_address(&lp->emac, netdev->dev_addr) == -1) {
+		printk(KERN_ERR "%s: Could not set MAC address.\n",
+		       netdev->name);
+		return -EIO;
+	}
+
+	options = big_sur_ge_get_options(&lp->emac);
+
+	retval =
+	    request_irq(netdev->irq, &big_sur_ge_int_handler, 0,
+			netdev->name, netdev);
+	if (retval) {
+		printk(KERN_ERR
+		       "%s: Could not allocate interrupt %d.\n",
+		       netdev->name, netdev->irq);
+
+		return retval;
+	}
+
+	if (!big_sur_ge_get_phy_status(netdev, &phy_duplex, &phy_carrier)) {
+		if (options & BIG_SUR_GE_FDUPLEX_OPTION)
+			mac_duplex = FULL_DUPLEX;
+		else
+			mac_duplex = HALF_DUPLEX;
+
+		if (mac_duplex != phy_duplex) {
+			switch (phy_duplex) {
+			case HALF_DUPLEX:
+				options &= ~(BIG_SUR_GE_FDUPLEX_OPTION);
+				break;
+			case FULL_DUPLEX:
+				options |= BIG_SUR_GE_FDUPLEX_OPTION;
+				break;
+			case UNKNOWN:
+				break;
+			}
+
+			big_sur_ge_set_options(&lp->emac, options);
+		}
+	}
+
+	if (big_sur_ge_start(&lp->emac) == -1) {
+		printk(KERN_ERR "%s: Could not start device.\n",
+		       netdev->name);
+		free_irq(netdev->irq, netdev);
+		return -EBUSY;
+	}
+
+	netif_start_queue(netdev);
+
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	lp->phy_timer.data = (unsigned long) netdev;
+	lp->phy_timer.function = &big_sur_ge_poll_mii;
+	add_timer(&lp->phy_timer);
+
+	return 0;
+}
+
+/*********************************************************************
+ * Close the network device interface
+ *********************************************************************/
+static int big_sur_ge_close(struct net_device *netdev)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+
+	del_timer_sync(&lp->phy_timer);
+	netif_stop_queue(netdev);
+
+	free_irq(netdev->irq, netdev);
+
+	if (big_sur_ge_stop(&lp->emac) == -1) {
+		printk(KERN_ERR "%s: Could not stop device.\n",
+		       netdev->name);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*********************************************************************
+ * Get the network device stats. For now, do nothing
+ *********************************************************************/
+static struct net_device_stats *big_sur_ge_get_stats(struct net_device
+						     *netdev)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+
+	/* No statistics are collected yet; return the (empty) structure */
+	return &lp->stats;
+}
+
+/********************************************************************
+ * FIFO send for a packet that needs to be transmitted
+ ********************************************************************/
+static int big_sur_ge_fifo_send(struct sk_buff *orig_skb,
+				struct net_device *netdev)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	struct sk_buff *new_skb;
+	unsigned int len, align;
+
+	netif_stop_queue(netdev);
+	len = orig_skb->len;
+
+	if (!(new_skb = dev_alloc_skb(len + 4))) {
+		dev_kfree_skb(orig_skb);
+		printk(KERN_ERR
+		       "%s: Could not allocate transmit buffer.\n",
+		       netdev->name);
+		netif_wake_queue(netdev);
+		return -EBUSY;
+	}
+
+	align = 4 - ((unsigned long) new_skb->data & 3);
+	if (align != 4)
+		skb_reserve(new_skb, align);
+
+	skb_put(new_skb, len);
+	memcpy(new_skb->data, orig_skb->data, len);
+
+	dev_kfree_skb(orig_skb);
+
+	lp->saved_skb = new_skb;
+	if (big_sur_ge_enet_fifo_send(&lp->emac, (u8 *) new_skb->data, len)
+	    == -1) {
+		spin_lock_irq(&lp->lock);
+		new_skb = lp->saved_skb;
+		lp->saved_skb = NULL;
+		spin_unlock_irq(&lp->lock);
+
+		dev_kfree_skb(new_skb);
+		printk(KERN_ERR "%s: Could not transmit buffer.\n",
+		       netdev->name);
+		netif_wake_queue(netdev);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**********************************************************************
+ * Call the fifo send handler
+ **********************************************************************/
+static void big_sur_ge_fifo_send_handler(void *callback)
+{
+	struct net_device *netdev = (struct net_device *) callback;
+	struct big_sur_ge_enet *lp = netdev->priv;
+	struct sk_buff *skb;
+
+	spin_lock_irq(&lp->lock);
+	skb = lp->saved_skb;
+	lp->saved_skb = NULL;
+	spin_unlock_irq(&lp->lock);
+
+	if (skb)
+		dev_kfree_skb(skb);
+
+	netif_wake_queue(netdev);
+}
+
+/**********************************************************************
+ * Handle the timeout of the ethernet device
+ **********************************************************************/
+static void big_sur_ge_tx_timeout(struct net_device *netdev)
+{
+	printk
+	    ("%s: Exceeded transmit timeout of %lu ms.	Resetting mac.\n",
+	     netdev->name, TX_TIMEOUT * 1000UL / HZ);
+
+	disable_irq(netdev->irq);
+	big_sur_ge_reset(netdev, UNKNOWN);
+	enable_irq(netdev->irq);
+}
+
+/*********************************************************************
+ * When in FIFO mode, the callback function for packets received
+ *********************************************************************/
+static void big_sur_ge_fifo_recv_handler(void *callback)
+{
+	struct net_device *netdev = (struct net_device *) callback;
+	struct big_sur_ge_enet *lp = netdev->priv;
+	struct sk_buff *skb;
+	unsigned long len = BIG_SUR_GE_MAX_FRAME_SIZE;
+	unsigned int align;
+
+	if (!(skb = dev_alloc_skb(len + 4))) {
+		printk(KERN_ERR "%s: Could not allocate receive buffer.\n",
+		       netdev->name);
+		return;
+	}
+
+	align = 4 - ((unsigned long) skb->data & 3);
+	if (align != 4)
+		skb_reserve(skb, align);
+
+	if (big_sur_ge_enet_fifo_recv(&lp->emac, (u8 *) skb->data, &len) ==
+	    -1) {
+		dev_kfree_skb(skb);
+
+		printk(KERN_ERR "%s: Could not receive buffer \n",
+		       netdev->name);
+		netdev->tx_timeout = NULL;
+		big_sur_ge_reset(netdev, UNKNOWN);
+		netdev->tx_timeout = big_sur_ge_tx_timeout;
+		return;
+	}
+
+	skb_put(skb, len);	/* Tell the skb how much data we got. */
+	skb->dev = netdev;	/* Fill out required meta-data. */
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	netif_rx(skb);		/* Send the packet upstream. */
+}
+
+/*********************************************************************
+ * Set the Multicast Hash list
+ *********************************************************************/
+static void big_sur_ge_set_multicast_hash_list(struct net_device *netdev)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	unsigned long options;
+
+	disable_irq(netdev->irq);
+	local_bh_disable();
+
+	(void) big_sur_ge_stop(&lp->emac);
+	options = big_sur_ge_get_options(&lp->emac);
+	options &=
+	    ~(BIG_SUR_GE_PROMISC_OPTION | BIG_SUR_GE_MULTICAST_OPTION);
+
+	/* Do nothing for now */
+
+	(void) big_sur_ge_start(&lp->emac);
+	local_bh_enable();
+	enable_irq(netdev->irq);
+}
+
+/***********************************************************************
+ * IOCTL support
+ ***********************************************************************/
+static int big_sur_ge_ioctl(struct net_device *netdev, struct ifreq *rq,
+			    int cmd)
+{
+	struct big_sur_ge_enet *lp = netdev->priv;
+	struct mii_ioctl_data *data =
+	    (struct mii_ioctl_data *) &rq->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
+	case SIOCDEVPRIVATE:	/* for binary compat, remove in 2.5 */
+		data->phy_id = lp->mii_addr;
+
+	case SIOCGMIIREG:	/* Read MII PHY register. */
+	case SIOCDEVPRIVATE + 1:	/* for binary compat, remove in 2.5 */
+		if (data->phy_id > 31 || data->reg_num > 31)
+			return -ENXIO;
+
+		del_timer_sync(&lp->phy_timer);
+
+		if (big_sur_ge_phy_read(&lp->emac, data->phy_id,
+					data->reg_num,
+					&data->val_out) == -1) {
+			printk(KERN_ERR "%s: Could not read from PHY",
+			       netdev->name);
+			return -EBUSY;
+		}
+
+		lp->phy_timer.expires = jiffies + 2 * HZ;
+		add_timer(&lp->phy_timer);
+
+		return 0;
+
+	case SIOCSMIIREG:	/* Write MII PHY register. */
+	case SIOCDEVPRIVATE + 2:	/* for binary compat, remove in 2.5 */
+		if (data->phy_id > 31 || data->reg_num > 31)
+			return -ENXIO;
+
+		del_timer_sync(&lp->phy_timer);
+
+		if (big_sur_ge_phy_write
+		    (&lp->emac, data->phy_id, data->reg_num,
+		     data->val_in) == -1) {
+			printk(KERN_ERR "%s: Could not write to PHY",
+			       netdev->name);
+			return -EBUSY;
+		}
+
+		lp->phy_timer.expires = jiffies + 2 * HZ;
+		add_timer(&lp->phy_timer);
+
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*****************************************************************
+ * Get the config from the config table
+ *****************************************************************/
+static big_sur_ge_config *big_sur_ge_get_config(int index)
+{
+	/* Manish */
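+	/* No board configuration table is wired up yet, so this always
+	   returns NULL and big_sur_ge_probe() bails out with -ENODEV. */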
+	return (big_sur_ge_config *) 0;
+}
+
+/*****************************************************************
+ * Release the network device structure
+ *****************************************************************/
+static void big_sur_ge_remove_head(void)
+{
+	struct net_device *netdev;
+	struct big_sur_ge_enet *lp;
+	big_sur_ge_config *config;
+
+	spin_lock(&dev_lock);
+	netdev = dev_list;
+	lp = netdev->priv;
+	dev_list = lp->next_dev;
+	spin_unlock(&dev_lock);
+
+	config = big_sur_ge_get_config(lp->index);
+	iounmap((void *) config->base_address);
+	config->base_address = lp->save_base_address;
+
+	if (lp->saved_skb)
+		dev_kfree_skb(lp->saved_skb);
+
+	unregister_netdev(netdev);
+	free_netdev(netdev);
+}
+
+/*****************************************************************
+ * Initial Function to probe the network interface
+ *****************************************************************/
+static int __init big_sur_ge_probe(int index)
+{
+	static const unsigned long remap_size =
+	    BIG_SUR_GE_EMAC_0_HIGHADDR - BIG_SUR_GE_EMAC_0_BASEADDR + 1;
+	struct net_device *netdev;
+	struct big_sur_ge_enet *lp;
+	big_sur_ge_config *config;
+	unsigned int irq;
+	unsigned long maddr;
+	int err;
+
+	switch (index) {
+	case 0:
+		irq = (31 - BIG_SUR_GE_INTC_0_EMAC_0_VEC_ID);
+		break;
+	case 1:
+		irq = (31 - BIG_SUR_GE_INTC_1_EMAC_1_VEC_ID);
+		break;
+	case 2:
+		irq = (31 - BIG_SUR_GE_INTC_2_EMAC_2_VEC_ID);
+		break;
+	default:
+		err = -ENODEV;
+		goto out;
+	}
+
+	config = big_sur_ge_get_config(index);
+	if (!config) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	netdev = alloc_etherdev(sizeof(struct big_sur_ge_enet));
+
+	if (!netdev) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	SET_MODULE_OWNER(netdev);
+
+	netdev->irq = irq;
+
+	lp = (struct big_sur_ge_enet *) netdev->priv;
+	memset(lp, 0, sizeof(struct big_sur_ge_enet));
+	spin_lock_init(&lp->lock);
+	spin_lock(&dev_lock);
+	lp->next_dev = dev_list;
+	dev_list = netdev;
+	spin_unlock(&dev_lock);
+
+	lp->save_base_address = config->base_address;
+	config->base_address =
+	    (unsigned long) ioremap(lp->save_base_address, remap_size);
+	if (!config->base_address) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+
+	if (big_sur_ge_enet_init(&lp->emac, config->device_id) == -1) {
+		printk(KERN_ERR "%s: Could not initialize device.\n",
+		       netdev->name);
+		err = -ENODEV;
+		goto out_unmap;
+	}
+
+	/* Manish: dev_addr value */
+	memcpy(netdev->dev_addr, big_sur_mac_addr_base, 6);
+	if (big_sur_ge_set_mac_address(&lp->emac, netdev->dev_addr) == -1) {
+		printk(KERN_ERR "%s: Could not set MAC address.\n",
+		       netdev->name);
+		err = -EIO;
+		goto out_unmap;
+	}
+
+	/*
+	 * There is no Scatter Gather support but there is a Simple DMA support
+	 */
+	big_sur_ge_set_fifo_recv_handler(&lp->emac, netdev,
+					 big_sur_ge_fifo_recv_handler);
+	big_sur_ge_set_fifo_send_handler(&lp->emac, netdev,
+					 big_sur_ge_fifo_send_handler);
+	netdev->hard_start_xmit = big_sur_ge_fifo_send;
+
+	lp->mii_addr = 0xFF;
+
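+	/*
+	 * Scan MII addresses 0-30 and latch the first PHY that answers a
+	 * BMCR read; fall back to address 0 if nothing responds.
+	 */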
+	for (maddr = 0; maddr < 31; maddr++) {
+		unsigned int reg_data;
+
+		if (big_sur_ge_phy_read
+		    (&lp->emac, maddr, MII_BMCR, &reg_data) == 0) {
+			lp->mii_addr = maddr;
+			break;
+		}
+	}
+
+	if (lp->mii_addr == 0xFF) {
+		lp->mii_addr = 0;
+		printk(KERN_WARNING
+		       "%s: No PHY detected.  Assuming a PHY at address %d.\n",
+		       netdev->name, lp->mii_addr);
+	}
+
+	netdev->open = big_sur_ge_open;
+	netdev->stop = big_sur_ge_close;
+	netdev->get_stats = big_sur_ge_get_stats;	/* Does nothing */
+	netdev->do_ioctl = big_sur_ge_ioctl;
+	netdev->tx_timeout = big_sur_ge_tx_timeout;
+	netdev->watchdog_timeo = TX_TIMEOUT;
+
+	err = register_netdev(netdev);
+	if (err)
+		goto out_unmap;
+
+	printk(KERN_INFO "%s: PMC-Sierra Big Sur Ethernet Device %d  at 0x%08X "
+	       "mapped to 0x%08X, irq=%d\n", netdev->name, index,
+	       lp->save_base_address, config->base_address, netdev->irq);
+
+	return 0;
+
+out_unmap:
+	iounmap((void *) config->base_address);
+
+out_unlock:
+	big_sur_ge_remove_head();
+
+out:
+	return err;
+}
+
+static int __init big_sur_ge_init(void)
+{
+	int index = 0;
+
+	while (big_sur_ge_probe(index++) == 0);
+
+	return (index > 1) ? 0 : -ENODEV;
+}
+
+static void __exit big_sur_ge_cleanup(void)
+{
+	while (dev_list)
+		big_sur_ge_remove_head();
+}
+
+module_init(big_sur_ge_init);
+module_exit(big_sur_ge_cleanup);
+
+MODULE_AUTHOR("Manish Lachwani <lachwani@pmc-sierra.com>");
+MODULE_DESCRIPTION("PMC-Sierra Big Sur Ethernet MAC Driver");
+MODULE_LICENSE("GPL");
diff -Naur linux-2.6.19/drivers/net/big_sur_ge.h linux-mips-2.6.19/drivers/net/big_sur_ge.h
--- linux-2.6.19/drivers/net/big_sur_ge.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/big_sur_ge.h	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,713 @@
+/*
+ * drivers/net/big_sur_ge.h - Driver for PMC-Sierra Big Sur
+ * ethernet ports
+ *
+ * Copyright (C) 2003 PMC-Sierra Inc.
+ * Author : Manish Lachwani (lachwani@pmc-sierra.com)
+ * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ */
+
+#ifndef	__BIG_SUR_GE_H__
+#define	__BIG_SUR_GE_H__
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define	BIG_SUR_DEVICE_NAME	"big sur"
+#define	BIG_SUR_DEVICE_DESC	"Big Sur Ethernet 10/100 MAC"
+
+#define BIG_SUR_GE_BASE			0xbb000000
+
+#define	BIG_SUR_GE_WRITE(ofs,data)	*(volatile u32 *)(BIG_SUR_GE_BASE+(ofs)) = data
+
+#define	BIG_SUR_GE_READ(ofs)		*(volatile u32 *)(BIG_SUR_GE_BASE+(ofs))
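+
+/*
+ * Illustrative use of the accessors above (assuming the default base):
+ * BIG_SUR_GE_READ(BIG_SUR_GE_ECR_OFFSET) reads the EMAC control register,
+ * BIG_SUR_GE_WRITE(BIG_SUR_GE_ECR_OFFSET, val) writes it back.
+ */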
+
+/* Manish : Need to fix these defines later */
+#define	BIG_SUR_GE_EMAC_0_HIGHADDR
+#define	BIG_SUR_GE_EMAC_0_BASEADDR
+#define	BIG_SUR_GE_INTC_0_EMAC_0_VEC_ID		1
+#define	BIG_SUR_GE_INTC_1_EMAC_1_VEC_ID		2
+#define	BIG_SUR_GE_INTC_2_EMAC_2_VEC_ID		3
+#define	BIG_SUR_GE_EMAC_0_ERR_COUNT_EXIST
+#define	BIG_SUR_GE_EMAC_0_DMA_PRESENT
+#define	BIG_SUR_GE_EMAC_0_MII_EXIST
+#define	BIG_SUR_GE_OPB_ETHERNET_0_BASEADDR
+#define	BIG_SUR_GE_EMAC_0_DEVICE_ID
+#define	BIG_SUR_GE_OPB_ETHERNET_0_ERR_COUNT_EXIST
+#define	BIG_SUR_GE_OPB_ETHERNET_0_DMA_PRESENT
+#define	BIG_SUR_GE_OPB_ETHERNET_0_MII_EXIST
+#define	BIG_SUR_GE_OPB_ETHERNET_0_DEVICE_ID
+
+#define	BIG_SUR_GE_FIFO_WIDTH_BYTE_COUNT	4UL
+/* IPIF specific defines */
+#define XIIF_V123B_DISR_OFFSET     0UL  /* device interrupt status register */
+#define XIIF_V123B_DIPR_OFFSET     4UL  /* device interrupt pending register */
+#define XIIF_V123B_DIER_OFFSET     8UL  /* device interrupt enable register */
+#define XIIF_V123B_DIIR_OFFSET     24UL /* device interrupt ID register */
+#define XIIF_V123B_DGIER_OFFSET    28UL /* device global interrupt enable reg */
+#define XIIF_V123B_IISR_OFFSET     32UL /* IP interrupt status register */
+#define XIIF_V123B_IIER_OFFSET     40UL /* IP interrupt enable register */
+#define XIIF_V123B_RESETR_OFFSET   64UL /* reset register */
+#define XIIF_V123B_RESET_MASK             0xAUL
+#define	XIIF_V123B_ERROR_MASK		0x1UL
+
+/* defines */
+#define BIG_SUR_GE_UNICAST_OPTION        	0x00000001
+#define BIG_SUR_GE_BROADCAST_OPTION      	0x00000002
+#define BIG_SUR_GE_PROMISC_OPTION        	0x00000004
+#define BIG_SUR_GE_FDUPLEX_OPTION        	0x00000008
+#define BIG_SUR_GE_POLLED_OPTION         	0x00000010
+#define BIG_SUR_GE_LOOPBACK_OPTION       	0x00000020
+#define BIG_SUR_GE_FLOW_CONTROL_OPTION   	0x00000080
+#define BIG_SUR_GE_INSERT_PAD_OPTION     	0x00000100
+#define BIG_SUR_GE_INSERT_FCS_OPTION     	0x00000200
+#define BIG_SUR_GE_INSERT_ADDR_OPTION    	0x00000400
+#define BIG_SUR_GE_OVWRT_ADDR_OPTION     	0x00000800
+#define BIG_SUR_GE_STRIP_PAD_FCS_OPTION  	0x00002000
+
+/* Not Supported */
+#define BIG_SUR_GE_MULTICAST_OPTION      	0x00000040
+#define BIG_SUR_GE_FLOW_CONTROL_OPTION   	0x00000080
+#define BIG_SUR_GE_INSERT_PAD_OPTION     	0x00000100
+#define BIG_SUR_GE_INSERT_FCS_OPTION     	0x00000200
+#define BIG_SUR_GE_INSERT_ADDR_OPTION    	0x00000400
+#define BIG_SUR_GE_OVWRT_ADDR_OPTION     	0x00000800
+#define BIG_SUR_GE_STRIP_PAD_OPTION      	0x00001000
+#define BIG_SUR_GE_STRIP_FCS_OPTION     	0x00002000
+
+
+/* Defaults for Interrupt Coalescing in the SG DMA Engine */
+#define BIG_SUR_GE_SGDMA_DFT_THRESHOLD     1   /* Default pkt threshold */
+#define BIG_SUR_GE_SGDMA_MAX_THRESHOLD     255 /* Maximum pkt threshold */
+#define BIG_SUR_GE_SGDMA_DFT_WAITBOUND     5   /* Default pkt wait bound (msec) */
+#define BIG_SUR_GE_SGDMA_MAX_WAITBOUND     1023        /* Maximum pkt wait bound (msec) */
+
+/* Direction */
+#define BIG_SUR_GE_SEND    1
+#define BIG_SUR_GE_RECV    2
+
+/* SG DMA */
+#define BIG_SUR_GE_SGDMA_NODELAY     0 /* start SG DMA immediately */
+#define BIG_SUR_GE_SGDMA_DELAY       1 /* do not start SG DMA */
+
+#define BIG_SUR_GE_CFG_NO_IPIF             0   /* Not supported by the driver */
+#define BIG_SUR_GE_CFG_NO_DMA              1   /* No DMA */
+#define BIG_SUR_GE_CFG_SIMPLE_DMA          2   /* Simple DMA */
+#define BIG_SUR_GE_CFG_DMA_SG              3   /* DMA scatter gather */
+
+#define BIG_SUR_GE_MAC_ADDR_SIZE   6   /* six-byte MAC address */
+#define BIG_SUR_GE_MTU             1500        /* max size of Ethernet frame */
+#define BIG_SUR_GE_HDR_SIZE        14  /* size of Ethernet header */
+#define BIG_SUR_GE_HDR_VLAN_SIZE   18  /* size of Ethernet header with VLAN */
+#define BIG_SUR_GE_TRL_SIZE        4   /* size of Ethernet trailer (FCS) */
+#define BIG_SUR_GE_MAX_FRAME_SIZE  \
+		(BIG_SUR_GE_MTU + BIG_SUR_GE_HDR_SIZE + BIG_SUR_GE_TRL_SIZE)
+
+#define BIG_SUR_GE_MAX_VLAN_FRAME_SIZE  \
+		(BIG_SUR_GE_MTU + BIG_SUR_GE_HDR_VLAN_SIZE + BIG_SUR_GE_TRL_SIZE)
+
+/* Send and Receive buffers */
+#define BIG_SUR_GE_MIN_RECV_BUFS   32  /* minimum # of recv buffers */
+#define BIG_SUR_GE_DFT_RECV_BUFS   64  /* default # of recv buffers */
+
+#define BIG_SUR_GE_MIN_SEND_BUFS   16  /* minimum # of send buffers */
+#define BIG_SUR_GE_DFT_SEND_BUFS   32  /* default # of send buffers */
+
+#define BIG_SUR_GE_MIN_BUFFERS     (BIG_SUR_GE_MIN_RECV_BUFS + BIG_SUR_GE_MIN_SEND_BUFS)
+#define BIG_SUR_GE_DFT_BUFFERS     (BIG_SUR_GE_DFT_RECV_BUFS + BIG_SUR_GE_DFT_SEND_BUFS)
+
+/* Send and Receive Descriptors */
+#define BIG_SUR_GE_MIN_RECV_DESC   16  /* minimum # of recv descriptors */
+#define BIG_SUR_GE_DFT_RECV_DESC   32  /* default # of recv descriptors */
+
+#define BIG_SUR_GE_MIN_SEND_DESC   8   /* minimum # of send descriptors */
+#define BIG_SUR_GE_DFT_SEND_DESC   16  /* default # of send descriptors */
+
+/* FIFO Specific Defines */
+#define BIG_SUR_GE_READ_FIFO_TYPE      0       /* a read FIFO */
+#define BIG_SUR_GE_WRITE_FIFO_TYPE     1       /* a write FIFO */
+#define BIG_SUR_GE_RESET_REG_OFFSET            0UL
+#define BIG_SUR_GE_MODULE_INFO_REG_OFFSET      0UL
+#define BIG_SUR_GE_COUNT_STATUS_REG_OFFSET     4UL
+#define BIG_SUR_GE_RESET_FIFO_MASK             0x0000000A
+#define BIG_SUR_GE_COUNT_MASK                  0x0000FFFF
+#define BIG_SUR_GE_DEADLOCK_MASK               0x20000000
+#define BIG_SUR_GE_ALMOST_EMPTY_FULL_MASK      0x40000000
+#define BIG_SUR_GE_EMPTY_FULL_MASK             0x80000000
+
+#define BIG_SUR_GE_FIFO_RESET(fifo)	\
+	BIG_SUR_GE_WRITE((fifo)->reg_base_addr + BIG_SUR_GE_RESET_REG_OFFSET, BIG_SUR_GE_RESET_FIFO_MASK)
+
+#define	BIG_SUR_GE_GET_COUNT(fifo)	\
+	(BIG_SUR_GE_READ((fifo)->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) & 	\
+							BIG_SUR_GE_COUNT_MASK)
+
+#define	BIG_SUR_GE_IS_ALMOST_EMPTY(fifo)	\
+		(BIG_SUR_GE_READ(fifo->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) &	\
+							BIG_SUR_GE_ALMOST_EMPTY_FULL_MASK)
+
+#define	BIG_SUR_GE_IS_ALMOST_FULL(fifo)  \
+		(BIG_SUR_GE_READ(fifo->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) &   \
+							BIG_SUR_GE_ALMOST_EMPTY_FULL_MASK)
+
+#define BIG_SUR_GE_IS_EMPTY(fifo)  \
+		(BIG_SUR_GE_READ(fifo->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) &   \
+							BIG_SUR_GE_EMPTY_FULL_MASK)
+
+#define BIG_SUR_GE_IS_FULL(fifo)  \
+	(BIG_SUR_GE_READ(fifo->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) &   \
+							BIG_SUR_GE_EMPTY_FULL_MASK)
+
+#define	BIG_SUR_GE_IS_DEADLOCKED(fifo)	\
+	(BIG_SUR_GE_READ((fifo)->reg_base_addr + BIG_SUR_GE_COUNT_STATUS_REG_OFFSET) &   \
+							BIG_SUR_GE_DEADLOCK_MASK)
+
+/* Device Config */
+typedef struct _big_sur_ge_config {
+	u16		device_id;
+	u32		base_address;
+	u32		has_counters;
+	u32		has_sg_dma;
+	u8		dma_config;
+	u32		has_mii;
+} big_sur_ge_config;
+
+#define BIG_SUR_GE_SIZE_IN_WORDS           10
+typedef unsigned long xbuf_descriptor[BIG_SUR_GE_SIZE_IN_WORDS];
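+
+/*
+ * A buffer descriptor is just an array of ten words; the
+ * BIG_SUR_GE_*_OFFSET indices and xbuf_descriptor_* accessor macros
+ * further down pick out the individual fields (control, source,
+ * destination, length, status, next pointer, id, flags).
+ */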
+
+/* Callback Functions */
+typedef void (*big_sur_sg_handler) (void *callback, xbuf_descriptor *desc, u32 num_desc);
+typedef	void (*big_sur_fifo_handler) (void *callback);
+typedef void (*big_sur_irq_handler) (void *instance);
+
+typedef struct _xdma_channel_tag {
+	u32			reg_base_address;
+	u32			base_address;
+	u32			ready;
+	xbuf_descriptor		*put_ptr;
+	xbuf_descriptor		*get_ptr;
+	xbuf_descriptor		*commit_ptr;
+	xbuf_descriptor		*last_ptr;
+
+	u32			total_desc_count;
+	u32			active_desc_count;
+} xdma_channel;
+
+typedef struct _packet_fifo {
+	u32		reg_base_addr;
+	u32		ready_status;
+	u32		data_base_address;
+} packet_fifo;
+
+
+/* Big Sur GE driver structure */
+typedef struct _big_sur_ge {
+	u32		base_address;
+	u32		started;
+	u32		ready;
+	u32		polled;
+	u32		dma_sg;
+
+	u8		dma_config;
+	u32		has_mii;
+	u32		has_mcast_hash_table;
+
+	/* For the FIFO and simple DMA case only */
+	packet_fifo	recv_fifo;
+	packet_fifo	send_fifo;
+
+	big_sur_fifo_handler	big_sur_ge_fifo_recv_handler;
+	big_sur_fifo_handler	big_sur_ge_fifo_send_handler;
+
+	void	*fifo_send_ref;
+	void	*fifo_recv_ref;	
+
+	/* For SG DMA only */
+	xdma_channel	recv_channel;
+	xdma_channel	send_channel;
+} big_sur_ge;
+
+/* Offset of the MAC registers from the IPIF base address */
+#define BIG_SUR_GE_REG_OFFSET     0x1100UL
+
+/*
+ * Register offsets for the Ethernet MAC. Each register is 32 bits.
+ */
+#define BIG_SUR_GE_EMIR_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x0)        /* EMAC Module ID */
+#define BIG_SUR_GE_ECR_OFFSET    (BIG_SUR_GE_REG_OFFSET + 0x4)        /* MAC Control */
+#define BIG_SUR_GE_IFGP_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x8)        /* Interframe Gap */
+#define BIG_SUR_GE_SAH_OFFSET    (BIG_SUR_GE_REG_OFFSET + 0xC)        /* Station addr, high */
+#define BIG_SUR_GE_SAL_OFFSET    (BIG_SUR_GE_REG_OFFSET + 0x10)       /* Station addr, low */
+#define BIG_SUR_GE_MGTCR_OFFSET  (BIG_SUR_GE_REG_OFFSET + 0x14)       /* MII mgmt control */
+#define BIG_SUR_GE_MGTDR_OFFSET  (BIG_SUR_GE_REG_OFFSET + 0x18)       /* MII mgmt data */
+#define BIG_SUR_GE_RPLR_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x1C)       /* Rx packet length */
+#define BIG_SUR_GE_TPLR_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x20)       /* Tx packet length */
+#define BIG_SUR_GE_TSR_OFFSET    (BIG_SUR_GE_REG_OFFSET + 0x24)       /* Tx status */
+#define BIG_SUR_GE_RMFC_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x28)       /* Rx missed frames */
+#define BIG_SUR_GE_RCC_OFFSET    (BIG_SUR_GE_REG_OFFSET + 0x2C)       /* Rx collisions */
+#define BIG_SUR_GE_RFCSEC_OFFSET (BIG_SUR_GE_REG_OFFSET + 0x30)       /* Rx FCS errors */
+#define BIG_SUR_GE_RAEC_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x34)       /* Rx alignment errors */
+#define BIG_SUR_GE_TEDC_OFFSET   (BIG_SUR_GE_REG_OFFSET + 0x38)       /* Transmit excess
+                                                         * deferral cnt */
+/*
+ * Register offsets for the IPIF components
+ */
+#define BIG_SUR_GE_ISR_OFFSET           0x20UL /* Interrupt status */
+
+#define BIG_SUR_GE_DMA_OFFSET           0x2300UL
+#define BIG_SUR_GE_DMA_SEND_OFFSET      (BIG_SUR_GE_DMA_OFFSET + 0x0) /* DMA send channel */
+#define BIG_SUR_GE_DMA_RECV_OFFSET      (BIG_SUR_GE_DMA_OFFSET + 0x40)        /* DMA recv channel */
+
+#define BIG_SUR_GE_PFIFO_OFFSET         0x2000UL
+#define BIG_SUR_GE_PFIFO_TXREG_OFFSET   (BIG_SUR_GE_PFIFO_OFFSET + 0x0)       /* Tx registers */
+#define BIG_SUR_GE_PFIFO_RXREG_OFFSET   (BIG_SUR_GE_PFIFO_OFFSET + 0x10)      /* Rx registers */
+#define BIG_SUR_GE_PFIFO_TXDATA_OFFSET  (BIG_SUR_GE_PFIFO_OFFSET + 0x100)     /* Tx keyhole */
+#define BIG_SUR_GE_PFIFO_RXDATA_OFFSET  (BIG_SUR_GE_PFIFO_OFFSET + 0x200)     /* Rx keyhole */
+
+/*
+ * EMAC Module Identification Register (EMIR)
+ */
+#define BIG_SUR_GE_EMIR_VERSION_MASK    0xFFFF0000UL   /* Device version */
+#define BIG_SUR_GE_EMIR_TYPE_MASK       0x0000FF00UL   /* Device type */
+
+/*
+ * EMAC Control Register (ECR)
+ */
+#define BIG_SUR_GE_ECR_FULL_DUPLEX_MASK         0x80000000   /* Full duplex mode */
+#define BIG_SUR_GE_ECR_XMIT_RESET_MASK          0x40000000   /* Reset transmitter */
+#define BIG_SUR_GE_ECR_XMIT_ENABLE_MASK         0x20000000   /* Enable transmitter */
+#define BIG_SUR_GE_ECR_RECV_RESET_MASK          0x10000000   /* Reset receiver */
+#define BIG_SUR_GE_ECR_RECV_ENABLE_MASK         0x08000000   /* Enable receiver */
+#define BIG_SUR_GE_ECR_PHY_ENABLE_MASK          0x04000000   /* Enable PHY */
+#define BIG_SUR_GE_ECR_XMIT_PAD_ENABLE_MASK     0x02000000   /* Enable xmit pad insert */
+#define BIG_SUR_GE_ECR_XMIT_FCS_ENABLE_MASK     0x01000000   /* Enable xmit FCS insert */
+#define BIG_SUR_GE_ECR_XMIT_ADDR_INSERT_MASK    0x00800000   /* Enable xmit source addr insertion */
+#define BIG_SUR_GE_ECR_XMIT_ERROR_INSERT_MASK   0x00400000   /* Insert xmit error */
+#define BIG_SUR_GE_ECR_XMIT_ADDR_OVWRT_MASK     0x00200000   /* Enable xmit source addr overwrite */
+#define BIG_SUR_GE_ECR_LOOPBACK_MASK            0x00100000   /* Enable internal loopback */
+#define BIG_SUR_GE_ECR_RECV_PAD_ENABLE_MASK     0x00080000   /* Enable recv pad strip */
+#define BIG_SUR_GE_ECR_RECV_FCS_ENABLE_MASK     0x00040000   /* Enable recv FCS strip */
+#define BIG_SUR_GE_ECR_RECV_STRIP_ENABLE_MASK   0x00080000   /* Enable recv pad/fcs strip */
+#define BIG_SUR_GE_ECR_UNICAST_ENABLE_MASK      0x00020000   /* Enable unicast addr */
+#define BIG_SUR_GE_ECR_MULTI_ENABLE_MASK        0x00010000   /* Enable multicast addr */
+#define BIG_SUR_GE_ECR_BROAD_ENABLE_MASK        0x00008000   /* Enable broadcast addr */
+#define BIG_SUR_GE_ECR_PROMISC_ENABLE_MASK      0x00004000   /* Enable promiscuous mode */
+#define BIG_SUR_GE_ECR_RECV_ALL_MASK            0x00002000   /* Receive all frames */
+#define BIG_SUR_GE_ECR_RESERVED2_MASK           0x00001000   /* Reserved */
+#define BIG_SUR_GE_ECR_MULTI_HASH_ENABLE_MASK   0x00000800   /* Enable multicast hash */
+#define BIG_SUR_GE_ECR_PAUSE_FRAME_MASK         0x00000400   /* Interpret pause frames */
+#define BIG_SUR_GE_ECR_CLEAR_HASH_MASK          0x00000200   /* Clear hash table */
+#define BIG_SUR_GE_ECR_ADD_HASH_ADDR_MASK       0x00000100  /* Add hash table address */
+
+/*
+ * Interframe Gap Register (IFGR)
+ */
+#define BIG_SUR_GE_IFGP_PART1_MASK         0xF8000000        /* Interframe Gap Part1 */
+#define BIG_SUR_GE_IFGP_PART1_SHIFT        27
+#define BIG_SUR_GE_IFGP_PART2_MASK         0x07C00000        /* Interframe Gap Part2 */
+#define BIG_SUR_GE_IFGP_PART2_SHIFT        22
+
+/*
+ * Station Address High Register (SAH)
+ */
+#define BIG_SUR_GE_SAH_ADDR_MASK           0x0000FFFF        /* Station address high bytes */
+
+/*
+ * Station Address Low Register (SAL)
+ */
+#define BIG_SUR_GE_SAL_ADDR_MASK           0xFFFFFFFF        /* Station address low bytes */
+
+/*
+ * MII Management Control Register (MGTCR)
+ */
+#define BIG_SUR_GE_MGTCR_START_MASK        0x80000000        /* Start/Busy */
+#define BIG_SUR_GE_MGTCR_RW_NOT_MASK       0x40000000        /* Read/Write Not (direction) */
+#define BIG_SUR_GE_MGTCR_PHY_ADDR_MASK     0x3E000000        /* PHY address */
+#define BIG_SUR_GE_MGTCR_PHY_ADDR_SHIFT    25  /* PHY address shift */
+#define BIG_SUR_GE_MGTCR_REG_ADDR_MASK     0x01F00000        /* Register address */
+#define BIG_SUR_GE_MGTCR_REG_ADDR_SHIFT    20  /* Register addr shift */
+#define BIG_SUR_GE_MGTCR_MII_ENABLE_MASK   0x00080000        /* Enable MII from EMAC */
+#define BIG_SUR_GE_MGTCR_RD_ERROR_MASK     0x00040000        /* MII mgmt read error */
+
+/*
+ * MII Management Data Register (MGTDR)
+ */
+#define BIG_SUR_GE_MGTDR_DATA_MASK         0x0000FFFF        /* MII data */
+
+/*
+ * Receive Packet Length Register (RPLR)
+ */
+#define BIG_SUR_GE_RPLR_LENGTH_MASK        0x0000FFFF        /* Receive packet length */
+
+/*
+ * Transmit Packet Length Register (TPLR)
+ */
+#define BIG_SUR_GE_TPLR_LENGTH_MASK        0x0000FFFF       /* Transmit packet length */
+
+/*
+ * Transmit Status Register (TSR)
+ */
+#define BIG_SUR_GE_TSR_EXCESS_DEFERRAL_MASK 0x80000000       /* Transmit excess deferral */
+#define BIG_SUR_GE_TSR_FIFO_UNDERRUN_MASK   0x40000000       /* Packet FIFO underrun */
+#define BIG_SUR_GE_TSR_ATTEMPTS_MASK        0x3E000000      /* Transmission attempts */
+#define BIG_SUR_GE_TSR_LATE_COLLISION_MASK  0x01000000      /* Transmit late collision */
+
+/*
+ * Receive Missed Frame Count (RMFC)
+ */
+#define BIG_SUR_GE_RMFC_DATA_MASK          0x0000FFFF
+
+/*
+ * Receive Collision Count (RCC)
+ */
+#define BIG_SUR_GE_RCC_DATA_MASK           0x0000FFFF
+/*
+ * Receive FCS Error Count (RFCSEC)
+ */
+#define BIG_SUR_GE_RFCSEC_DATA_MASK        0x0000FFFF
+
+/*
+ * Receive Alignment Error Count (RALN)
+ */
+#define BIG_SUR_GE_RAEC_DATA_MASK          0x0000FFFF
+
+/*
+ * Transmit Excess Deferral Count (TEDC)
+ */
+#define BIG_SUR_GE_TEDC_DATA_MASK          0x0000FFFF
+
+/*
+ * EMAC Interrupt Registers (Status and Enable) masks. These registers are
+ * part of the IPIF IP Interrupt registers
+ */
+#define BIG_SUR_GE_EIR_XMIT_DONE_MASK         0x00000001     /* Xmit complete */
+#define BIG_SUR_GE_EIR_RECV_DONE_MASK         0x00000002     /* Recv complete */
+#define BIG_SUR_GE_EIR_XMIT_ERROR_MASK        0x00000004     /* Xmit error */
+#define BIG_SUR_GE_EIR_RECV_ERROR_MASK        0x00000008     /* Recv error */
+#define BIG_SUR_GE_EIR_XMIT_SFIFO_EMPTY_MASK  0x00000010     /* Xmit status fifo empty */
+#define BIG_SUR_GE_EIR_RECV_LFIFO_EMPTY_MASK  0x00000020     /* Recv length fifo empty */
+#define BIG_SUR_GE_EIR_XMIT_LFIFO_FULL_MASK   0x00000040     /* Xmit length fifo full */
+#define BIG_SUR_GE_EIR_RECV_LFIFO_OVER_MASK   0x00000080     /* Recv length fifo overrun */
+#define BIG_SUR_GE_EIR_RECV_LFIFO_UNDER_MASK  0x00000100     /* Recv length fifo underrun */
+#define BIG_SUR_GE_EIR_XMIT_SFIFO_OVER_MASK   0x00000200     /* Xmit status fifo overrun */
+#define BIG_SUR_GE_EIR_XMIT_SFIFO_UNDER_MASK  0x00000400     /* Transmit status fifo underrun */
+#define BIG_SUR_GE_EIR_XMIT_LFIFO_OVER_MASK   0x00000800     /* Transmit length fifo overrun */
+#define BIG_SUR_GE_EIR_XMIT_LFIFO_UNDER_MASK  0x00001000     /* Transmit length fifo underrun */
+#define BIG_SUR_GE_EIR_XMIT_PAUSE_MASK        0x00002000     /* Transmit pause pkt received */
+#define BIG_SUR_GE_EIR_RECV_DFIFO_OVER_MASK   0x00004000     /* Receive data fifo overrun */
+#define BIG_SUR_GE_EIR_RECV_MISSED_FRAME_MASK 0x00008000     /* Receive missed frame error */
+#define BIG_SUR_GE_EIR_RECV_COLLISION_MASK    0x00010000     /* Receive collision error */
+#define BIG_SUR_GE_EIR_RECV_FCS_ERROR_MASK    0x00020000     /* Receive FCS error */
+#define BIG_SUR_GE_EIR_RECV_LEN_ERROR_MASK    0x00040000     /* Receive length field error */
+#define BIG_SUR_GE_EIR_RECV_SHORT_ERROR_MASK  0x00080000     /* Receive short frame error */
+#define BIG_SUR_GE_EIR_RECV_LONG_ERROR_MASK   0x00100000     /* Receive long frame error */
+#define BIG_SUR_GE_EIR_RECV_ALIGN_ERROR_MASK  0x00200000     /* Receive alignment error */
+
+#define	BIG_SUR_GE_READ_REG(base_addr, reg_offset)	\
+		BIG_SUR_GE_READ(base_addr + reg_offset)
+
+#define	BIG_SUR_GE_WRITE_REG(base_addr, reg_offset, data)	\
+		 BIG_SUR_GE_WRITE(base_addr + reg_offset, data)
+
+#define BIG_SUR_GE_CONTROL_REG(base_addr, mask)		\
+		BIG_SUR_GE_WRITE(base_addr + BIG_SUR_GE_ECR_OFFSET, mask)
+
+/* Set the MAC Address */
+#define	big_sur_ge_set_mac(base_address, address)				\
+{										\
+	u32	mac_addr;							\
+										\
+	mac_addr = ((address)[0] << 8) | (address)[1];				\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_SAH_OFFSET, mac_addr);	\
+										\
+	mac_addr = ((address)[2] << 24) | ((address)[3] << 16) |		\
+		   ((address)[4] << 8) | (address)[5];				\
+										\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_SAL_OFFSET, mac_addr);	\
+}
+
+/* Enable the MAC unit */
+#define	big_sur_ge_mac_enable(base_address)					\
+{										\
+	u32	control;							\
+	control = BIG_SUR_GE_READ(base_address + BIG_SUR_GE_ECR_OFFSET);		\
+	control &= ~(BIG_SUR_GE_ECR_XMIT_RESET_MASK | BIG_SUR_GE_ECR_RECV_RESET_MASK);	\
+	control |= (BIG_SUR_GE_ECR_XMIT_ENABLE_MASK | BIG_SUR_GE_ECR_RECV_ENABLE_MASK);	\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_ECR_OFFSET, control);		\
+}
+
+/* Disable the MAC unit */
+#define	big_sur_ge_mac_disable(base_address)					\
+{										\
+	u32	control;							\
+	control = BIG_SUR_GE_READ(base_address + BIG_SUR_GE_ECR_OFFSET);		\
+	control &= ~(BIG_SUR_GE_ECR_XMIT_ENABLE_MASK | BIG_SUR_GE_ECR_RECV_ENABLE_MASK);	\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_ECR_OFFSET, control);		\
+}
+
+/* Check if the Tx is done */
+#define	big_sur_ge_tx_done(base_address)						\
+	(BIG_SUR_GE_READ(base_address + BIG_SUR_GE_ISR_OFFSET) & BIG_SUR_GE_EIR_XMIT_DONE_MASK)
+
+
+/* Check if Rx FIFO is empty */
+#define	big_sur_ge_rx_empty(base_address)						\
+	(!(BIG_SUR_GE_READ(base_address + BIG_SUR_GE_ISR_OFFSET) & BIG_SUR_GE_EIR_RECV_DONE_MASK))
+
+/* Reset the MAC PHY */
+#define	big_sur_ge_reset_phy(base_address)						\
+{											\
+	u32	control;								\
+	control = BIG_SUR_GE_READ(base_address + BIG_SUR_GE_ECR_OFFSET);			\
+	control &= ~(BIG_SUR_GE_ECR_PHY_ENABLE_MASK);						\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_ECR_OFFSET, control);			\
+	control |= BIG_SUR_GE_ECR_PHY_ENABLE_MASK;						\
+	BIG_SUR_GE_WRITE(base_address + BIG_SUR_GE_ECR_OFFSET, control);			\
+}
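+
+/*
+ * Typical (illustrative) bring-up sequence built from the helpers above:
+ *
+ *	big_sur_ge_mac_disable(base);
+ *	big_sur_ge_set_mac(base, netdev->dev_addr);
+ *	big_sur_ge_reset_phy(base);
+ *	big_sur_ge_mac_enable(base);
+ *
+ * The exact order used by the driver lives in big_sur_ge.c.
+ */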
+
+/* DMA SG defines */
+#define BIG_SUR_GE_CONTROL_LAST_BD_MASK        0x02000000
+#define BIG_SUR_GE_STATUS_LAST_BD_MASK         0x10000000
+#define BIG_SUR_GE_RST_REG_OFFSET      0       /* reset register */
+#define BIG_SUR_GE_MI_REG_OFFSET       0       /* module information register */
+#define BIG_SUR_GE_DMAC_REG_OFFSET     4       /* DMA control register */
+#define BIG_SUR_GE_SA_REG_OFFSET       8       /* source address register */
+#define BIG_SUR_GE_DA_REG_OFFSET       12      /* destination address register */
+#define BIG_SUR_GE_LEN_REG_OFFSET      16      /* length register */
+#define BIG_SUR_GE_DMAS_REG_OFFSET     20      /* DMA status register */
+#define BIG_SUR_GE_BDA_REG_OFFSET      24      /* buffer descriptor address register */
+#define BIG_SUR_GE_SWCR_REG_OFFSET 28  /* software control register */
+#define BIG_SUR_GE_UPC_REG_OFFSET      32      /* unserviced packet count register */
+#define BIG_SUR_GE_PCT_REG_OFFSET      36      /* packet count threshold register */
+#define BIG_SUR_GE_PWB_REG_OFFSET      40      /* packet wait bound register */
+#define BIG_SUR_GE_IS_REG_OFFSET       44      /* interrupt status register */
+#define BIG_SUR_GE_IE_REG_OFFSET       48      /* interrupt enable register */
+
+#define BIG_SUR_GE_RESET_MASK                          0x0000000A
+
+/* Buffer Descriptor Control */
+
+#define BIG_SUR_GE_DEVICE_STATUS_OFFSET    0
+#define BIG_SUR_GE_CONTROL_OFFSET          1
+#define BIG_SUR_GE_SOURCE_OFFSET           2
+#define BIG_SUR_GE_DESTINATION_OFFSET      3
+#define BIG_SUR_GE_LENGTH_OFFSET           4
+#define BIG_SUR_GE_STATUS_OFFSET           5
+#define BIG_SUR_GE_NEXT_PTR_OFFSET         6
+#define BIG_SUR_GE_ID_OFFSET               7
+#define BIG_SUR_GE_FLAGS_OFFSET            8
+#define BIG_SUR_GE_RQSTED_LENGTH_OFFSET    9
+
+#define BIG_SUR_GE_FLAGS_LOCKED_MASK       1
+
+#define	xbuf_descriptor_init(base)				\
+{								\
+	(*((u32 *)base + BIG_SUR_GE_CONTROL_OFFSET) = 0);		\
+	(*((u32 *)base + BIG_SUR_GE_SOURCE_OFFSET) = 0);        \
+    	(*((u32 *)base + BIG_SUR_GE_DESTINATION_OFFSET) = 0);   \
+    	(*((u32 *)base + BIG_SUR_GE_LENGTH_OFFSET) = 0);        \
+    	(*((u32 *)base + BIG_SUR_GE_STATUS_OFFSET) = 0);        \
+    	(*((u32 *)base + BIG_SUR_GE_DEVICE_STATUS_OFFSET) = 0); \
+    	(*((u32 *)base + BIG_SUR_GE_NEXT_PTR_OFFSET) = 0);      \
+    	(*((u32 *)base + BIG_SUR_GE_ID_OFFSET) = 0);            \
+    	(*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET) = 0);         \
+    	(*((u32 *)base + BIG_SUR_GE_RQSTED_LENGTH_OFFSET) = 0); \
+}
+
+#define xbuf_descriptor_GetControl(base)   \
+    (u32)(*((u32 *)base + BIG_SUR_GE_CONTROL_OFFSET))
+
+#define xbuf_descriptor_SetControl(base, Control)  \
+    (*((u32 *)base + BIG_SUR_GE_CONTROL_OFFSET) = (u32)Control)
+
+#define xbuf_descriptor_IsLastControl(base) \
+    (u32)(*((u32 *)base + BIG_SUR_GE_CONTROL_OFFSET) & \
+               BIG_SUR_GE_CONTROL_LAST_BD_MASK)
+
+#define xbuf_descriptor_SetLast(base) \
+    (*((u32 *)base + BIG_SUR_GE_CONTROL_OFFSET) |= BIG_SUR_GE_CONTROL_LAST_BD_MASK)
+
+#define xbuf_descriptor_GetSrcAddress(base) \
+    ((u32 *)(*((u32 *)base + BIG_SUR_GE_SOURCE_OFFSET)))
+
+#define xbuf_descriptor_SetSrcAddress(base, Source) \
+    (*((u32 *)base + BIG_SUR_GE_SOURCE_OFFSET) = (u32)Source)
+
+#define xbuf_descriptor_GetDestAddress(base) \
+    ((u32 *)(*((u32 *)base + BIG_SUR_GE_DESTINATION_OFFSET)))
+
+#define xbuf_descriptor_SetDestAddress(base, Destination) \
+    (*((u32 *)base + BIG_SUR_GE_DESTINATION_OFFSET) = (u32)Destination)
+
+#define xbuf_descriptor_GetLength(base)                           \
+    (u32)(*((u32 *)base + BIG_SUR_GE_RQSTED_LENGTH_OFFSET) -    \
+              *((u32 *)base + BIG_SUR_GE_LENGTH_OFFSET))
+
+#define xbuf_descriptor_SetLength(base, Length)                       \
+{                                                                           \
+    (*((u32 *)base + BIG_SUR_GE_LENGTH_OFFSET) = (u32)(Length));    \
+    (*((u32 *)base + BIG_SUR_GE_RQSTED_LENGTH_OFFSET) = (u32)(Length));\
+}
+
+#define xbuf_descriptor_GetStatus(base)    \
+    (u32)(*((u32 *)base + BIG_SUR_GE_STATUS_OFFSET))
+
+#define xbuf_descriptor_SetStatus(base, Status)    \
+    (*((u32 *)base + BIG_SUR_GE_STATUS_OFFSET) = (u32)Status)
+
+#define xbuf_descriptor_IsLastStatus(base) \
+    (u32)(*((u32 *)base + BIG_SUR_GE_STATUS_OFFSET) & \
+               BIG_SUR_GE_STATUS_LAST_BD_MASK)
+
+#define xbuf_descriptor_GetDeviceStatus(base) \
+    ((u32)(*((u32 *)base + BIG_SUR_GE_DEVICE_STATUS_OFFSET)))
+
+#define xbuf_descriptor_SetDeviceStatus(base, Status) \
+    (*((u32 *)base + BIG_SUR_GE_DEVICE_STATUS_OFFSET) = (u32)Status)
+
+#define xbuf_descriptor_GetNextPtr(base) \
+    (xbuf_descriptor *)(*((u32 *)base + BIG_SUR_GE_NEXT_PTR_OFFSET))
+
+#define xbuf_descriptor_SetNextPtr(base, NextPtr) \
+    (*((u32 *)base + BIG_SUR_GE_NEXT_PTR_OFFSET) = (u32)NextPtr)
+
+#define xbuf_descriptor_GetId(base) \
+    (u32)(*((u32 *)base + BIG_SUR_GE_ID_OFFSET))
+
+#define xbuf_descriptor_SetId(base, Id) \
+    (*((u32 *)base + BIG_SUR_GE_ID_OFFSET) = (u32)Id)
+
+#define xbuf_descriptor_GetFlags(base) \
+    (u32)(*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET))
+
+#define xbuf_descriptor_SetFlags(base, Flags) \
+    (*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET) = (u32)Flags)
+
+#define xbuf_descriptor_Lock(base) \
+    (*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET) |= BIG_SUR_GE_FLAGS_LOCKED_MASK)
+
+#define xbuf_descriptor_Unlock(base) \
+    (*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET) &= ~BIG_SUR_GE_FLAGS_LOCKED_MASK)
+
+#define xbuf_descriptor_IsLocked(base) \
+	(*((u32 *)base + BIG_SUR_GE_FLAGS_OFFSET) & BIG_SUR_GE_FLAGS_LOCKED_MASK)
+
+#define BIG_SUR_GE_DMACR_SOURCE_INCR_MASK      0x80000000UL    /* increment source address */
+#define BIG_SUR_GE_DMACR_DEST_INCR_MASK        0x40000000UL    /* increment dest address */
+#define BIG_SUR_GE_DMACR_SOURCE_LOCAL_MASK 0x20000000UL        /* local source address */
+#define BIG_SUR_GE_DMACR_DEST_LOCAL_MASK       0x10000000UL    /* local dest address */
+#define BIG_SUR_GE_DMACR_SG_DISABLE_MASK       0x08000000UL    /* scatter gather disable */
+#define BIG_SUR_GE_DMACR_GEN_BD_INTR_MASK      0x04000000UL    /* descriptor interrupt */
+#define BIG_SUR_GE_DMACR_LAST_BD_MASK          BIG_SUR_GE_CONTROL_LAST_BD_MASK        /* last buffer */
+#define BIG_SUR_GE_DMASR_BUSY_MASK                     0x80000000UL    /* channel is busy */
+#define BIG_SUR_GE_DMASR_BUS_ERROR_MASK        0x40000000UL    /* bus error occurred */
+#define BIG_SUR_GE_DMASR_BUS_TIMEOUT_MASK      0x20000000UL    /* bus timeout occurred */
+#define BIG_SUR_GE_DMASR_LAST_BD_MASK          BIG_SUR_GE_STATUS_LAST_BD_MASK /* last buffer */
+#define BIG_SUR_GE_DMASR_SG_BUSY_MASK          0x08000000UL    /* scatter gather is busy */
+#define BIG_SUR_GE_IXR_DMA_DONE_MASK           0x1UL   /* dma operation done */
+#define BIG_SUR_GE_IXR_DMA_ERROR_MASK      0x2UL       /* dma operation error */
+#define BIG_SUR_GE_IXR_PKT_DONE_MASK       0x4UL       /* packet done */
+#define BIG_SUR_GE_IXR_PKT_THRESHOLD_MASK      0x8UL   /* packet count threshold */
+#define BIG_SUR_GE_IXR_PKT_WAIT_BOUND_MASK 0x10UL      /* packet wait bound reached */
+#define BIG_SUR_GE_IXR_SG_DISABLE_ACK_MASK 0x20UL      /* scatter gather disable
+                                                   acknowledge occurred */
+#define BIG_SUR_GE_IXR_SG_END_MASK         0x40UL      /* last buffer descriptor
+                                                           disabled scatter gather */
+#define BIG_SUR_GE_IXR_BD_MASK             0x80UL      /* buffer descriptor done */
+
+/* BD control */
+#define BIG_SUR_GE_DFT_SEND_BD_MASK    (BIG_SUR_GE_DMACR_SOURCE_INCR_MASK | \
+                                 BIG_SUR_GE_DMACR_DEST_LOCAL_MASK)
+#define BIG_SUR_GE_DFT_RECV_BD_MASK    (BIG_SUR_GE_DMACR_DEST_INCR_MASK |  \
+                                 BIG_SUR_GE_DMACR_SOURCE_LOCAL_MASK)
+
+/* Interrupts */
+#define BIG_SUR_GE_IPIF_EMAC_MASK      0x00000004UL    /* MAC interrupt */
+#define BIG_SUR_GE_IPIF_SEND_DMA_MASK  0x00000008UL    /* Send DMA interrupt */
+#define BIG_SUR_GE_IPIF_RECV_DMA_MASK  0x00000010UL    /* Receive DMA interrupt */
+#define BIG_SUR_GE_IPIF_RECV_FIFO_MASK 0x00000020UL    /* Receive FIFO interrupt */
+#define BIG_SUR_GE_IPIF_SEND_FIFO_MASK 0x00000040UL    /* Send FIFO interrupt */
+
+#define BIG_SUR_GE_IPIF_DMA_DFT_MASK   (BIG_SUR_GE_IPIF_SEND_DMA_MASK |   \
+                                 BIG_SUR_GE_IPIF_RECV_DMA_MASK |   \
+                                 BIG_SUR_GE_IPIF_EMAC_MASK |       \
+                                 BIG_SUR_GE_IPIF_SEND_FIFO_MASK |  \
+                                 BIG_SUR_GE_IPIF_RECV_FIFO_MASK)
+
+#define BIG_SUR_GE_IPIF_FIFO_DFT_MASK  (BIG_SUR_GE_IPIF_EMAC_MASK |       \
+                                 BIG_SUR_GE_IPIF_SEND_FIFO_MASK |  \
+                                 BIG_SUR_GE_IPIF_RECV_FIFO_MASK)
+
+#define BIG_SUR_GE_IPIF_DMA_DEV_INTR_COUNT   7 /* Number of interrupt sources */
+#define BIG_SUR_GE_IPIF_FIFO_DEV_INTR_COUNT  5 /* Number of interrupt sources */
+#define BIG_SUR_GE_IPIF_DEVICE_INTR_COUNT  7   /* Number of interrupt sources */
+#define BIG_SUR_GE_IPIF_IP_INTR_COUNT      22  /* Number of MAC interrupts */
+
+/* a mask for all transmit interrupts, used in polled mode */
+#define BIG_SUR_GE_EIR_XMIT_ALL_MASK   (BIG_SUR_GE_EIR_XMIT_DONE_MASK |           \
+                                 BIG_SUR_GE_EIR_XMIT_ERROR_MASK |          \
+                                 BIG_SUR_GE_EIR_XMIT_SFIFO_EMPTY_MASK |    \
+                                 BIG_SUR_GE_EIR_XMIT_LFIFO_FULL_MASK)
+
+/* a mask for all receive interrupts, used in polled mode */
+#define BIG_SUR_GE_EIR_RECV_ALL_MASK   (BIG_SUR_GE_EIR_RECV_DONE_MASK |           \
+                                 BIG_SUR_GE_EIR_RECV_ERROR_MASK |          \
+                                 BIG_SUR_GE_EIR_RECV_LFIFO_EMPTY_MASK |    \
+                                 BIG_SUR_GE_EIR_RECV_LFIFO_OVER_MASK |     \
+                                 BIG_SUR_GE_EIR_RECV_LFIFO_UNDER_MASK |    \
+                                 BIG_SUR_GE_EIR_RECV_DFIFO_OVER_MASK |     \
+                                 BIG_SUR_GE_EIR_RECV_MISSED_FRAME_MASK |   \
+                                 BIG_SUR_GE_EIR_RECV_COLLISION_MASK |      \
+                                 BIG_SUR_GE_EIR_RECV_FCS_ERROR_MASK |      \
+                                 BIG_SUR_GE_EIR_RECV_LEN_ERROR_MASK |      \
+                                 BIG_SUR_GE_EIR_RECV_SHORT_ERROR_MASK |    \
+                                 BIG_SUR_GE_EIR_RECV_LONG_ERROR_MASK |     \
+                                 BIG_SUR_GE_EIR_RECV_ALIGN_ERROR_MASK)
+
+/* a default interrupt mask for scatter-gather DMA operation */
+#define BIG_SUR_GE_EIR_DFT_SG_MASK    (BIG_SUR_GE_EIR_RECV_ERROR_MASK |           \
+                                BIG_SUR_GE_EIR_RECV_LFIFO_OVER_MASK |      \
+                                BIG_SUR_GE_EIR_RECV_LFIFO_UNDER_MASK |     \
+                                BIG_SUR_GE_EIR_XMIT_SFIFO_OVER_MASK |      \
+                                BIG_SUR_GE_EIR_XMIT_SFIFO_UNDER_MASK |     \
+                                BIG_SUR_GE_EIR_XMIT_LFIFO_OVER_MASK |      \
+                                BIG_SUR_GE_EIR_XMIT_LFIFO_UNDER_MASK |     \
+                                BIG_SUR_GE_EIR_RECV_DFIFO_OVER_MASK |      \
+                                BIG_SUR_GE_EIR_RECV_MISSED_FRAME_MASK |    \
+                                BIG_SUR_GE_EIR_RECV_COLLISION_MASK |       \
+                                BIG_SUR_GE_EIR_RECV_FCS_ERROR_MASK |       \
+                                BIG_SUR_GE_EIR_RECV_LEN_ERROR_MASK |       \
+                                BIG_SUR_GE_EIR_RECV_SHORT_ERROR_MASK |     \
+                                BIG_SUR_GE_EIR_RECV_LONG_ERROR_MASK |      \
+                                BIG_SUR_GE_EIR_RECV_ALIGN_ERROR_MASK)
+
+/* a default interrupt mask for non-DMA operation (direct FIFOs) */
+#define BIG_SUR_GE_EIR_DFT_FIFO_MASK  (BIG_SUR_GE_EIR_XMIT_DONE_MASK |            \
+                                BIG_SUR_GE_EIR_RECV_DONE_MASK |            \
+                                BIG_SUR_GE_EIR_DFT_SG_MASK)
+
+#define BIG_SUR_GE_DMA_SG_INTR_MASK    (BIG_SUR_GE_IXR_DMA_ERROR_MASK |       \
+                                 BIG_SUR_GE_IXR_PKT_THRESHOLD_MASK |   \
+                                 BIG_SUR_GE_IXR_PKT_WAIT_BOUND_MASK |  \
+                                 BIG_SUR_GE_IXR_SG_END_MASK)
+
+#endif
diff -Naur linux-2.6.19/drivers/net/gt64240eth.c linux-mips-2.6.19/drivers/net/gt64240eth.c
--- linux-2.6.19/drivers/net/gt64240eth.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/gt64240eth.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,1671 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Patton Electronics Company
+ * Copyright (C) 2002 Momentum Computer
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ *         	stevel@mvista.com or support@mvista.com
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
+ *
+ * Modified for the Galileo/Marvell GT-64240 Communication Controller.
+ *
+ * Support for Rx NAPI, Rx checksum offload, IOCTL and ETHTOOL added
+ * Manish Lachwani (lachwani@pmc-sierra.com) - 09/16/2003
+ *
+ * Modified for later version of Linux 2.4 kernel
+ * Manish Lachwani (lachwani@pmc-sierra.com) - 04/29/2004
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/mii.h>
+
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#define DESC_DATA_BE 1
+
+#include "gt64240eth.h"
+
+// enable this port (set hash size to 1/2K)
+//- #define PORT_CONFIG pcrHS
+#define PORT_CONFIG (pcrHS | pcrHD)
+//- #define PORT_CONFIG pcrHS |pcrPM |pcrPBF|pcrHDM
+//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS);
+//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM);
+//- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM | 1<<pcrLPBKBit);
+
+// clear all the MIB ctr regs
+#define EXT_CONFIG_CLEAR (pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen | pcxrPRIOrxOverride | pcxrRMIIen)
+
+/*
+ * _debug level:
+ * <= 2	none.
+ *  > 2	some warnings such as queue full, .....
+ *  > 3	lots of change-of-state messages.
+ *  > 4	EXTENSIVE data/descriptor dumps.
+ */
+
+#ifdef GT64240_DEBUG
+static int gt64240_debug = GT64240_DEBUG;
+#else
+static int gt64240_debug = 0;
+#endif
+
+static int debug = -1;
+
+#define GT64240_MSG_ENABLE	(NETIF_MSG_DRV          | \
+				NETIF_MSG_PROBE        | \
+				NETIF_MSG_LINK)
+
+
+/********************************************************/
+
+// prototypes
+static void gt64240_delay(int msec);
+static int gt64240_add_hash_entry(struct net_device *dev,
+				  unsigned char *addr);
+static void read_mib_counters(struct gt64240_private *gp);
+static void dump_MII(struct net_device *dev);
+static void dump_tx_desc(struct net_device *dev, int i);
+static void dump_rx_desc(struct net_device *dev, int i);
+static void dump_hw_addr(unsigned char *addr_str);
+static void update_stats(struct gt64240_private *gp);
+static void abort(struct net_device *dev, u32 abort_bits);
+static void hard_stop(struct net_device *dev);
+static void enable_ether_irq(struct net_device *dev);
+static void disable_ether_irq(struct net_device *dev);
+static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num);
+static void reset_tx(struct net_device *dev);
+static void reset_rx(struct net_device *dev);
+static int gt64240_init(struct net_device *dev);
+static int gt64240_open(struct net_device *dev);
+static int gt64240_close(struct net_device *dev);
+static int gt64240_tx(struct sk_buff *skb, struct net_device *dev);
+#ifdef GT64240_NAPI
+static int gt64240_poll(struct net_device *dev, int *budget);
+static int gt64240_rx(struct net_device *dev, u32 status, int budget);
+#else
+static int gt64240_rx(struct net_device *dev, u32 status);
+#endif
+static void gt64240_tx_timeout(struct net_device *dev);
+static void gt64240_set_rx_mode(struct net_device *dev);
+static struct net_device_stats *gt64240_get_stats(struct net_device *dev);
+
+extern char * prom_getcmdline(void);
+extern int prom_get_mac_addrs(unsigned char
+			      station_addr[NUM_INTERFACES][6]);
+
+static char version[] __devinitdata =
+	"gt64240eth.o: version 0.1, <www.patton.com>\n";
+
+// PHY device addresses
+static u32 gt64240_phy_addr[NUM_INTERFACES] __devinitdata = { 0x8, 0x1, 0xa };
+
+// Need real Ethernet addresses -- in parse_mac_addr_options(),
+// these will be replaced by prom_get_mac_addrs() and/or prom_getcmdline().
+static unsigned char gt64240_station_addr[NUM_INTERFACES][6] = {
+	{0x00, 0x01, 0x02, 0x03, 0x04, 0x05},
+	{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
+	{0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
+};
+
+static int max_interrupt_work = 32;
+
+/*
+ * Base address and interrupt of the GT64240 ethernet controllers
+ */
+static struct {
+	unsigned int port;
+	int irq;
+} gt64240_iflist[NUM_INTERFACES] = {
+	{ GT64240_ETH0_BASE, 8 },
+	{ GT64240_ETH1_BASE, 8 },
+	{ GT64240_ETH2_BASE, 8 }
+};
+
+static void gt64240_delay(int ms)
+{
+	if (in_interrupt())
+		return;
+	else {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(ms * HZ / 1000);
+	}
+}
+
+unsigned char prom_mac_addr_base[6];
+
+int prom_get_mac_addrs(unsigned char station_addr[NUM_INTERFACES][6])
+{
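+	/* All three ports start from the PROM base MAC; ports 1 and 2 get
+	   base+1 and base+2 in the last octet. */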
+	memcpy(station_addr[0], prom_mac_addr_base, 6);
+	memcpy(station_addr[1], prom_mac_addr_base, 6);
+	memcpy(station_addr[2], prom_mac_addr_base, 6);
+
+	station_addr[1][5] += 1;
+	station_addr[2][5] += 2;
+
+	return 0;
+}
+
+void parse_mac_addr_options(void)
+{
+	prom_get_mac_addrs(gt64240_station_addr);
+}
+
+static int read_MII(struct net_device *dev, int phy, int reg)
+{
+	int timedout = 20;
+	u32 smir = smirOpCode | (phy << smirPhyAdBit) |
+	    (reg << smirRegAdBit);
+
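+	// smirOpCode requests a read cycle; the PHY's reply appears in the
+	// same SMI register once smirReadValid goes high (polled further down).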
+	// wait for last operation to complete
+	while ((GT64240_READ(GT64240_ETH_SMI_REG)) & smirBusy) {
+		// snooze for 1 msec and check again
+		gt64240_delay(1);
+
+		if (--timedout == 0) {
+			printk("%s: read_MII busy timeout!!\n", dev->name);
+			return -1;
+		}
+	}
+
+	GT64240_WRITE(GT64240_ETH_SMI_REG, smir);
+
+	timedout = 20;
+	// wait for read to complete
+	while (!((smir = GT64240_READ(GT64240_ETH_SMI_REG)) & smirReadValid)) {
+		// snooze for 1 msec and check again
+		gt64240_delay(1);
+
+		if (--timedout == 0) {
+			printk("%s: read_MII timeout!!\n", dev->name);
+			return -1;
+		}
+	}
+
+	return (int) (smir & smirDataMask);
+}
+
+static void gp_get_drvinfo (struct net_device *dev, 
+				struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, "gt64240");
+	strcpy(info->version, version);
+}
+
+static int gp_get_settings(struct net_device *dev, 
+				struct ethtool_cmd *cmd)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int rc;
+
+	spin_lock_irq(&gp->lock);
+	rc = mii_ethtool_gset(&gp->mii_if, cmd);
+	spin_unlock_irq(&gp->lock);
+	return rc;
+}
+
+static int gp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int rc;
+
+	spin_lock_irq(&gp->lock);
+	rc = mii_ethtool_sset(&gp->mii_if, cmd);
+	spin_unlock_irq(&gp->lock);
+	return rc;
+}
+
+static int gp_nway_reset(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	return mii_nway_restart(&gp->mii_if);
+}
+
+static u32 gp_get_link(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	return mii_link_ok(&gp->mii_if);
+}
+
+static u32 gp_get_msglevel(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	return gp->msg_enable;
+}
+
+static void gp_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	gp->msg_enable = value;
+}
+
+static struct ethtool_ops gp_ethtool_ops = {
+	.get_drvinfo		= gp_get_drvinfo,
+	.get_settings		= gp_get_settings,
+	.set_settings		= gp_set_settings,
+	.nway_reset		= gp_nway_reset,
+	.get_link		= gp_get_link,
+	.get_msglevel		= gp_get_msglevel,
+	.set_msglevel		= gp_set_msglevel,
+};
+
+static int gt64240_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	struct mii_ioctl_data *data =
+	    (struct mii_ioctl_data *) &rq->ifr_data;
+	int retval;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	spin_lock_irq(&gp->lock);
+	retval = generic_mii_ioctl(&gp->mii_if, data, cmd, NULL);
+	spin_unlock_irq(&gp->lock);
+
+	return retval;
+}
+
+static void dump_tx_desc(struct net_device *dev, int i)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	gt64240_td_t *td = &gp->tx_ring[i];
+
+	printk("%s:tx[%d]: self=%p cmd=%08x, cnt=%4d. bufp=%08x, next=%08x\n",
+	       dev->name, i, td, td->cmdstat, td->byte_cnt, td->buff_ptr,
+	       td->next);
+}
+
+static void dump_rx_desc(struct net_device *dev, int i)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	gt64240_rd_t *rd = &gp->rx_ring[i];
+
+	printk("%s:rx_dsc[%d]: self=%p cst=%08x,size=%4d. cnt=%4d. "
+	       "bufp=%08x, next=%08x\n",
+	       dev->name, i, rd, rd->cmdstat, rd->buff_sz, rd->byte_cnt,
+	       rd->buff_ptr, rd->next);
+}
+
+// These routines work, just disabled to avoid compile warnings
+static void write_MII(struct net_device *dev, int phy, int reg, int data)
+{
+	u32 smir = (phy << smirPhyAdBit) | (reg << smirRegAdBit) | data;
+	int timedout = 20;
+
+	// wait for last operation to complete
+	while (GT64240_READ(GT64240_ETH_SMI_REG) & smirBusy) {
+		// snooze for 1 msec and check again
+		gt64240_delay(1);
+
+		if (--timedout == 0) {
+			printk("%s: write_MII busy timeout!!\n",
+			       dev->name);
+			return;
+		}
+	}
+
+	GT64240_WRITE(GT64240_ETH_SMI_REG, smir);
+}
+
+static void dump_MII(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int i, val;
+
+	for (i = 0; i < 7; i++) {
+		if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
+			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
+	}
+	for (i = 16; i < 21; i++) {
+		if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
+			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
+	}
+}
+
+
+static void dump_hw_addr(unsigned char *addr_str)
+{
+	int i;
+	for (i = 0; i < 6; i++) {
+		printk("%2.2x", addr_str[i]);
+		printk(i < 5 ? ":" : "\n");
+	}
+}
+
+static int gt64240_add_hash_entry(struct net_device *dev,
+				  unsigned char *addr)
+{
+	static unsigned char swapped[256];
+	struct gt64240_private *gp;
+	u32 value1, value0, *entry;
+	unsigned char hash_ea[6];
+	static int flag = 0;
+	u16 hashResult;
+	int i;
+
+	if (flag == 0) {	/* Create table to swap bits in a byte  */
+		flag = 1;
+		for (i = 0; i < 256; i++) {
+			swapped[i] = (i & 0x01) << 7;
+			swapped[i] |= (i & 0x02) << 5;
+			swapped[i] |= (i & 0x04) << 3;
+			swapped[i] |= (i & 0x08) << 1;
+			swapped[i] |= (i & 0x10) >> 1;
+			swapped[i] |= (i & 0x20) >> 3;
+			swapped[i] |= (i & 0x40) >> 5;
+			swapped[i] |= (i & 0x80) >> 7;
+		}
+	}
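+	/* e.g. swapped[0x01] == 0x80 and swapped[0x80] == 0x01. */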
+
+	for (i = 0; i < 6; i++) {	/* swap bits from mac to create hash mac */
+		hash_ea[i] = swapped[addr[i]];
+	}
+
+	gp = netdev_priv(dev);
+
+	/* create hash entry address    */
+	hashResult = (((hash_ea[5] >> 2) & 0x3F) << 9) & 0x7E00;
+	hashResult |= ((hash_ea[4] & 0x7F) << 2) | (hash_ea[5] & 0x03);
+	hashResult ^=
+	    ((hash_ea[3] & 0xFF) << 1) | ((hash_ea[4] >> 7) & 0x01);
+	hashResult ^= ((hash_ea[1] & 0x01) << 8) | (hash_ea[2] & 0xFF);
+
+	value0 = hteValid | hteRD;	/* Create hash table entry value */
+	value0 |= (u32) addr[0] << 3;
+	value0 |= (u32) addr[1] << 11;
+	value0 |= (u32) addr[2] << 19;
+	value0 |= ((u32) addr[3] & 0x1f) << 27;
+
+	value1 = ((u32) addr[3] >> 5) & 0x07;
+	value1 |= (u32) addr[4] << 3;
+	value1 |= (u32) addr[5] << 11;
+
+	/* Insert the entry value into the hash table */
+	for (i = 0; i < HASH_HOP_NUMBER; i++) {
+		entry = (u32 *) ((u32) gp->hash_table +
+				 (((u32) hashResult & 0x07ff) << 3));
+		if ((*entry & hteValid) && !(*entry & hteSkip)) {
+			hashResult += 2;	/* oops, occupied, go to next entry */
+		} else {
+#ifdef __LITTLE_ENDIAN
+			entry[1] = value1;
+			entry[0] = value0;
+#else
+			entry[0] = value1;
+			entry[1] = value0;
+#endif
+			break;
+		}
+	}
+	if (i >= HASH_HOP_NUMBER) {
+		printk("%s: gt64240_add_hash_entry expired!\n", dev->name);
+		return (-1);
+	}
+	return (0);
+}
+
+
+static void read_mib_counters(struct gt64240_private *gp)
+{
+	u32 *mib_regs = (u32 *) & gp->mib;
+	int i;
+
+	for (i = 0; i < sizeof(mib_counters_t) / sizeof(u32); i++)
+		mib_regs[i] =
+		    GT64240ETH_READ(gp,
+				    GT64240_ETH_MIB_COUNT_BASE +
+				    i * sizeof(u32));
+}
+
+
+static void update_stats(struct gt64240_private *gp)
+{
+	mib_counters_t *mib = &gp->mib;
+	struct net_device_stats *stats = &gp->stats;
+
+	read_mib_counters(gp);
+
+	stats->rx_packets = mib->totalFramesReceived;
+	stats->tx_packets = mib->framesSent;
+	stats->rx_bytes = mib->totalByteReceived;
+	stats->tx_bytes = mib->byteSent;
+	stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
+	//the tx error counters are incremented by the ISR
+	//rx_dropped incremented by gt64240_rx
+	//tx_dropped incremented by gt64240_tx
+	stats->multicast = mib->multicastFramesReceived;
+	// collisions incremented by gt64240_tx_complete
+	stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
+	// The RxError condition means the Rx DMA encountered a
+	// CPU owned descriptor, which, if things are working as
+	// they should, means the Rx ring has overflowed.
+	stats->rx_over_errors = mib->macRxError;
+	stats->rx_crc_errors = mib->cRCError;
+}
+
+static void abort(struct net_device *dev, u32 abort_bits)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int timedout = 100;	// wait up to 100 msec for hard stop to complete
+
+	if (gt64240_debug > 3)
+		printk("%s: abort\n", dev->name);
+
+	// Return if neither Rx or Tx abort bits are set
+	if (!(abort_bits & (sdcmrAR | sdcmrAT)))
+		return;
+
+	// make sure only the Rx/Tx abort bits are set
+	abort_bits &= (sdcmrAR | sdcmrAT);
+
+	spin_lock(&gp->lock);
+
+	// abort any Rx/Tx DMA immediately
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, abort_bits);
+
+	if (gt64240_debug > 3)
+		printk("%s: abort: SDMA cmd  = %x/%x\n",
+		       dev->name, abort_bits, GT64240ETH_READ(gp,
+							      GT64240_ETH_SDMA_COMM));
+
+	// wait for abort to complete
+	while ((GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM)) & abort_bits) {
+		// snooze for 1 msec and check again
+		gt64240_delay(1);
+
+		if (--timedout == 0) {
+			printk("%s: abort timeout!!\n", dev->name);
+			break;
+		}
+	}
+
+	spin_unlock(&gp->lock);
+}
+
+
+static void hard_stop(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+
+	if (gt64240_debug > 3)
+		printk("%s: hard stop\n", dev->name);
+
+	disable_ether_irq(dev);
+
+	abort(dev, sdcmrAR | sdcmrAT);
+
+	// disable port
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, 0);
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_hard_stop: Port Config=%x\n",
+		       dev->name, GT64240ETH_READ(gp,
+						  GT64240_ETH_PORT_CONFIG));
+
+}
+
+static void gt64240_tx_complete(struct net_device *dev, u32 status)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int nextOut, cdp;
+	gt64240_td_t *td;
+	u32 cmdstat;
+
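+	// Recover the ring index the DMA engine is currently on from the
+	// hardware's current Tx descriptor pointer.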
+	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
+	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);
+
+	if (gt64240_debug > 3) {	/*+prk17aug01 */
+		nextOut = gp->tx_next_out;
+		printk
+		    ("%s: tx_complete: TX_PTR0=0x%08x, cdp=%d. nextOut=%d.\n",
+		     dev->name, GT64240ETH_READ(gp,
+						GT64240_ETH_CURR_TX_DESC_PTR0),
+		     cdp, nextOut);
+		td = &gp->tx_ring[nextOut];
+	}
+
+/*** NEED to check and CLEAR these errors every time thru here: ***/
+	if (gt64240_debug > 2) {
+		if (GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE))
+			printk
+			    ("%s: gt64240_tx_complete: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
+			     dev->name,
+			     GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
+			     GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
+			     GT64240_READ(COMM_UNIT_ERROR_ADDRESS));
+		GT64240_WRITE(COMM_UNIT_INTERRUPT_CAUSE, 0);
+	}
+	// Continue until we reach the current descriptor pointer
+	for (nextOut = gp->tx_next_out; nextOut != cdp;
+	     nextOut = (nextOut + 1) % TX_RING_SIZE) {
+
+		if (--gp->intr_work_done == 0)
+			break;
+
+		td = &gp->tx_ring[nextOut];
+		cmdstat = td->cmdstat;
+
+		if (cmdstat & (u32) txOwn) {
+			// DMA is not finished writing descriptor???
+			// Leave and come back later to pick-up where we left off.
+			break;
+		}
+		// increment Tx error stats
+		if (cmdstat & (u32) txErrorSummary) {
+			if (gt64240_debug > 2)
+				printk
+				    ("%s: tx_complete: Tx error, cmdstat = %x\n",
+				     dev->name, cmdstat);
+			gp->stats.tx_errors++;
+			if (cmdstat & (u32) txReTxLimit)
+				gp->stats.tx_aborted_errors++;
+			if (cmdstat & (u32) txUnderrun)
+				gp->stats.tx_fifo_errors++;
+			if (cmdstat & (u32) txLateCollision)
+				gp->stats.tx_window_errors++;
+		}
+
+		if (cmdstat & (u32) txCollision)
+			gp->stats.collisions +=
+			    (unsigned long) ((cmdstat & txReTxCntMask) >>
+					     txReTxCntBit);
+
+		// Wake the queue if the ring was full
+		if (gp->tx_full) {
+			gp->tx_full = 0;
+			if (gp->last_psr & psrLink) {
+				netif_wake_queue(dev);
+			}
+		}
+		// decrement tx ring buffer count
+		if (gp->tx_count)
+			gp->tx_count--;
+
+		// free the skb
+		if (gp->tx_skbuff[nextOut]) {
+			if (gt64240_debug > 3)
+				printk
+				    ("%s: tx_complete: good Tx, skb=%p\n",
+				     dev->name, gp->tx_skbuff[nextOut]);
+			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
+			gp->tx_skbuff[nextOut] = NULL;
+		} else {
+			printk("%s: tx_complete: no skb!\n", dev->name);
+		}
+	}
+
+	gp->tx_next_out = nextOut;
+
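+	/*
+	 * If the low-priority Tx DMA has stopped (icrTxEndLow) while
+	 * packets are still queued, restart it (sdcmrTXDL); the same
+	 * write keeps the Rx DMA enabled (sdcmrERD).
+	 */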
+	if ((status & icrTxEndLow) && gp->tx_count != 0) {
+		// we must restart the DMA
+		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
+				 sdcmrERD | sdcmrTXDL);
+	}
+}
+
+static irqreturn_t gt64240_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gt64240_private *gp = netdev_priv(dev);
+	u32 status;
+
+	if (dev == NULL) {
+		printk("gt64240_interrupt: null dev ptr\n");
+		return IRQ_NONE;
+	}
+
+	spin_lock(&gp->lock);
+
+	if (gt64240_debug > 3)
+		printk("%s: isr: entry\n", dev->name);
+
+	gp->intr_work_done = max_interrupt_work;
+
+	while (gp->intr_work_done > 0) {
+
+		status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);
+#ifdef GT64240_NAPI
+		/* don't ack Rx interrupts */
+		if (!(status & icrRxBuffer))
+			GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
+#else
+		// ACK interrupts
+		GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
+#endif
+
+		if (gt64240_debug > 3)
+			printk("%s: isr: work=%d., icr=%x\n", dev->name,
+			       gp->intr_work_done, status);
+
+		if ((status & icrEtherIntSum) == 0) {
+			if (!(status &
+			      (icrTxBufferLow | icrTxBufferHigh |
+			       icrRxBuffer))) {
+				/* exit from the while() loop */
+				break;
+			}
+		}
+
+		if (status & icrMIIPhySTC) {
+			u32 psr =
+			    GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS);
+			if (gp->last_psr != psr) {
+				printk("%s: port status: 0x%08x\n",
+				       dev->name, psr);
+				printk
+				    ("%s:    %s MBit/s, %s-duplex, flow-control %s, link is %s,\n",
+				     dev->name,
+				     psr & psrSpeed ? "100" : "10",
+				     psr & psrDuplex ? "full" : "half",
+				     psr & psrFctl ? "disabled" :
+				     "enabled",
+				     psr & psrLink ? "up" : "down");
+				printk
+				    ("%s:    TxLowQ is %s, TxHighQ is %s, Transmitter is %s\n",
+				     dev->name,
+				     psr & psrTxLow ? "running" :
+				     "stopped",
+				     psr & psrTxHigh ? "running" :
+				     "stopped",
+				     psr & psrTxInProg ? "on" : "off");
+
+				if ((psr & psrLink) && !gp->tx_full &&
+				    netif_queue_stopped(dev)) {
+					printk
+					    ("%s: isr: Link up, waking queue.\n",
+					     dev->name);
+					netif_wake_queue(dev);
+				} else if (!(psr & psrLink)
+					   && !netif_queue_stopped(dev)) {
+					printk
+					    ("%s: isr: Link down, stopping queue.\n",
+					     dev->name);
+					netif_stop_queue(dev);
+				}
+
+				gp->last_psr = psr;
+			}
+		}
+
+		if (status & (icrTxBufferLow | icrTxEndLow))
+			gt64240_tx_complete(dev, status);
+
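+		/*
+		 * Rx work: with GT64240_NAPI the port interrupt is
+		 * masked and a NAPI poll is scheduled to do the receive
+		 * processing; otherwise packets are pulled in directly
+		 * from the ISR.
+		 */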
+		if (status & icrRxBuffer) {
+#ifdef GT64240_NAPI
+			if (netif_rx_schedule_prep(dev)) {
+				disable_ether_irq(dev);
+				__netif_rx_schedule(dev);
+			}
+#else
+			gt64240_rx(dev, status);
+#endif
+		}
+		// Now check TX errors (RX errors were handled in gt64240_rx)
+		if (status & icrTxErrorLow) {
+			printk("%s: isr: Tx resource error\n", dev->name);
+		}
+
+		if (status & icrTxUdr) {
+			printk("%s: isr: Tx underrun error\n", dev->name);
+		}
+	}
+
+	if (gp->intr_work_done == 0) {
+		// ACK any remaining pending interrupts
+		GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
+		if (gt64240_debug > 3)
+			printk("%s: isr: hit max work\n", dev->name);
+	}
+
+	if (gt64240_debug > 3)
+		printk("%s: isr: exit, icr=%x\n",
+		       dev->name, GT64240ETH_READ(gp,
+						  GT64240_ETH_INT_CAUSE));
+
+	spin_unlock(&gp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void enable_ether_irq(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	u32 intMask;
+
+	intMask =
+	    icrTxBufferLow | icrTxEndLow | icrTxErrorLow |
+	    icrTxBufferHigh | icrTxEndHigh | icrTxErrorHigh | icrTxUdr |
+	    icrRxBuffer | icrRxOVR | icrRxError | icrMIIPhySTC |
+	    icrEtherIntSum;
+
+
+//- GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0); /* CLEAR existing ints */
+	// unmask device interrupts:
+	GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, intMask);
+
+	// now route ethernet interrupts to GT PCI1 (eth0 and eth1 will be
+	// sharing it).
+	intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
+	intMask |= 1 << gp->port_num;
+	MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
+}
+
+static void disable_ether_irq(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	u32 intMask;
+
+	intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
+	intMask &= ~(1 << gp->port_num);
+	MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
+
+	// mask all device interrupts:
+	GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, 0);
+}
+
+/*
+ * Probe for a GT64240 ethernet controller.
+ */
+static int __init gt64240_probe(void)
+{
+	int found = 0;
+	int i;
+
+	parse_mac_addr_options();
+
+	for (i = 0; i < NUM_INTERFACES; i++) {
+		unsigned long base_addr = gt64240_iflist[i].port;
+
+		if (check_region(base_addr, GT64240_ETH_IO_SIZE)) {
+			printk("gt64240_probe: ioaddr 0x%lx taken?\n",
+			       base_addr);
+			continue;
+		}
+
+		if (gt64240_probe1(base_addr, gt64240_iflist[i].irq, i) == 0) {
+			/*
+			 * Does not seem to be the "traditional" way folks do
+			 * this, but I want to init both eth ports if at all
+			 * possible!
+			 *
+			 * So, until I find out the "correct" way to do this:
+			 */
+			if (++found == NUM_INTERFACES)	/* That's all of them */
+				return 0;
+		}
+	}
+
+	if (found)
+		return 0;	/* as long as we found at least one! */
+
+	return -ENODEV;
+}
+
+module_init(gt64240_probe);
+
+static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num)
+{
+	struct net_device *dev = NULL;
+	static unsigned version_printed = 0;
+	struct gt64240_private *gp = NULL;
+	int retval;
+	u32 cpuConfig;
+
+	dev = alloc_etherdev(sizeof(struct gt64240_private));
+	if (!dev)
+		return -ENOMEM;
+
+	if (irq < 0) {
+		printk("gt64240_probe1: irq unknown - probing not supported\n");
+		free_netdev(dev);
+		return -ENODEV;
+	}
+#if 1				/* KLUDGE Alert: no check on return value: */
+	if (!request_region(ioaddr, GT64240_ETH_IO_SIZE, "gt64240eth"))
+		printk("*** request_region() failed!\n");
+#endif
+
+	cpuConfig = GT64240_READ(CPU_CONFIGURATION);
+	printk("gt64240_probe1: cpu in %s-endian mode\n",
+	       (cpuConfig & (1 << 12)) ? "little" : "big");
+
+	printk("%s: GT64240 found at ioaddr 0x%lx, irq %d.\n",
+	       dev->name, ioaddr, irq);
+
+	if (gt64240_debug && version_printed++ == 0)
+		printk("%s: %s", dev->name, version);
+
+	/* private struct aligned and zeroed by init_etherdev */
+	/* Fill in the 'dev' fields. */
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	memcpy(dev->dev_addr, gt64240_station_addr[port_num], ETH_ALEN);
+
+	printk("%s: HW Address ", dev->name);
+	dump_hw_addr(dev->dev_addr);
+
+	gp = netdev_priv(dev);
+
+	gp->msg_enable = (debug < 0 ? GT64240_MSG_ENABLE : debug);
+	gp->port_num = port_num;
+	gp->io_size = GT64240_ETH_IO_SIZE;
+	gp->port_offset = port_num * GT64240_ETH_IO_SIZE;
+	gp->phy_addr = gt64240_phy_addr[port_num];
+
+	printk("%s: GT64240 ethernet port %d\n", dev->name, gp->port_num);
+
+#ifdef GT64240_NAPI
+	printk("Rx NAPI supported \n");
+#endif
+
+/* MII Initialization */
+	gp->mii_if.dev = dev;
+	gp->mii_if.phy_id = gp->phy_addr;
+	gp->mii_if.mdio_read = read_MII;
+	gp->mii_if.mdio_write = write_MII;
+	gp->mii_if.advertising = read_MII(dev, gp->phy_addr, MII_ADVERTISE);
+
+	// Allocate Rx and Tx descriptor rings
+	if (gp->rx_ring == NULL) {
+		// All descriptors in ring must be 16-byte aligned
+		gp->rx_ring = dma_alloc_noncoherent(NULL,
+					sizeof(gt64240_rd_t) * RX_RING_SIZE +
+					sizeof(gt64240_td_t) * TX_RING_SIZE,
+					&gp->rx_ring_dma, GFP_KERNEL);
+		if (gp->rx_ring == NULL) {
+			retval = -ENOMEM;
+			goto free_region;
+		}
+
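+		/*
+		 * The Tx descriptors live in the same DMA allocation,
+		 * immediately after the RX_RING_SIZE Rx descriptors.
+		 */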
+		gp->tx_ring = (gt64240_td_t *) (gp->rx_ring + RX_RING_SIZE);
+		gp->tx_ring_dma =
+			gp->rx_ring_dma + sizeof(gt64240_rd_t) * RX_RING_SIZE;
+	}
+	// Allocate the Rx Data Buffers
+	if (gp->rx_buff == NULL) {
+		gp->rx_buff = dma_alloc_coherent(NULL,
+				PKT_BUF_SZ * RX_RING_SIZE, &gp->rx_buff_dma,
+				GFP_KERNEL);
+		if (gp->rx_buff == NULL) {
+			dma_free_noncoherent(NULL,
+				sizeof(gt64240_rd_t) * RX_RING_SIZE +
+				sizeof(gt64240_td_t) * TX_RING_SIZE,
+				gp->rx_ring, gp->rx_ring_dma);
+			retval = -ENOMEM;
+			goto free_region;
+		}
+	}
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_probe1, rx_ring=%p, tx_ring=%p\n",
+		       dev->name, gp->rx_ring, gp->tx_ring);
+
+	// Allocate Rx Hash Table
+	if (gp->hash_table == NULL) {
+		gp->hash_table = dma_alloc_coherent(NULL,
+				RX_HASH_TABLE_SIZE, &gp->hash_table_dma,
+				GFP_KERNEL);
+		if (gp->hash_table == NULL) {
+			dma_free_noncoherent(NULL,
+				sizeof(gt64240_rd_t) * RX_RING_SIZE +
+				sizeof(gt64240_td_t) * TX_RING_SIZE,
+				gp->rx_ring, gp->rx_ring_dma);
+			dma_free_noncoherent(NULL, PKT_BUF_SZ * RX_RING_SIZE,
+				gp->rx_buff, gp->rx_buff_dma);
+			retval = -ENOMEM;
+			goto free_region;
+		}
+	}
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_probe1, hash=%p\n",
+		       dev->name, gp->hash_table);
+
+	spin_lock_init(&gp->lock);
+
+	dev->open = gt64240_open;
+	dev->hard_start_xmit = gt64240_tx;
+	dev->stop = gt64240_close;
+	dev->get_stats = gt64240_get_stats;
+	dev->do_ioctl = gt64240_ioctl;
+	dev->set_multicast_list = gt64240_set_rx_mode;
+	dev->tx_timeout = gt64240_tx_timeout;
+	dev->watchdog_timeo = GT64240ETH_TX_TIMEOUT;
+
+#ifdef GT64240_NAPI
+	dev->poll = gt64240_poll;
+	dev->weight = 64;
+#endif
+	dev->ethtool_ops = &gp_ethtool_ops;
+
+	/* Fill in the fields of the device structure with ethernet values. */
+	return 0;
+
+free_region:
+	release_region(ioaddr, gp->io_size);
+	printk("gt64240_probe1 failed.  Returns %d\n", retval);
+	free_netdev(dev);
+	return retval;
+}
+
+
+static void reset_tx(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int i;
+
+	abort(dev, sdcmrAT);
+
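+	/*
+	 * Rebuild the Tx ring: free any queued skbs, give every
+	 * descriptor back to the CPU and chain it to the physical
+	 * address of the next entry; the last descriptor is wrapped
+	 * back to the start of the ring below.
+	 */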
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (gp->tx_skbuff[i]) {
+			if (in_interrupt())
+				dev_kfree_skb_irq(gp->tx_skbuff[i]);
+			else
+				dev_kfree_skb(gp->tx_skbuff[i]);
+			gp->tx_skbuff[i] = NULL;
+		}
+//-     gp->tx_ring[i].cmdstat = 0; // CPU owns
+		gp->tx_ring[i].cmdstat =
+		    (u32) (txGenCRC | txEI | txPad | txFirst | txLast);
+		gp->tx_ring[i].byte_cnt = 0;
+		gp->tx_ring[i].buff_ptr = 0;
+		gp->tx_ring[i].next =
+		    gp->tx_ring_dma + sizeof(gt64240_td_t) * (i + 1);
+		if (gt64240_debug > 4)
+			dump_tx_desc(dev, i);
+	}
+	/* Wrap the ring. */
+	gp->tx_ring[i - 1].next = gp->tx_ring_dma;
+	if (gt64240_debug > 4)
+		dump_tx_desc(dev, i - 1);
+
+	// setup only the lowest priority TxCDP reg
+	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
+			 gp->tx_ring_dma);
+//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0, 0);     /* ROLLINS */
+//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,virt_to_phys(&gp->tx_ring[0]));  /* ROLLINS */
+
+	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR1, 0);
+
+	// init Tx indices and pkt counter
+	gp->tx_next_in = gp->tx_next_out = 0;
+	gp->tx_count = 0;
+}
+
+static void reset_rx(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int i;
+
+	abort(dev, sdcmrAR);
+
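+	/*
+	 * Rebuild the Rx ring: each descriptor points at its fixed
+	 * PKT_BUF_SZ slice of rx_buff, is chained to the next entry and
+	 * is handed to the device (rxOwn) with its interrupt enabled.
+	 */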
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		gp->rx_ring[i].next =
+		    gp->rx_ring_dma + sizeof(gt64240_rd_t) * (i + 1);
+		gp->rx_ring[i].buff_ptr = gp->rx_buff_dma + i * PKT_BUF_SZ;
+		gp->rx_ring[i].buff_sz = PKT_BUF_SZ;
+		gp->rx_ring[i].byte_cnt = 0;	/* just for debug printk's */
+		// Give ownership to device, set first and last, enable interrupt
+		gp->rx_ring[i].cmdstat =
+		    (uint32_t) (rxFirst | rxLast | rxOwn | rxEI);
+		if (gt64240_debug > 4)
+			dump_rx_desc(dev, i);
+	}
+	/* Wrap the ring. */
+	gp->rx_ring[i - 1].next = gp->rx_ring_dma;
+	if (gt64240_debug > 4)
+		dump_rx_desc(dev, i - 1);
+
+	// Setup only the lowest priority RxFDP and RxCDP regs
+	for (i = 0; i < 4; i++) {
+		if (i == 0) {
+			GT64240ETH_WRITE(gp, GT64240_ETH_1ST_RX_DESC_PTR0,
+					 gp->rx_ring_dma);
+			GT64240ETH_WRITE(gp, GT64240_ETH_CURR_RX_DESC_PTR0,
+					 gp->rx_ring_dma);
+		} else {
+			GT64240ETH_WRITE(gp,
+					 GT64240_ETH_1ST_RX_DESC_PTR0 +
+					 i * 4, 0);
+			GT64240ETH_WRITE(gp,
+					 GT64240_ETH_CURR_RX_DESC_PTR0 +
+					 i * 4, 0);
+		}
+	}
+
+	// init Rx NextOut index
+	gp->rx_next_out = 0;
+}
+
+
+static int gt64240_init(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+
+	if (gt64240_debug > 3) {
+		printk("%s: gt64240_init: dev=%p\n", dev->name, dev);
+		printk("%s: gt64240_init: scs0_lo=%04x, scs0_hi=%04x\n",
+		       dev->name, GT64240_READ(0x008),
+		       GT64240_READ(0x010));
+		printk("%s: gt64240_init: scs1_lo=%04x, scs1_hi=%04x\n",
+		       dev->name, GT64240_READ(0x208),
+		       GT64240_READ(0x210));
+		printk("%s: gt64240_init: scs2_lo=%04x, scs2_hi=%04x\n",
+		       dev->name, GT64240_READ(0x018),
+		       GT64240_READ(0x020));
+		printk("%s: gt64240_init: scs3_lo=%04x, scs3_hi=%04x\n",
+		       dev->name, GT64240_READ(0x218),
+		       GT64240_READ(0x220));
+	}
+	// Stop and disable Port
+	hard_stop(dev);
+
+	GT64240_WRITE(COMM_UNIT_INTERRUPT_MASK, 0x07070777);	/*+prk21aug01 */
+	if (gt64240_debug > 2)
+		printk
+		    ("%s: gt64240_init: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
+		     dev->name, GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
+		     GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
+		     GT64240_READ(COMM_UNIT_ERROR_ADDRESS));
+
+	// Set-up hash table
+	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear it
+	gp->hash_mode = 0;
+	// Add a single entry to hash table - our ethernet address
+	gt64240_add_hash_entry(dev, dev->dev_addr);
+	// Set-up DMA ptr to hash table
+	GT64240ETH_WRITE(gp, GT64240_ETH_HASH_TBL_PTR, gp->hash_table_dma);
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Hash Tbl Ptr=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_HASH_TBL_PTR));
+
+	// Setup Tx
+	reset_tx(dev);
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Curr Tx Desc Ptr0=%x\n",
+		       dev->name, GT64240ETH_READ(gp,
+						  GT64240_ETH_CURR_TX_DESC_PTR0));
+
+	// Setup Rx
+	reset_rx(dev);
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: 1st/Curr Rx Desc Ptr0=%x/%x\n",
+		       dev->name, GT64240ETH_READ(gp,
+						  GT64240_ETH_1ST_RX_DESC_PTR0),
+		       GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0));
+
+	if (gt64240_debug > 3)
+		dump_MII(dev);
+
+	/* force a PHY reset -- self-clearing! */
+	write_MII(dev, gp->phy_addr, 0, 0x8000);
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: PhyAD=%x\n", dev->name,
+		       GT64240_READ(GT64240_ETH_PHY_ADDR_REG));
+
+	// setup DMA
+	// We want the Rx/Tx DMA to write/read data to/from memory in
+	// Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
+#ifdef DESC_DATA_BE
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG,
+			 (0xf << sdcrRCBit) | sdcrRIFB | (3 <<
+							  sdcrBSZBit));
+#else
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG, sdcrBLMR | sdcrBLMT |
+//-                  (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
+			 (0xf << sdcrRCBit) | sdcrRIFB | (2 <<
+							  sdcrBSZBit));
+#endif
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: SDMA Config=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_CONFIG));
+
+#if 0
+	// start Rx DMA
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
+#endif
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: SDMA Cmd =%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM));
+
+#if 1
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, PORT_CONFIG);
+#endif
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
+
+	/*
+	 * Disable all Type-of-Service queueing. All Rx packets will be
+	 * treated normally and will be sent to the lowest priority
+	 * queue.
+	 *
+	 * Disable flow-control for now. FIX! support flow control?
+	 */
+
+#if 1
+	// clear all the MIB ctr regs
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT,
+			 EXT_CONFIG_CLEAR);
+	read_mib_counters(gp);
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT,
+			 EXT_CONFIG_CLEAR | pcxrMIBclrMode);
+
+#endif
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Port Config Ext=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG_EXT));
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Port Command=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_PORT_COMMAND));
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_COMMAND, 0x0);
+
+	netif_start_queue(dev);
+
+	/* enable the port */
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG,
+			 (PORT_CONFIG | pcrEN));
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
+#if 1
+	// start Rx DMA
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
+#endif
+
+
+	// enable interrupts
+	enable_ether_irq(dev);
+
+//---    gp->last_psr |= psrLink;   /* KLUDGE ALERT */
+
+	// we should now be receiving frames
+	return 0;
+}
+
+
+static int gt64240_open(struct net_device *dev)
+{
+	int retval;
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_open: dev=%p\n", dev->name, dev);
+
+	if ((retval = request_irq(dev->irq, gt64240_interrupt,
+				  IRQF_SHARED, dev->name, dev))) {
+		printk("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+
+		return retval;
+	}
+	// Initialize and startup the GT-64240 ethernet port
+	if ((retval = gt64240_init(dev))) {
+		printk("%s: error in gt64240_open\n", dev->name);
+		free_irq(dev->irq, dev);
+
+		return retval;
+	}
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_open: Initialization done.\n",
+		       dev->name);
+
+	return 0;
+}
+
+static int gt64240_close(struct net_device *dev)
+{
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_close: dev=%p\n", dev->name, dev);
+
+	// stop the device
+	if (netif_device_present(dev)) {
+		netif_stop_queue(dev);
+		hard_stop(dev);
+	}
+
+	free_irq(dev->irq, dev);
+
+	return 0;
+}
+
+#ifdef GT64240_NAPI
+/*
+ * Function will release Tx skbs which are now complete
+ */
+static void gt64240_tx_fill(struct net_device *dev, u32 status)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	int nextOut, cdp;
+	gt64240_td_t *td;
+	u32 cmdstat;
+
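+	/*
+	 * Slimmed-down variant of gt64240_tx_complete() used from the
+	 * NAPI poll path: reclaim finished descriptors and free their
+	 * skbs without the Tx error accounting.
+	 */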
+	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
+	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);
+
+	for (nextOut = gp->tx_next_out; nextOut != cdp;
+	     nextOut = (nextOut + 1) % TX_RING_SIZE) {
+		if (--gp->intr_work_done == 0)
+			break;
+
+		td = &gp->tx_ring[nextOut];
+		cmdstat = td->cmdstat;
+
+		if (cmdstat & (u32) txOwn)
+			break;
+
+		if (gp->tx_full) {
+			gp->tx_full = 0;
+			if (gp->last_psr & psrLink) {
+				netif_wake_queue(dev);
+			}
+		}
+		// decrement tx ring buffer count
+		if (gp->tx_count)
+			gp->tx_count--;
+
+		// free the skb
+		if (gp->tx_skbuff[nextOut]) {
+			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
+			gp->tx_skbuff[nextOut] = NULL;
+		}
+	}
+
+	gp->tx_next_out = nextOut;
+
+	if ((status & icrTxEndLow) && gp->tx_count != 0)
+		// we must restart the DMA
+		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
+				 sdcmrERD | sdcmrTXDL);
+}
+
+/*
+ * Main function for NAPI
+ */
+static int gt64240_poll(struct net_device *dev, int *budget)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	unsigned long flags;
+	int done = 1, orig_budget, work_done;
+	u32 status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);
+
+	spin_lock_irqsave(&gp->lock, flags);
+	gt64240_tx_fill(dev, status);
+
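+	/*
+	 * Convert the hardware's current Rx descriptor pointer into a
+	 * ring index; if it differs from rx_next_out there is Rx work
+	 * to do within the smaller of *budget and dev->quota.
+	 */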
+	if ((int) ((GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0) -
+	     gp->rx_ring_dma) / sizeof(gt64240_rd_t)) != gp->rx_next_out) {
+		orig_budget = *budget;
+		if (orig_budget > dev->quota)
+			orig_budget = dev->quota;
+
+		work_done = gt64240_rx(dev, status, orig_budget);
+		*budget -= work_done;
+		dev->quota -= work_done;
+		if (work_done >= orig_budget)
+			done = 0;
+	}
+
+	if (done) {
+		__netif_rx_complete(dev);
+		enable_ether_irq(dev);
+	}
+
+	spin_unlock_irqrestore(&gp->lock, flags);
+
+	return (done ? 0 : 1);
+}
+#endif
+
+static int gt64240_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	unsigned long flags;
+	int nextIn;
+
+	spin_lock_irqsave(&gp->lock, flags);
+
+	nextIn = gp->tx_next_in;
+
+	if (gt64240_debug > 3) {
+		printk("%s: gt64240_tx: nextIn=%d.\n", dev->name, nextIn);
+	}
+
+	if (gp->tx_count >= TX_RING_SIZE) {
+		printk("%s: Tx Ring full, pkt dropped.\n", dev->name);
+		gp->stats.tx_dropped++;
+		spin_unlock_irqrestore(&gp->lock, flags);
+		return 1;
+	}
+
+	if (!(gp->last_psr & psrLink)) {
+		printk("%s: gt64240_tx: Link down, pkt dropped.\n",
+		       dev->name);
+		gp->stats.tx_dropped++;
+		spin_unlock_irqrestore(&gp->lock, flags);
+//---   dump_MII(dev);          /* KLUDGE ALERT !!! */
+		return 1;
+	}
+
+	if (gp->tx_ring[nextIn].cmdstat & txOwn) {
+		printk
+		    ("%s: gt64240_tx: device owns descriptor, pkt dropped.\n",
+		     dev->name);
+		gp->stats.tx_dropped++;
+		// stop the queue, so Tx timeout can fix it
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&gp->lock, flags);
+		return 1;
+	}
+	// Prepare the Descriptor at tx_next_in
+	gp->tx_skbuff[nextIn] = skb;
+	gp->tx_ring[nextIn].byte_cnt = skb->len;
+	gp->tx_ring[nextIn].buff_ptr = virt_to_phys(skb->data);
+
+	// make sure packet gets written back to memory
+	dma_cache_wback_inv((unsigned long) (skb->data), skb->len);
+	mb();
+
+	// Give ownership to device, set first and last desc, enable interrupt
+	// Setting of ownership bit must be *last*!
+	gp->tx_ring[nextIn].cmdstat =
+	    txOwn | txGenCRC | txEI | txPad | txFirst | txLast;
+
+	if (gt64240_debug > 5) {
+		dump_tx_desc(dev, nextIn);
+	}
+	// increment tx_next_in with wrap
+	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
+
+//+prk20aug01:
+	if (0) {		/* ROLLINS */
+		GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
+				 virt_to_phys(&gp->tx_ring[nextIn]));
+	}
+
+	if (gt64240_debug > 3) {	/*+prk17aug01 */
+		printk
+		    ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
+		     dev->name, GT64240ETH_READ(gp,
+						GT64240_ETH_CURR_TX_DESC_PTR0),
+		     GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
+	}
+	// If DMA is stopped, restart
+	if (!((GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS)) & psrTxLow)) {
+		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
+				 sdcmrERD | sdcmrTXDL);
+	}
+
+	if (gt64240_debug > 3) {	/*+prk17aug01 */
+		printk
+		    ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
+		     dev->name, GT64240ETH_READ(gp,
+						GT64240_ETH_CURR_TX_DESC_PTR0),
+		     GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
+	}
+	// increment count and stop queue if full
+	if (++gp->tx_count >= TX_RING_SIZE) {
+		gp->tx_full = 1;
+		netif_stop_queue(dev);
+	}
+
+	dev->trans_start = jiffies;
+	spin_unlock_irqrestore(&gp->lock, flags);
+
+	return 0;
+}
+
+
+static int
+#ifdef GT64240_NAPI
+gt64240_rx(struct net_device *dev, u32 status, int budget)
+#else
+gt64240_rx(struct net_device *dev, u32 status)
+#endif
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	struct sk_buff *skb;
+	int pkt_len, nextOut, cdp;
+	gt64240_rd_t *rd;
+	u32 cmdstat;
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_rx: dev=%p, status=%x\n",
+		       dev->name, dev, status);
+
+	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0)
+	       - gp->rx_ring_dma) / sizeof(gt64240_rd_t);
+
+	// Continue until we reach the current descriptor pointer
+	for (nextOut = gp->rx_next_out; nextOut != cdp;
+	     nextOut = (nextOut + 1) % RX_RING_SIZE) {
+
+#ifdef GT64240_NAPI
+		if (budget <= 0)
+			break;
+
+		budget--;
+#endif
+
+		if (--gp->intr_work_done == 0)
+			break;
+
+		if (gt64240_debug > 4)
+			dump_rx_desc(dev, nextOut);
+
+		rd = &gp->rx_ring[nextOut];
+		cmdstat = rd->cmdstat;
+
+		if (gt64240_debug > 3)
+			printk("%s: isr: Rx desc cmdstat=%x, nextOut=%d\n",
+			       dev->name, cmdstat, nextOut);
+
+		if (cmdstat & (u32) rxOwn) {
+			if (gt64240_debug > 2)
+				printk
+				    ("%s: gt64240_rx: device owns descriptor!\n",
+				     dev->name);
+			// DMA is not finished updating descriptor???
+			// Leave and come back later to pick-up where we left off.
+			break;
+		}
+		// must be first and last (ie only) buffer of packet
+		if (!(cmdstat & (u32) rxFirst)
+		    || !(cmdstat & (u32) rxLast)) {
+			printk
+			    ("%s: gt64240_rx: desc not first and last!\n",
+			     dev->name);
+			cmdstat |= (u32) rxOwn;
+			rd->cmdstat = cmdstat;
+			continue;
+		}
+		// Drop this received pkt if there were any errors
+		if ((cmdstat & (u32) rxErrorSummary)
+		    || (status & icrRxError)) {
+			// update the detailed rx error counters that are not covered
+			// by the MIB counters.
+			if (cmdstat & (u32) rxOverrun)
+				gp->stats.rx_fifo_errors++;
+			cmdstat |= (u32) rxOwn;
+			rd->cmdstat = cmdstat;
+			continue;
+		}
+
+		pkt_len = rd->byte_cnt;
+
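+		/*
+		 * Copy-based receive: the frame is copied out of the
+		 * fixed DMA buffer into a freshly allocated skb so the
+		 * ring slot can be handed straight back to the device.
+		 */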
+		/* Create new skb. */
+//      skb = dev_alloc_skb(pkt_len+2);
+		skb = dev_alloc_skb(1538);
+		if (skb == NULL) {
+			printk("%s: Memory squeeze, dropping packet.\n",
+			       dev->name);
+			gp->stats.rx_dropped++;
+			cmdstat |= (u32) rxOwn;
+			rd->cmdstat = cmdstat;
+			continue;
+		}
+		skb->dev = dev;
+		skb_reserve(skb, 2);	/* 16 byte IP header align */
+		memcpy(skb_put(skb, pkt_len),
+		       &gp->rx_buff[nextOut * PKT_BUF_SZ], pkt_len);
+		skb->protocol = eth_type_trans(skb, dev);
+
+		/* NIC performed some checksum computation */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+#ifdef GT64240_NAPI
+		netif_receive_skb(skb);
+#else
+		netif_rx(skb);	/* pass the packet to upper layers */
+#endif
+
+		// now we can release ownership of this desc back to device
+		cmdstat |= (u32) rxOwn;
+		rd->cmdstat = cmdstat;
+
+		dev->last_rx = jiffies;
+	}
+
+	if (gt64240_debug > 3 && nextOut == gp->rx_next_out)
+		printk("%s: gt64240_rx: RxCDP did not increment?\n",
+		       dev->name);
+
+	gp->rx_next_out = nextOut;
+	return 0;
+}
+
+
+static void gt64240_tx_timeout(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gp->lock, flags);
+
+
+	if (!(gp->last_psr & psrLink)) {
+		spin_unlock_irqrestore(&gp->lock, flags);
+	} else {
+		printk("======------> gt64240_tx_timeout: %d jiffies \n",
+		       GT64240ETH_TX_TIMEOUT);
+
+		disable_ether_irq(dev);
+		spin_unlock_irqrestore(&gp->lock, flags);
+		reset_tx(dev);
+		enable_ether_irq(dev);
+
+		netif_wake_queue(dev);
+	}
+}
+
+
+static void gt64240_set_rx_mode(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	unsigned long flags;
+	struct dev_mc_list *mcptr;
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_set_rx_mode: dev=%p, flags=%x\n",
+		       dev->name, dev, dev->flags);
+
+	// stop the Receiver DMA
+	abort(dev, sdcmrAR);
+
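+	/*
+	 * With the Rx DMA stopped, rewrite the promiscuous bit and the
+	 * hash table under the lock, then restart the Rx DMA below.
+	 */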
+	spin_lock_irqsave(&gp->lock, flags);
+
+	if (dev->flags & IFF_PROMISC)
+		GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);
+	else
+		GT64240ETH_CLRBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);
+/*
+	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG,
+		(PORT_CONFIG | pcrPM | pcrEN));
+*/
+
+	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear hash table
+	// Add our ethernet address
+	gt64240_add_hash_entry(dev, dev->dev_addr);
+	if (dev->mc_count) {
+		for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
+			if (gt64240_debug > 2) {
+				printk("%s: gt64240_set_rx_mode: addr=\n",
+				       dev->name);
+				dump_hw_addr(mcptr->dmi_addr);
+			}
+			gt64240_add_hash_entry(dev, mcptr->dmi_addr);
+		}
+	}
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_set_rx: Port Config=%x\n", dev->name,
+		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
+
+	// restart Rx DMA
+	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
+
+	spin_unlock_irqrestore(&gp->lock, flags);
+}
+
+static struct net_device_stats *gt64240_get_stats(struct net_device *dev)
+{
+	struct gt64240_private *gp = netdev_priv(dev);
+	unsigned long flags;
+
+	if (gt64240_debug > 3)
+		printk("%s: gt64240_get_stats: dev=%p\n", dev->name, dev);
+
+	if (netif_device_present(dev)) {
+		spin_lock_irqsave(&gp->lock, flags);
+		update_stats(gp);
+		spin_unlock_irqrestore(&gp->lock, flags);
+	}
+
+	return &gp->stats;
+}
diff -Naur linux-2.6.19/drivers/net/gt64240eth.h linux-mips-2.6.19/drivers/net/gt64240eth.h
--- linux-2.6.19/drivers/net/gt64240eth.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/gt64240eth.h	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,403 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Patton Electronics Company
+ * Copyright (C) 2002 Momentum Computer
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ *         	stevel@mvista.com or support@mvista.com
+ * Copyright 2004, 05 Ralf Baechle (ralf@linux-mips.org)
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Ethernet driver definitions for the MIPS GT96100 Advanced
+ * Communication Controller.
+ *
+ * Modified for the Marvellous GT64240 Retarded Communication Controller.
+ */
+#ifndef _GT64240ETH_H
+#define _GT64240ETH_H
+
+#include <asm/gt64240.h>
+
+#define ETHERNET_PORTS_DIFFERENCE_OFFSETS	0x400
+
+/* Translate those weanie names from Galileo/VxWorks header files: */
+
+#define GT64240_MRR                    MAIN_ROUTING_REGISTER
+#define GT64240_CIU_ARBITER_CONFIG     COMM_UNIT_ARBITER_CONFIGURATION_REGISTER
+#define GT64240_CIU_ARBITER_CONTROL    COMM_UNIT_ARBITER_CONTROL
+#define GT64240_MAIN_LOW_CAUSE         LOW_INTERRUPT_CAUSE_REGISTER
+#define GT64240_MAIN_HIGH_CAUSE        HIGH_INTERRUPT_CAUSE_REGISTER
+#define GT64240_CPU_LOW_MASK           CPU_INTERRUPT_MASK_REGISTER_LOW
+#define GT64240_CPU_HIGH_MASK          CPU_INTERRUPT_MASK_REGISTER_HIGH
+#define GT64240_CPU_SELECT_CAUSE       CPU_SELECT_CAUSE_REGISTER
+
+#define GT64240_ETH_PHY_ADDR_REG       ETHERNET_PHY_ADDRESS_REGISTER
+#define GT64240_ETH_PORT_CONFIG        ETHERNET0_PORT_CONFIGURATION_REGISTER
+#define GT64240_ETH_PORT_CONFIG_EXT    ETHERNET0_PORT_CONFIGURATION_EXTEND_REGISTER
+#define GT64240_ETH_PORT_COMMAND       ETHERNET0_PORT_COMMAND_REGISTER
+#define GT64240_ETH_PORT_STATUS        ETHERNET0_PORT_STATUS_REGISTER
+#define GT64240_ETH_IO_SIZE            ETHERNET_PORTS_DIFFERENCE_OFFSETS
+#define GT64240_ETH_SMI_REG            ETHERNET_SMI_REGISTER
+#define GT64240_ETH_MIB_COUNT_BASE     ETHERNET0_MIB_COUNTER_BASE
+#define GT64240_ETH_SDMA_CONFIG        ETHERNET0_SDMA_CONFIGURATION_REGISTER
+#define GT64240_ETH_SDMA_COMM          ETHERNET0_SDMA_COMMAND_REGISTER
+#define GT64240_ETH_INT_MASK           ETHERNET0_INTERRUPT_MASK_REGISTER
+#define GT64240_ETH_INT_CAUSE          ETHERNET0_INTERRUPT_CAUSE_REGISTER
+#define GT64240_ETH_CURR_TX_DESC_PTR0  ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_TX_DESC_PTR1  ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER1
+#define GT64240_ETH_1ST_RX_DESC_PTR0   ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_CURR_RX_DESC_PTR0  ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER0
+#define GT64240_ETH_HASH_TBL_PTR       ETHERNET0_HASH_TABLE_POINTER_REGISTER
+
+/* Turn on NAPI by default */
+
+#define	GT64240_NAPI			1
+
+/* Some 64240 settings that SHOULD eventually be setup in PROM monitor: */
+/* (Board-specific to the DSL3224 Rev A board ONLY!)                    */
+#define D3224_MPP_CTRL0_SETTING		0x66669900
+#define D3224_MPP_CTRL1_SETTING		0x00000000
+#define D3224_MPP_CTRL2_SETTING		0x00887700
+#define D3224_MPP_CTRL3_SETTING		0x00000044
+#define D3224_GPP_IO_CTRL_SETTING	0x0000e800
+#define D3224_GPP_LEVEL_CTRL_SETTING	0xf001f703
+#define D3224_GPP_VALUE_SETTING		0x00000000
+
+/* Keep the ring sizes a power of two for efficiency. */
+//-#define TX_RING_SIZE 16
+#define TX_RING_SIZE	64	/* TESTING !!! */
+#define RX_RING_SIZE	32
+#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
+
+#define RX_HASH_TABLE_SIZE 16384
+#define HASH_HOP_NUMBER 12
+
+#define NUM_INTERFACES 3
+
+#define GT64240ETH_TX_TIMEOUT (HZ / 4)
+
+#define MIPS_GT64240_BASE 0xf4000000
+#define GT64240_ETH0_BASE (MIPS_GT64240_BASE + GT64240_ETH_PORT_CONFIG)
+#define GT64240_ETH1_BASE (GT64240_ETH0_BASE + GT64240_ETH_IO_SIZE)
+#define GT64240_ETH2_BASE (GT64240_ETH1_BASE + GT64240_ETH_IO_SIZE)
+
+#if defined(CONFIG_MIPS_DSL3224)
+#define GT64240_ETHER0_IRQ 4
+#define GT64240_ETHER1_IRQ 4
+#else
+#define GT64240_ETHER0_IRQ -1
+#define GT64240_ETHER1_IRQ -1
+#endif
+
+#define REV_GT64240  0x1
+#define REV_GT64240A 0x10
+
+#define GT64240ETH_READ(gp, offset)					\
+	MV_READ((gp)->port_offset + (offset))
+
+#define GT64240ETH_WRITE(gp, offset, data)				\
+	MV_WRITE((gp)->port_offset + (offset), (data))
+
+#define GT64240ETH_SETBIT(gp, offset, bits)				\
+	GT64240ETH_WRITE((gp), (offset),				\
+	                 GT64240ETH_READ((gp), (offset)) | (bits))
+
+#define GT64240ETH_CLRBIT(gp, offset, bits)				\
+	GT64240ETH_WRITE((gp), (offset),				\
+	                 GT64240ETH_READ((gp), (offset)) & ~(bits))
+
+#define GT64240_READ(ofs)		MV_READ(ofs)
+#define GT64240_WRITE(ofs, data)	MV_WRITE((ofs), (data))
+
+/* Bit definitions of the SMI Reg */
+enum {
+	smirDataMask = 0xffff,
+	smirPhyAdMask = 0x1f << 16,
+	smirPhyAdBit = 16,
+	smirRegAdMask = 0x1f << 21,
+	smirRegAdBit = 21,
+	smirOpCode = 1 << 26,
+	smirReadValid = 1 << 27,
+	smirBusy = 1 << 28
+};
+
+/* Bit definitions of the Port Config Reg */
+enum pcr_bits {
+	pcrPM = 1 << 0,
+	pcrRBM = 1 << 1,
+	pcrPBF = 1 << 2,
+	pcrEN = 1 << 7,
+	pcrLPBKMask = 0x3 << 8,
+	pcrLPBKBit = 1 << 8,
+	pcrFC = 1 << 10,
+	pcrHS = 1 << 12,
+	pcrHM = 1 << 13,
+	pcrHDM = 1 << 14,
+	pcrHD = 1 << 15,
+	pcrISLMask = 0x7 << 28,
+	pcrISLBit = 28,
+	pcrACCS = 1 << 31
+};
+
+/* Bit definitions of the Port Config Extend Reg */
+enum pcxr_bits {
+	pcxrIGMP = 1,
+	pcxrSPAN = 2,
+	pcxrPAR = 4,
+	pcxrPRIOtxMask = 0x7 << 3,
+	pcxrPRIOtxBit = 3,
+	pcxrPRIOrxMask = 0x3 << 6,
+	pcxrPRIOrxBit = 6,
+	pcxrPRIOrxOverride = 1 << 8,
+	pcxrDPLXen = 1 << 9,
+	pcxrFCTLen = 1 << 10,
+	pcxrFLP = 1 << 11,
+	pcxrFCTL = 1 << 12,
+	pcxrMFLMask = 0x3 << 14,
+	pcxrMFLBit = 14,
+	pcxrMIBclrMode = 1 << 16,
+	pcxrSpeed = 1 << 18,
+	pcxrSpeeden = 1 << 19,
+	pcxrRMIIen = 1 << 20,
+	pcxrDSCPen = 1 << 21
+};
+
+/* Bit definitions of the Port Command Reg */
+enum pcmr_bits {
+	pcmrFJ = 1 << 15
+};
+
+
+/* Bit definitions of the Port Status Reg */
+enum psr_bits {
+	psrSpeed = 1,
+	psrDuplex = 2,
+	psrFctl = 4,
+	psrLink = 8,
+	psrPause = 1 << 4,
+	psrTxLow = 1 << 5,
+	psrTxHigh = 1 << 6,
+	psrTxInProg = 1 << 7
+};
+
+/* Bit definitions of the SDMA Config Reg */
+enum sdcr_bits {
+	sdcrRCMask = 0xf << 2,
+	sdcrRCBit = 2,
+	sdcrBLMR = 1 << 6,
+	sdcrBLMT = 1 << 7,
+	sdcrPOVR = 1 << 8,
+	sdcrRIFB = 1 << 9,
+	sdcrBSZMask = 0x3 << 12,
+	sdcrBSZBit = 12
+};
+
+/* Bit definitions of the SDMA Command Reg */
+enum sdcmr_bits {
+	sdcmrERD = 1 << 7,
+	sdcmrAR = 1 << 15,
+	sdcmrSTDH = 1 << 16,
+	sdcmrSTDL = 1 << 17,
+	sdcmrTXDH = 1 << 23,
+	sdcmrTXDL = 1 << 24,
+	sdcmrAT = 1 << 31
+};
+
+/* Bit definitions of the Interrupt Cause Reg */
+enum icr_bits {
+	icrRxBuffer = 1,
+	icrTxBufferHigh = 1 << 2,
+	icrTxBufferLow = 1 << 3,
+	icrTxEndHigh = 1 << 6,
+	icrTxEndLow = 1 << 7,
+	icrRxError = 1 << 8,
+	icrTxErrorHigh = 1 << 10,
+	icrTxErrorLow = 1 << 11,
+	icrRxOVR = 1 << 12,
+	icrTxUdr = 1 << 13,
+	icrRxBufferQ0 = 1 << 16,
+	icrRxBufferQ1 = 1 << 17,
+	icrRxBufferQ2 = 1 << 18,
+	icrRxBufferQ3 = 1 << 19,
+	icrRxErrorQ0 = 1 << 20,
+	icrRxErrorQ1 = 1 << 21,
+	icrRxErrorQ2 = 1 << 22,
+	icrRxErrorQ3 = 1 << 23,
+	icrMIIPhySTC = 1 << 28,
+	icrSMIdone = 1 << 29,
+	icrEtherIntSum = 1 << 31
+};
+
+
+/* The Rx and Tx descriptor lists. */
+#ifdef __LITTLE_ENDIAN
+typedef struct {
+	u32 cmdstat;
+	u16 reserved;		//-prk21aug01    u32 reserved:16;
+	u16 byte_cnt;		//-prk21aug01    u32 byte_cnt:16;
+	u32 buff_ptr;
+	u32 next;
+} gt64240_td_t;
+
+typedef struct {
+	u32 cmdstat;
+	u16 byte_cnt;		//-prk21aug01    u32 byte_cnt:16;
+	u16 buff_sz;		//-prk21aug01    u32 buff_sz:16;
+	u32 buff_ptr;
+	u32 next;
+} gt64240_rd_t;
+#elif defined(__BIG_ENDIAN)
+typedef struct {
+	u16 byte_cnt;		//-prk21aug01    u32 byte_cnt:16;
+	u16 reserved;		//-prk21aug01    u32 reserved:16;
+	u32 cmdstat;
+	u32 next;
+	u32 buff_ptr;
+} gt64240_td_t;
+
+typedef struct {
+	u16 buff_sz;		//-prk21aug01    u32 buff_sz:16;
+	u16 byte_cnt;		//-prk21aug01    u32 byte_cnt:16;
+	u32 cmdstat;
+	u32 next;
+	u32 buff_ptr;
+} gt64240_rd_t;
+#else
+#error Either __BIG_ENDIAN or __LITTLE_ENDIAN must be defined!
+#endif
+
+
+/* Values for the Tx command-status descriptor entry. */
+enum td_cmdstat {
+	txOwn = 1 << 31,
+	txAutoMode = 1 << 30,
+	txEI = 1 << 23,
+	txGenCRC = 1 << 22,
+	txPad = 1 << 18,
+	txFirst = 1 << 17,
+	txLast = 1 << 16,
+	txErrorSummary = 1 << 15,
+	txReTxCntMask = 0x0f << 10,
+	txReTxCntBit = 10,
+	txCollision = 1 << 9,
+	txReTxLimit = 1 << 8,
+	txUnderrun = 1 << 6,
+	txLateCollision = 1 << 5
+};
+
+
+/* Values for the Rx command-status descriptor entry. */
+enum rd_cmdstat {
+	rxOwn = 1 << 31,
+	rxAutoMode = 1 << 30,
+	rxEI = 1 << 23,
+	rxFirst = 1 << 17,
+	rxLast = 1 << 16,
+	rxErrorSummary = 1 << 15,
+	rxIGMP = 1 << 14,
+	rxHashExpired = 1 << 13,
+	rxMissedFrame = 1 << 12,
+	rxFrameType = 1 << 11,
+	rxShortFrame = 1 << 8,
+	rxMaxFrameLen = 1 << 7,
+	rxOverrun = 1 << 6,
+	rxCollision = 1 << 4,
+	rxCRCError = 1
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+	hteValid = 1,
+	hteSkip = 2,
+	hteRD = 4
+};
+
+// The MIB counters
+typedef struct {
+	u32 byteReceived;
+	u32 byteSent;
+	u32 framesReceived;
+	u32 framesSent;
+	u32 totalByteReceived;
+	u32 totalFramesReceived;
+	u32 broadcastFramesReceived;
+	u32 multicastFramesReceived;
+	u32 cRCError;
+	u32 oversizeFrames;
+	u32 fragments;
+	u32 jabber;
+	u32 collision;
+	u32 lateCollision;
+	u32 frames64;
+	u32 frames65_127;
+	u32 frames128_255;
+	u32 frames256_511;
+	u32 frames512_1023;
+	u32 frames1024_MaxSize;
+	u32 macRxError;
+	u32 droppedFrames;
+	u32 outMulticastFrames;
+	u32 outBroadcastFrames;
+	u32 undersizeFrames;
+} mib_counters_t;
+
+
+struct gt64240_private {
+	gt64240_rd_t *rx_ring;
+	gt64240_td_t *tx_ring;
+	// The Rx and Tx rings must be 16-byte aligned
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+	char *hash_table;
+	// The Hash Table must be 8-byte aligned
+	dma_addr_t hash_table_dma;
+	int hash_mode;
+
+	// The Rx buffers must be 8-byte aligned
+	char *rx_buff;
+	dma_addr_t rx_buff_dma;
+	// Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
+	// of payload must be 8-byte aligned
+	struct sk_buff *tx_skbuff[TX_RING_SIZE];
+	int rx_next_out;	/* The next free ring entry to receive */
+	int tx_next_in;		/* The next free ring entry to send */
+	int tx_next_out;	/* The last ring entry the ISR processed */
+	int tx_count;		/* current # of pkts waiting to be sent in Tx ring */
+	int intr_work_done;	/* number of Rx and Tx pkts processed in the isr */
+	int tx_full;		/* Tx ring is full */
+
+	mib_counters_t mib;
+	struct net_device_stats stats;
+
+	int io_size;
+	int port_num;		// 0, 1 or 2
+	u32 port_offset;
+
+	int phy_addr;		// PHY address
+	u32 last_psr;		// last value of the port status register
+
+	int options;		/* User-settable misc. driver options. */
+	int drv_flags;
+	spinlock_t lock;	/* Serialise access to device */
+	struct mii_if_info mii_if;
+
+	u32 msg_enable;
+};
+
+#endif /* _GT64240ETH_H */
diff -Naur linux-2.6.19/drivers/net/ioc3-eth.c linux-mips-2.6.19/drivers/net/ioc3-eth.c
--- linux-2.6.19/drivers/net/ioc3-eth.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/ioc3-eth.c	2006-11-29 15:23:09.000000000 -0800
@@ -5,7 +5,7 @@
  *
  * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
  *
- * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
  * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
  *
  * References:
@@ -62,12 +62,7 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/sn/types.h>
-#include <asm/sn/sn0/addrs.h>
-#include <asm/sn/sn0/hubni.h>
-#include <asm/sn/sn0/hubio.h>
-#include <asm/sn/klconfig.h>
 #include <asm/sn/ioc3.h>
-#include <asm/sn/sn0/ip27.h>
 #include <asm/pci/bridge.h>
 
 /*
@@ -95,6 +90,9 @@
 	u32 emcr, ehar_h, ehar_l;
 	spinlock_t ioc3_lock;
 	struct mii_if_info mii;
+	unsigned long flags;
+#define IOC3_FLAG_RX_CHECKSUMS	1
+
 	struct pci_dev *pdev;
 
 	/* Members used by autonegotiation  */
@@ -522,8 +520,6 @@
 	return &ip->stats;
 }
 
-#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM
-
 static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
 {
 	struct ethhdr *eh = eth_hdr(skb);
@@ -591,7 +587,6 @@
 	if (csum == 0xffff)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
-#endif /* CONFIG_SGI_IOC3_ETH_HW_RX_CSUM */
 
 static inline void ioc3_rx(struct ioc3_private *ip)
 {
@@ -626,9 +621,9 @@
 				goto next;
 			}
 
-#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM
-			ioc3_tcpudp_checksum(skb, w0 & ERXBUF_IPCKSUM_MASK,len);
-#endif
+			if (likely(ip->flags & IOC3_FLAG_RX_CHECKSUMS))
+				ioc3_tcpudp_checksum(skb,
+					w0 & ERXBUF_IPCKSUM_MASK, len);
 
 			netif_rx(skb);
 
@@ -1289,9 +1284,7 @@
 	dev->set_multicast_list	= ioc3_set_multicast_list;
 	dev->set_mac_address	= ioc3_set_mac_address;
 	dev->ethtool_ops	= &ioc3_ethtool_ops;
-#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM
 	dev->features		= NETIF_F_IP_CSUM;
-#endif
 
 	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
 	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
@@ -1378,7 +1371,6 @@
 	uint32_t w0 = 0;
 	int produce;
 
-#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM
 	/*
 	 * IOC3 has a fairly simple minded checksumming hardware which simply
 	 * adds up the 1's complement checksum for the entire packet and
@@ -1426,7 +1418,6 @@
 
 		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
 	}
-#endif /* CONFIG_SGI_IOC3_ETH_HW_TX_CSUM */
 
 	spin_lock_irq(&ip->ioc3_lock);
 
@@ -1580,12 +1571,37 @@
 	return rc;
 }
 
+static u32 ioc3_get_rx_csum(struct net_device *dev)
+{
+	struct ioc3_private *ip = netdev_priv(dev);
+
+	return ip->flags & IOC3_FLAG_RX_CHECKSUMS;
+}
+
+static int ioc3_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct ioc3_private *ip = netdev_priv(dev);
+
+	spin_lock_bh(&ip->ioc3_lock);
+	if (data)
+		ip->flags |= IOC3_FLAG_RX_CHECKSUMS;
+	else
+		ip->flags &= ~IOC3_FLAG_RX_CHECKSUMS;
+	spin_unlock_bh(&ip->ioc3_lock);
+
+	return 0;
+}
+
 static const struct ethtool_ops ioc3_ethtool_ops = {
 	.get_drvinfo		= ioc3_get_drvinfo,
 	.get_settings		= ioc3_get_settings,
 	.set_settings		= ioc3_set_settings,
 	.nway_reset		= ioc3_nway_reset,
 	.get_link		= ioc3_get_link,
+	.get_rx_csum		= ioc3_get_rx_csum,
+	.set_rx_csum		= ioc3_set_rx_csum,
+	.get_tx_csum		= ethtool_op_get_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_csum
 };
 
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff -Naur linux-2.6.19/drivers/net/titan_ge.c linux-mips-2.6.19/drivers/net/titan_ge.c
--- linux-2.6.19/drivers/net/titan_ge.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/titan_ge.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,2069 @@
+/*
+ * drivers/net/titan_ge.c - Driver for Titan ethernet ports
+ *
+ * Copyright (C) 2003 PMC-Sierra Inc.
+ * Author : Manish Lachwani (lachwani@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/*
+ * The MAC unit of the Titan consists of the following:
+ *
+ * -> XDMA Engine to move data between memory and the MAC packet FIFO
+ * -> FIFO is where the incoming and outgoing data is placed
+ * -> TRTG is the unit that pulls the data from the FIFO for Tx and pushes
+ *    the data into the FIFO for Rx
+ * -> TMAC is the outgoing MAC interface and RMAC is the incoming.
+ * -> AFX is the address filtering block
+ * -> GMII block to communicate with the PHY
+ *
+ * Rx will look like the following:
+ * GMII --> RMAC --> AFX --> TRTG --> Rx FIFO --> XDMA --> CPU memory
+ *
+ * Tx will look like the following:
+ * CPU memory --> XDMA --> Tx FIFO --> TRTG --> TMAC --> GMII
+ *
+ * The Titan driver has support for the following performance features:
+ * -> Rx side checksumming
+ * -> Jumbo Frames
+ * -> Interrupt Coalescing
+ * -> Rx NAPI
+ * -> SKB Recycling
+ * -> Transmit/Receive descriptors in SRAM
+ * -> Fast routing for IP forwarding
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/prefetch.h>
+
+/* For MII specific registers, titan_mdio.h should be included */
+#include <net/ip.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/titan_dep.h>
+
+#include "titan_ge.h"
+#include "titan_mdio.h"
+
+/* Static Function Declarations	 */
+static int titan_ge_eth_open(struct net_device *);
+static void titan_ge_eth_stop(struct net_device *);
+static struct net_device_stats *titan_ge_get_stats(struct net_device *);
+static int titan_ge_init_rx_desc_ring(titan_ge_port_info *, int, int,
+				      unsigned long, unsigned long,
+				      unsigned long);
+static int titan_ge_init_tx_desc_ring(titan_ge_port_info *, int,
+				      unsigned long, unsigned long);
+
+static int titan_ge_open(struct net_device *);
+static int titan_ge_start_xmit(struct sk_buff *, struct net_device *);
+static int titan_ge_stop(struct net_device *);
+
+static unsigned long titan_ge_tx_coal(unsigned long, int);
+
+static void titan_ge_port_reset(unsigned int);
+static int titan_ge_free_tx_queue(titan_ge_port_info *);
+static int titan_ge_rx_task(struct net_device *, titan_ge_port_info *);
+static int titan_ge_port_start(struct net_device *, titan_ge_port_info *);
+
+static int titan_ge_return_tx_desc(titan_ge_port_info *, int);
+
+/*
+ * Some configuration for the FIFO and the XDMA channel needs
+ * to be done only once for all the ports. This flag controls
+ * that
+ */
+static unsigned long config_done;
+
+/*
+ * One time out of memory flag
+ */
+static unsigned int oom_flag;
+
+static int titan_ge_poll(struct net_device *netdev, int *budget);
+
+static int titan_ge_receive_queue(struct net_device *, unsigned int);
+
+static struct platform_device *titan_ge_device[3];
+
+/* MAC Address */
+extern unsigned char titan_ge_mac_addr_base[6];
+
+unsigned long titan_ge_base;
+static unsigned long titan_ge_sram;
+
+static char titan_string[] = "titan";
+
+/*
+ * The Titan GE has two alignment requirements:
+ * -> skb->data to be cacheline aligned (32 byte)
+ * -> IP header alignment to 16 bytes
+ *
+ * The latter is not implemented. So, that results in an extra copy on
+ * the Rx. This is a big performance hog. For the former case, the
+ * dev_alloc_skb() has been replaced with titan_ge_alloc_skb(). The size
+ * requested is calculated:
+ *
+ * Ethernet Frame Size : 1518
+ * Ethernet Header     : 14
+ * Future Titan change for IP header alignment : 2
+ *
+ * Hence, we allocate (1518 + 14 + 2 + 64) = 1598 bytes.  For IP header
+ * alignment, we use skb_reserve().
+ */
+
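+/*
+ * ALIGNED_RX_SKB_ADDR() returns how many bytes must be reserved to
+ * round skb->data up to the next 64-byte boundary; titan_ge_alloc_skb()
+ * over-allocates by 64 bytes and then reserves exactly that amount.
+ */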
+#define ALIGNED_RX_SKB_ADDR(addr) \
+	((((unsigned long)(addr) + (64UL - 1UL)) \
+	& ~(64UL - 1UL)) - (unsigned long)(addr))
+
+#define titan_ge_alloc_skb(__length, __gfp_flags) \
+({      struct sk_buff *__skb; \
+	__skb = alloc_skb((__length) + 64, (__gfp_flags)); \
+	if(__skb) { \
+		int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
+		if(__offset) \
+			skb_reserve(__skb, __offset); \
+	} \
+	__skb; \
+})
+
+/*
+ * Configure the GMII block of the Titan based on what the PHY tells us
+ */
+static void titan_ge_gmii_config(int port_num)
+{
+	unsigned int reg_data = 0, phy_reg;
+	int err;
+
+	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);
+
+	if (err == TITAN_GE_MDIO_ERROR) {
+		printk(KERN_ERR
+		       "Could not read PHY control register 0x11 \n");
+		printk(KERN_ERR
+			"Setting speed to 1000 Mbps and Duplex to Full \n");
+
+		return;
+	}
+
+	err = titan_ge_mdio_write(port_num, TITAN_GE_MDIO_PHY_IE, 0);
+
+	if (phy_reg & 0x8000) {
+		if (phy_reg & 0x2000) {
+			/* Full Duplex and 1000 Mbps */
+			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
+					(port_num << 12)), 0x201);
+		} else {
+			/* Half Duplex and 1000 Mbps */
+			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
+					(port_num << 12)), 0x2201);
+		}
+	}
+	if (phy_reg & 0x4000) {
+		if (phy_reg & 0x2000) {
+			/* Full Duplex and 100 Mbps */
+			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
+					(port_num << 12)), 0x100);
+		} else {
+			/* Half Duplex and 100 Mbps */
+			TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
+					(port_num << 12)), 0x2100);
+		}
+	}
+	reg_data = TITAN_GE_READ(TITAN_GE_GMII_CONFIG_GENERAL +
+				(port_num << 12));
+	reg_data |= 0x3;
+	TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_GENERAL +
+			(port_num << 12)), reg_data);
+}
+
+/*
+ * Enable the TMAC if it is not
+ */
+static void titan_ge_enable_tx(unsigned int port_num)
+{
+	unsigned long reg_data;
+
+	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
+	if (!(reg_data & 0x8000)) {
+		printk("TMAC disabled for port %d!! \n", port_num);
+
+		reg_data |= 0x0001;	/* Enable TMAC */
+		reg_data |= 0x4000;	/* CRC Check Enable */
+		reg_data |= 0x2000;	/* Padding enable */
+		reg_data |= 0x0800;	/* CRC Add enable */
+		reg_data |= 0x0080;	/* PAUSE frame */
+
+		TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
+				(port_num << 12)), reg_data);
+	}
+}
+
+/*
+ * Tx Timeout function
+ */
+static void titan_ge_tx_timeout(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+
+	printk(KERN_INFO "%s: TX timeout  ", netdev->name);
+	printk(KERN_INFO "Resetting card \n");
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&titan_ge_eth->tx_timeout_task);
+}
+
+/*
+ * Update the AFX tables for UC and MC for slice 0 only
+ */
+static void titan_ge_update_afx(titan_ge_port_info * titan_ge_eth)
+{
+	int port = titan_ge_eth->port_num;
+	unsigned int i;
+	volatile unsigned long reg_data = 0;
+	u8 p_addr[6];
+
+	memcpy(p_addr, titan_ge_eth->port_mac_addr, 6);
+
+	/* Set the MAC address here for TMAC and RMAC */
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port << 12)),
+		       ((p_addr[5] << 8) | p_addr[4]));
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port << 12)),
+		       ((p_addr[3] << 8) | p_addr[2]));
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port << 12)),
+		       ((p_addr[1] << 8) | p_addr[0]));
+
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port << 12)),
+		       ((p_addr[5] << 8) | p_addr[4]));
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port << 12)),
+		       ((p_addr[3] << 8) | p_addr[2]));
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port << 12)),
+		       ((p_addr[1] << 8) | p_addr[0]));
+
+	TITAN_GE_WRITE((0x112c | (port << 12)), 0x1);
+	/* Configure the eight address filters */
+	for (i = 0; i < 8; i++) {
+		/* Select each of the eight filters */
+		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 +
+				(port << 12)), i);
+
+		/* Configure the match */
+		reg_data = 0x9;	/* Forward Enable Bit */
+		TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 +
+				(port << 12)), reg_data);
+
+		/* Finally, AFX Exact Match Address Registers */
+		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_LOW + (port << 12)),
+			       ((p_addr[1] << 8) | p_addr[0]));
+		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_MID + (port << 12)),
+			       ((p_addr[3] << 8) | p_addr[2]));
+		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_HIGH + (port << 12)),
+			       ((p_addr[5] << 8) | p_addr[4]));
+
+		/* VLAN id set to 0 */
+		TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_VID +
+				(port << 12)), 0);
+	}
+}
+
+/*
+ * Actual Routine to reset the adapter when the timeout occurred
+ */
+static void titan_ge_tx_timeout_task(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	int port = titan_ge_eth->port_num;
+
+	printk("Titan GE: Transmit timed out. Resetting ... \n");
+
+	/* Dump debug info */
+	printk(KERN_ERR "TRTG cause : %x \n",
+			TITAN_GE_READ(0x100c + (port << 12)));
+
+	/* Fix this for the other ports */
+	printk(KERN_ERR "FIFO cause : %x \n", TITAN_GE_READ(0x482c));
+	printk(KERN_ERR "IE cause : %x \n", TITAN_GE_READ(0x0040));
+	printk(KERN_ERR "XDMA GDI ERROR : %x \n",
+			TITAN_GE_READ(0x5008 + (port << 8)));
+	printk(KERN_ERR "CHANNEL ERROR: %x \n",
+			TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT
+						+ (port << 8)));
+
+	netif_device_detach(netdev);
+	titan_ge_port_reset(titan_ge_eth->port_num);
+	titan_ge_port_start(netdev, titan_ge_eth);
+	netif_device_attach(netdev);
+}
+
+/*
+ * Change the MTU of the Ethernet Device
+ */
+static int titan_ge_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned long flags;
+
+	if ((new_mtu > 9500) || (new_mtu < 64))
+		return -EINVAL;
+
+	spin_lock_irqsave(&titan_ge_eth->lock, flags);
+
+	netdev->mtu = new_mtu;
+
+	/* Now we have to reopen the interface so that SKBs with the new
+	 * size will be allocated */
+
+	if (netif_running(netdev)) {
+		titan_ge_eth_stop(netdev);
+
+		if (titan_ge_eth_open(netdev) != TITAN_OK) {
+			printk(KERN_ERR
+			       "%s: Fatal error on opening device\n",
+			       netdev->name);
+			spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+			return -1;
+		}
+	}
+
+	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+	return 0;
+}
+
+/*
+ * Titan Gbe Interrupt Handler. All the three ports send interrupt to one line
+ * only. Once an interrupt is triggered, figure out the port and then check
+ * the channel.
+ */
+static irqreturn_t titan_ge_int_handler(int irq, void *dev_id)
+{
+	struct net_device *netdev = (struct net_device *) dev_id;
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	unsigned int reg_data;
+	unsigned int eth_int_cause_error = 0, is;
+	unsigned long eth_int_cause1;
+	int err = 0;
+#ifdef CONFIG_SMP
+	unsigned long eth_int_cause2;
+#endif
+
+	/* Ack the CPU interrupt */
+	switch (port_num) {
+	case 0:
+		is = OCD_READ(RM9000x2_OCD_INTP0STATUS1);
+		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR1, is);
+
+#ifdef CONFIG_SMP
+		is = OCD_READ(RM9000x2_OCD_INTP1STATUS1);
+		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR1, is);
+#endif
+		break;
+
+	case 1:
+		is = OCD_READ(RM9000x2_OCD_INTP0STATUS0);
+		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR0, is);
+
+#ifdef CONFIG_SMP
+		is = OCD_READ(RM9000x2_OCD_INTP1STATUS0);
+		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR0, is);
+#endif
+		break;
+
+	case 2:
+		is = OCD_READ(RM9000x2_OCD_INTP0STATUS4);
+		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR4, is);
+
+#ifdef CONFIG_SMP
+		is = OCD_READ(RM9000x2_OCD_INTP1STATUS4);
+		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR4, is);
+#endif
+	}
+
+	eth_int_cause1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
+#ifdef CONFIG_SMP
+	eth_int_cause2 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_B);
+#endif
+
+	/* Spurious interrupt */
+#ifdef CONFIG_SMP
+	if ( (eth_int_cause1 == 0) && (eth_int_cause2 == 0)) {
+#else
+	if (eth_int_cause1 == 0) {
+#endif
+		eth_int_cause_error = TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT +
+					(port_num << 8));
+
+		if (eth_int_cause_error == 0)
+			return IRQ_NONE;
+	}
+
+	/* Handle Tx first. No need to ack interrupts */
+#ifdef CONFIG_SMP
+	if ( (eth_int_cause1 & 0x20202) ||
+		(eth_int_cause2 & 0x20202) )
+#else
+	if (eth_int_cause1 & 0x20202)
+#endif
+		titan_ge_free_tx_queue(titan_ge_eth);
+
+	/* Handle the Rx next */
+#ifdef CONFIG_SMP
+	if ( (eth_int_cause1 & 0x10101) ||
+		(eth_int_cause2 & 0x10101)) {
+#else
+	if (eth_int_cause1 & 0x10101) {
+#endif
+		if (netif_rx_schedule_prep(netdev)) {
+			unsigned int ack;
+
+			ack = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
+			/* Disable Tx and Rx both */
+			if (port_num == 0)
+				ack &= ~(0x3);
+			if (port_num == 1)
+				ack &= ~(0x300);
+
+			if (port_num == 2)
+				ack &= ~(0x30000);
+
+			/* Interrupts have been disabled */
+			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, ack);
+
+			__netif_rx_schedule(netdev);
+		}
+	}
+
+	/* Handle error interrupts */
+	if (eth_int_cause_error && (eth_int_cause_error != 0x2)) {
+		printk(KERN_ERR
+			"XDMA Channel Error : %x  on port %d\n",
+			eth_int_cause_error, port_num);
+
+		printk(KERN_ERR
+			"XDMA GDI Hardware error : %x  on port %d\n",
+			TITAN_GE_READ(0x5008 + (port_num << 8)), port_num);
+
+		printk(KERN_ERR
+			"XDMA currently has %d Rx descriptors \n",
+			TITAN_GE_READ(0x5048 + (port_num << 8)));
+
+		printk(KERN_ERR
+			"XDMA currently has prefetcted %d Rx descriptors \n",
+			TITAN_GE_READ(0x505c + (port_num << 8)));
+
+		TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT +
+			       (port_num << 8)), eth_int_cause_error);
+	}
+
+	/*
+	 * PHY interrupt to inform about link status changes. Reading the
+	 * PHY Status register will clear the interrupt
+	 */
+	if ((!(eth_int_cause1 & 0x30303)) &&
+		(eth_int_cause_error == 0)) {
+		err =
+		    titan_ge_mdio_read(port_num,
+			       TITAN_GE_MDIO_PHY_IS, &reg_data);
+
+		if (reg_data & 0x0400) {
+			/* Link status change */
+			titan_ge_mdio_read(port_num,
+				   TITAN_GE_MDIO_PHY_STATUS, &reg_data);
+			if (!(reg_data & 0x0400)) {
+				/* Link is down */
+				netif_carrier_off(netdev);
+				netif_stop_queue(netdev);
+			} else {
+				/* Link is up */
+				netif_carrier_on(netdev);
+				netif_wake_queue(netdev);
+
+				/* Enable the queue */
+				titan_ge_enable_tx(port_num);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Set multicast and promiscuous mode. The
+ * set_multi entry point is called whenever the
+ * multicast address list or the network interface
+ * flags are updated.
+ */
+static void titan_ge_set_multi(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	unsigned long reg_data;
+
+	reg_data = TITAN_GE_READ(TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
+				(port_num << 12));
+
+	if (netdev->flags & IFF_PROMISC) {
+		reg_data |= 0x2;
+	}
+	else if (netdev->flags & IFF_ALLMULTI) {
+		reg_data |= 0x01;
+		reg_data |= 0x400; /* Use the 64-bit Multicast Hash bin */
+	}
+	else {
+		reg_data = 0x2;
+	}
+
+	TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
+			(port_num << 12)), reg_data);
+	if (reg_data & 0x01) {
+		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_LOW +
+				(port_num << 12)), 0xffff);
+		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDLOW +
+				(port_num << 12)), 0xffff);
+		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDHI +
+				(port_num << 12)), 0xffff);
+		TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_HI +
+				(port_num << 12)), 0xffff);
+	}
+}
+
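+/*
+ * Note added for clarity: when the hash-filter bit (0x01) is set above,
+ * writing 0xffff to all four 16-bit multicast hash registers fills the
+ * 64-bit hash table with ones, which should make the filter accept every
+ * multicast address.  This matches the IFF_ALLMULTI case it is used for,
+ * but is inferred from the code rather than from chip documentation.
+ */
+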
+/*
+ * Open the network device
+ */
+static int titan_ge_open(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	unsigned int irq = TITAN_ETH_PORT_IRQ - port_num;
+	int retval;
+
+	retval = request_irq(irq, titan_ge_int_handler,
+		     SA_INTERRUPT | SA_SAMPLE_RANDOM , netdev->name, netdev);
+
+	if (retval != 0) {
+		printk(KERN_ERR "Cannot assign IRQ number to TITAN GE \n");
+		return -1;
+	}
+
+	netdev->irq = irq;
+	printk(KERN_INFO "Assigned IRQ %d to port %d\n", irq, port_num);
+
+	spin_lock_irq(&(titan_ge_eth->lock));
+
+	if (titan_ge_eth_open(netdev) != TITAN_OK) {
+		spin_unlock_irq(&(titan_ge_eth->lock));
+		printk("%s: Error opening interface \n", netdev->name);
+		free_irq(netdev->irq, netdev);
+		return -EBUSY;
+	}
+
+	spin_unlock_irq(&(titan_ge_eth->lock));
+
+	return 0;
+}
+
+/*
+ * Allocate the SKBs for the Rx ring. Also used
+ * for refilling the queue
+ */
+static int titan_ge_rx_task(struct net_device *netdev,
+				titan_ge_port_info *titan_ge_port)
+{
+	struct device *device = &titan_ge_device[titan_ge_port->port_num]->dev;
+	volatile titan_ge_rx_desc *rx_desc;
+	struct sk_buff *skb;
+	int rx_used_desc;
+	int count = 0;
+
+	while (titan_ge_port->rx_ring_skbs < titan_ge_port->rx_ring_size) {
+
+	/* First try to get the skb from the recycler */
+#ifdef TITAN_GE_JUMBO_FRAMES
+		skb = titan_ge_alloc_skb(TITAN_GE_JUMBO_BUFSIZE, GFP_ATOMIC);
+#else
+		skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC);
+#endif
+		if (unlikely(!skb)) {
+			/* OOM, set the flag */
+			printk("OOM \n");
+			oom_flag = 1;
+			break;
+		}
+		count++;
+		skb->dev = netdev;
+
+		titan_ge_port->rx_ring_skbs++;
+
+		rx_used_desc = titan_ge_port->rx_used_desc_q;
+		rx_desc = &(titan_ge_port->rx_desc_area[rx_used_desc]);
+
+#ifdef TITAN_GE_JUMBO_FRAMES
+		rx_desc->buffer_addr = dma_map_single(device, skb->data,
+				TITAN_GE_JUMBO_BUFSIZE - 2, DMA_FROM_DEVICE);
+#else
+		rx_desc->buffer_addr = dma_map_single(device, skb->data,
+				TITAN_GE_STD_BUFSIZE - 2, DMA_FROM_DEVICE);
+#endif
+
+		titan_ge_port->rx_skb[rx_used_desc] = skb;
+		rx_desc->cmd_sts = TITAN_GE_RX_BUFFER_OWNED;
+
+		titan_ge_port->rx_used_desc_q =
+			(rx_used_desc + 1) % TITAN_GE_RX_QUEUE;
+	}
+
+	return count;
+}
+
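+/*
+ * Note added for clarity: the "- 2" in the DMA mapping sizes above,
+ * together with the skb_reserve(skb, 2) and skb_put(skb, len - 2) in
+ * titan_ge_receive_queue(), appears to account for a 2-byte pad in front
+ * of the received frame so that the IP header ends up 4-byte aligned (see
+ * the alignment remark for the 1.2 silicon in titan_ge_port_start()).
+ * This is an inference from the code, not from chip documentation.
+ */
+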
+/*
+ * Actual init of the Titan GE port. There is one register for
+ * the channel configuration
+ */
+static void titan_port_init(struct net_device *netdev,
+			    titan_ge_port_info * titan_ge_eth)
+{
+	unsigned long reg_data;
+
+	titan_ge_port_reset(titan_ge_eth->port_num);
+
+	/* First reset the TMAC */
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
+	reg_data |= 0x80000000;
+	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
+
+	udelay(30);
+
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
+	reg_data &= ~(0xc0000000);
+	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
+
+	/* Now reset the RMAC */
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
+	reg_data |= 0x00080000;
+	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
+
+	udelay(30);
+
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
+	reg_data &= ~(0x000c0000);
+	TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
+}
+
+/*
+ * Start the port. All the hardware specific configuration
+ * for the XDMA, Tx FIFO, Rx FIFO, TMAC, RMAC, TRTG and AFX
+ * go here
+ */
+static int titan_ge_port_start(struct net_device *netdev,
+				titan_ge_port_info * titan_port)
+{
+	volatile unsigned long reg_data, reg_data1;
+	int port_num = titan_port->port_num;
+	int count = 0;
+	unsigned long reg_data_1;
+
+	if (config_done == 0) {
+		reg_data = TITAN_GE_READ(0x0004);
+		reg_data |= 0x100;
+		TITAN_GE_WRITE(0x0004, reg_data);
+
+		reg_data &= ~(0x100);
+		TITAN_GE_WRITE(0x0004, reg_data);
+
+		/* Turn on GMII/MII mode and turn off TBI mode */
+		reg_data = TITAN_GE_READ(TITAN_GE_TSB_CTRL_1);
+		reg_data |= 0x00000700;
+		reg_data &= ~(0x00800000); /* Fencing */
+
+		TITAN_GE_WRITE(0x000c, 0x00001100);
+
+		TITAN_GE_WRITE(TITAN_GE_TSB_CTRL_1, reg_data);
+
+		/* Set the CPU Resource Limit register */
+		TITAN_GE_WRITE(0x00f8, 0x8);
+
+		/* Be conservative when using the BIU buffers */
+		TITAN_GE_WRITE(0x0068, 0x4);
+	}
+
+	titan_port->tx_threshold = 0;
+	titan_port->rx_threshold = 0;
+
+	/* We need to write the descriptors for Tx and Rx */
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_TX_DESC + (port_num << 8)),
+		       (unsigned long) titan_port->tx_dma);
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_RX_DESC + (port_num << 8)),
+		       (unsigned long) titan_port->rx_dma);
+
+	if (config_done == 0) {
+		/* Step 1:  XDMA config	*/
+		reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
+		reg_data &= ~(0x80000000);      /* clear reset */
+		reg_data |= 0x1 << 29;	/* sparse tx descriptor spacing */
+		reg_data |= 0x1 << 28;	/* sparse rx descriptor spacing */
+		reg_data |= (0x1 << 23) | (0x1 << 24);  /* Descriptor Coherency */
+		reg_data |= (0x1 << 21) | (0x1 << 22);  /* Data Coherency */
+		TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);
+	}
+
+	/* IR register for the XDMA */
+	reg_data = TITAN_GE_READ(TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8));
+	reg_data |= 0x80068000; /* No Rx_OOD */
+	TITAN_GE_WRITE((TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)), reg_data);
+
+	/* Start the Tx and Rx XDMA controller */
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + (port_num << 8));
+	reg_data &= 0x4fffffff;     /* Clear tx reset */
+	reg_data &= 0xfff4ffff;     /* Clear rx reset */
+
+#ifdef TITAN_GE_JUMBO_FRAMES
+	reg_data |= 0xa0 | 0x30030000;
+#else
+	reg_data |= 0x40 | 0x20030000;
+#endif
+
+#ifndef CONFIG_SMP
+	reg_data &= ~(0x10);
+	reg_data |= 0x0f; /* All of the packet */
+#endif
+
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)), reg_data);
+
+	/* Rx desc count */
+	count = titan_ge_rx_task(netdev, titan_port);
+	TITAN_GE_WRITE((0x5048 + (port_num << 8)), count);
+	count = TITAN_GE_READ(0x5048 + (port_num << 8));
+
+	udelay(30);
+
+	/*
+	 * Step 2:  Configure the SDQPF, i.e. FIFO
+	 */
+	if (config_done == 0) {
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
+		reg_data = 0x1;
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
+		reg_data &= ~(0x1);
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
+
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
+		reg_data = 0x1;
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
+		reg_data &= ~(0x1);
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
+	}
+	/*
+	 * Enable RX FIFO 0, 4 and 8
+	 */
+	if (port_num == 0) {
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_0);
+
+		reg_data |= 0x100000;
+		reg_data |= (0xff << 10);
+
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);
+		/*
+		 * BAV2,BAV and DAV settings for the Rx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x4844);
+		reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1);
+		TITAN_GE_WRITE(0x4844, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);
+
+		reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_0);
+		reg_data |= 0x100000;
+
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
+
+		reg_data |= (0xff << 10);
+
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
+
+		/*
+		 * BAV2, BAV and DAV settings for the Tx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x4944);
+		reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10);
+
+		TITAN_GE_WRITE(0x4944, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
+
+	}
+
+	if (port_num == 1) {
+		reg_data = TITAN_GE_READ(0x4870);
+
+		reg_data |= 0x100000;
+		reg_data |= (0xff << 10) | (0xff + 1);
+
+		TITAN_GE_WRITE(0x4870, reg_data);
+		/*
+		 * BAV2,BAV and DAV settings for the Rx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x4874);
+		reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1);
+		TITAN_GE_WRITE(0x4874, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(0x4870, reg_data);
+
+		reg_data = TITAN_GE_READ(0x494c);
+		reg_data |= 0x100000;
+
+		TITAN_GE_WRITE(0x494c, reg_data);
+		reg_data |= (0xff << 10) | (0xff + 1);
+		TITAN_GE_WRITE(0x494c, reg_data);
+
+		/*
+		 * BAV2, BAV and DAV settings for the Tx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x4950);
+		reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10);
+
+		TITAN_GE_WRITE(0x4950, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(0x494c, reg_data);
+	}
+
+	/*
+	 * Titan 1.2 revision does support port #2
+	 */
+	if (port_num == 2) {
+		/*
+		 * Put the descriptors in the SRAM
+		 */
+		reg_data = TITAN_GE_READ(0x48a0);
+
+		reg_data |= 0x100000;
+		reg_data |= (0xff << 10) | (2*(0xff + 1));
+
+		TITAN_GE_WRITE(0x48a0, reg_data);
+		/*
+		 * BAV2,BAV and DAV settings for the Rx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x48a4);
+		reg_data1 |= ( (0x10 << 20) | (0x10 << 10) | 0x1);
+		TITAN_GE_WRITE(0x48a4, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(0x48a0, reg_data);
+
+		reg_data = TITAN_GE_READ(0x4958);
+		reg_data |= 0x100000;
+
+		TITAN_GE_WRITE(0x4958, reg_data);
+		reg_data |= (0xff << 10) | (2*(0xff + 1));
+		TITAN_GE_WRITE(0x4958, reg_data);
+
+		/*
+		 * BAV2, BAV and DAV settings for the Tx FIFO
+		 */
+		reg_data1 = TITAN_GE_READ(0x495c);
+		reg_data1 = ( (0x1 << 20) | (0x1 << 10) | 0x10);
+
+		TITAN_GE_WRITE(0x495c, reg_data1);
+
+		reg_data &= ~(0x00100000);
+		reg_data |= 0x200000;
+
+		TITAN_GE_WRITE(0x4958, reg_data);
+	}
+
+	/*
+	 * Step 3:  TRTG block enable
+	 */
+	reg_data = TITAN_GE_READ(TITAN_GE_TRTG_CONFIG + (port_num << 12));
+
+	/*
+	 * This is the 1.2 revision of the chip. It has a fix for
+	 * IP header alignment: the IP header now begins at an
+	 * aligned address, so the driver does not need an extra
+	 * copy. This performance drawback existed in previous
+	 * revisions of the silicon.
+	 */
+	reg_data_1 = TITAN_GE_READ(0x103c + (port_num << 12));
+	reg_data_1 |= 0x40000000;
+	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);
+
+	reg_data_1 |= 0x04000000;
+	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);
+
+	mdelay(5);
+
+	reg_data_1 &= ~(0x04000000);
+	TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);
+
+	mdelay(5);
+
+	reg_data |= 0x0001;
+	TITAN_GE_WRITE((TITAN_GE_TRTG_CONFIG + (port_num << 12)), reg_data);
+
+	/*
+	 * Step 4:  Start the Tx activity
+	 */
+	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_2 + (port_num << 12)), 0xe197);
+#ifdef TITAN_GE_JUMBO_FRAMES
+	TITAN_GE_WRITE((0x1258 + (port_num << 12)), 0x4000);
+#endif
+	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
+	reg_data |= 0x0001;	/* Enable TMAC */
+	reg_data |= 0x6c70;	/* PAUSE also set */
+
+	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)), reg_data);
+
+	udelay(30);
+
+	/* Destination Address drop bit */
+	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_2 + (port_num << 12));
+	reg_data |= 0x218;        /* DA_DROP bit and pause */
+	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)), reg_data);
+
+	TITAN_GE_WRITE((0x1218 + (port_num << 12)), 0x3);
+
+#ifdef TITAN_GE_JUMBO_FRAMES
+	TITAN_GE_WRITE((0x1208 + (port_num << 12)), 0x4000);
+#endif
+	/* Start the Rx activity */
+	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
+	reg_data |= 0x0001;	/* RMAC Enable */
+	reg_data |= 0x0010;	/* CRC Check enable */
+	reg_data |= 0x0040;	/* Min Frame check enable */
+	reg_data |= 0x4400;	/* Max Frame check enable */
+
+	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);
+
+	udelay(30);
+
+	/*
+	 * Enable the Interrupts for Tx and Rx
+	 */
+	reg_data1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
+
+	if (port_num == 0) {
+		reg_data1 |= 0x3;
+#ifdef CONFIG_SMP
+		TITAN_GE_WRITE(0x0038, 0x003);
+#else
+		TITAN_GE_WRITE(0x0038, 0x303);
+#endif
+	}
+
+	if (port_num == 1) {
+		reg_data1 |= 0x300;
+	}
+
+	if (port_num == 2)
+		reg_data1 |= 0x30000;
+
+	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data1);
+	TITAN_GE_WRITE(0x003c, 0x300);
+
+	if (config_done == 0) {
+		TITAN_GE_WRITE(0x0024, 0x04000024);	/* IRQ vector */
+		TITAN_GE_WRITE(0x0020, 0x000fb000);	/* INTMSG base */
+	}
+
+	/* Priority */
+	reg_data = TITAN_GE_READ(0x1038 + (port_num << 12));
+	reg_data &= ~(0x00f00000);
+	TITAN_GE_WRITE((0x1038 + (port_num << 12)), reg_data);
+
+	/* Step 5:  GMII config */
+	titan_ge_gmii_config(port_num);
+
+	if (config_done == 0) {
+		TITAN_GE_WRITE(0x1a80, 0);
+		config_done = 1;
+	}
+
+	return TITAN_OK;
+}
+
+/*
+ * Function to queue the packet for the Ethernet device
+ */
+static void titan_ge_tx_queue(titan_ge_port_info * titan_ge_eth,
+				struct sk_buff * skb)
+{
+	struct device *device = &titan_ge_device[titan_ge_eth->port_num]->dev;
+	unsigned int curr_desc = titan_ge_eth->tx_curr_desc_q;
+	volatile titan_ge_tx_desc *tx_curr;
+	int port_num = titan_ge_eth->port_num;
+
+	tx_curr = &(titan_ge_eth->tx_desc_area[curr_desc]);
+	tx_curr->buffer_addr =
+		dma_map_single(device, skb->data, skb_headlen(skb),
+			       DMA_TO_DEVICE);
+
+	titan_ge_eth->tx_skb[curr_desc] = (struct sk_buff *) skb;
+	tx_curr->buffer_len = skb_headlen(skb);
+
+	/* Last descriptor enables interrupt and changes ownership */
+	tx_curr->cmd_sts = 0x1 | (1 << 15) | (1 << 5);
+
+	/* Kick the XDMA to start the transfer from memory to the FIFO */
+	TITAN_GE_WRITE((0x5044 + (port_num << 8)), 0x1);
+
+	/* Current descriptor updated */
+	titan_ge_eth->tx_curr_desc_q = (curr_desc + 1) % TITAN_GE_TX_QUEUE;
+
+	/* Prefetch the next descriptor */
+	prefetch((const void *)
+		 &titan_ge_eth->tx_desc_area[titan_ge_eth->tx_curr_desc_q]);
+}
+
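+/*
+ * Note added for clarity: the cmd_sts value written above,
+ * 0x1 | (1 << 15) | (1 << 5), matches the constants defined in titan_ge.h
+ * and could equivalently be written as
+ *
+ *	tx_curr->cmd_sts = 0x1 | TITAN_GE_TX_ENABLE_INTERRUPT |
+ *			   TITAN_GE_TX_BUFFER_OWNED;
+ *
+ * Bit 0 is not named in the header; its role is assumed from the inline
+ * comment above.
+ */
+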
+/*
+ * Actually does the open of the Ethernet device
+ */
+static int titan_ge_eth_open(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	struct device *device = &titan_ge_device[port_num]->dev;
+	unsigned long reg_data;
+	unsigned int phy_reg;
+	int err = 0;
+
+	/* Stop the Rx activity */
+	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
+	reg_data &= ~(0x00000001);
+	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);
+
+	/* Clear the port interrupts */
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT + (port_num << 8)), 0x0);
+
+	if (config_done == 0) {
+		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0);
+		TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_B, 0);
+	}
+
+	/* Set the MAC Address */
+	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);
+
+	if (config_done == 0)
+		titan_port_init(netdev, titan_ge_eth);
+
+	titan_ge_update_afx(titan_ge_eth);
+
+	/* Allocate the Tx ring now */
+	titan_ge_eth->tx_ring_skbs = 0;
+	titan_ge_eth->tx_ring_size = TITAN_GE_TX_QUEUE;
+
+	/* Allocate space in the SRAM for the descriptors */
+	titan_ge_eth->tx_desc_area = (titan_ge_tx_desc *)
+		(titan_ge_sram + TITAN_TX_RING_BYTES * port_num);
+	titan_ge_eth->tx_dma = TITAN_SRAM_BASE + TITAN_TX_RING_BYTES * port_num;
+
+	if (!titan_ge_eth->tx_desc_area) {
+		printk(KERN_ERR
+		       "%s: Cannot allocate Tx Ring (size %d bytes) for port %d\n",
+		       netdev->name, TITAN_TX_RING_BYTES, port_num);
+		return -ENOMEM;
+	}
+
+	memset(titan_ge_eth->tx_desc_area, 0, titan_ge_eth->tx_desc_area_size);
+
+	/* Now initialize the Tx descriptor ring */
+	titan_ge_init_tx_desc_ring(titan_ge_eth,
+				   titan_ge_eth->tx_ring_size,
+				   (unsigned long) titan_ge_eth->tx_desc_area,
+				   (unsigned long) titan_ge_eth->tx_dma);
+
+	/* Allocate the Rx ring now */
+	titan_ge_eth->rx_ring_size = TITAN_GE_RX_QUEUE;
+	titan_ge_eth->rx_ring_skbs = 0;
+
+	titan_ge_eth->rx_desc_area =
+		(titan_ge_rx_desc *)(titan_ge_sram + 0x1000 + TITAN_RX_RING_BYTES * port_num);
+
+	titan_ge_eth->rx_dma = TITAN_SRAM_BASE + 0x1000 + TITAN_RX_RING_BYTES * port_num;
+
+	if (!titan_ge_eth->rx_desc_area) {
+		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
+		       netdev->name, TITAN_RX_RING_BYTES);
+
+		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
+		       netdev->name);
+
+		dma_free_coherent(device, titan_ge_eth->tx_desc_area_size,
+				    (void *) titan_ge_eth->tx_desc_area,
+				    titan_ge_eth->tx_dma);
+
+		return -ENOMEM;
+	}
+
+	memset(titan_ge_eth->rx_desc_area, 0, titan_ge_eth->rx_desc_area_size);
+
+	/* Now initialize the Rx ring */
+#ifdef TITAN_GE_JUMBO_FRAMES
+	if ((titan_ge_init_rx_desc_ring
+	    (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_JUMBO_BUFSIZE,
+	     (unsigned long) titan_ge_eth->rx_desc_area, 0,
+	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
+#else
+	if ((titan_ge_init_rx_desc_ring
+	     (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_STD_BUFSIZE,
+	      (unsigned long) titan_ge_eth->rx_desc_area, 0,
+	      (unsigned long) titan_ge_eth->rx_dma)) == 0)
+#endif
+		panic("%s: Error initializing RX Ring\n", netdev->name);
+
+	/* Fill the Rx ring with the SKBs */
+	titan_ge_port_start(netdev, titan_ge_eth);
+
+	/*
+	 * Check if interrupt coalescing needs to be turned on. The
+	 * value specified in the register is multiplied by
+	 * (8 x 64 nanoseconds) to determine when an interrupt should
+	 * be sent to the CPU.
+	 */
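+	/*
+	 * Worked example (added for clarity): with the default
+	 * TITAN_GE_TX_COAL of 300 from titan_ge.h and the 8 x 64 ns
+	 * granularity described above, the Tx coalescing delay works out to
+	 * 300 * 8 * 64 ns = 153,600 ns, i.e. roughly 154 us between Tx
+	 * interrupts (assuming the stated granularity is accurate).
+	 */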
+
+	if (TITAN_GE_TX_COAL) {
+		titan_ge_eth->tx_int_coal =
+		    titan_ge_tx_coal(TITAN_GE_TX_COAL, port_num);
+	}
+
+	err = titan_ge_mdio_read(port_num, TITAN_GE_MDIO_PHY_STATUS, &phy_reg);
+	if (err == TITAN_GE_MDIO_ERROR) {
+		printk(KERN_ERR
+		       "Could not read PHY control register 0x11 \n");
+		return TITAN_ERROR;
+	}
+	if (!(phy_reg & 0x0400)) {
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+		return TITAN_ERROR;
+	} else {
+		netif_carrier_on(netdev);
+		netif_start_queue(netdev);
+	}
+
+	return TITAN_OK;
+}
+
+/*
+ * Queue the packet for Tx. Currently there is no support for zero copy,
+ * checksum offload or scatter-gather. The chip does support
+ * scatter-gather, but that won't help here since zero copy
+ * also requires Tx checksum offload.
+ */
+int titan_ge_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned long flags;
+	struct net_device_stats *stats;
+
+	stats = &titan_ge_eth->stats;
+	spin_lock_irqsave(&titan_ge_eth->lock, flags);
+
+	if ((TITAN_GE_TX_QUEUE - titan_ge_eth->tx_ring_skbs) <=
+	    (skb_shinfo(skb)->nr_frags + 1)) {
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+		printk(KERN_ERR "Tx OOD \n");
+		return 1;
+	}
+
+	titan_ge_tx_queue(titan_ge_eth, skb);
+	titan_ge_eth->tx_ring_skbs++;
+
+	if (TITAN_GE_TX_QUEUE <= (titan_ge_eth->tx_ring_skbs + 4)) {
+		spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+		titan_ge_free_tx_queue(titan_ge_eth);
+		spin_lock_irqsave(&titan_ge_eth->lock, flags);
+	}
+
+	stats->tx_bytes += skb->len;
+	stats->tx_packets++;
+
+	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+
+	netdev->trans_start = jiffies;
+
+	return 0;
+}
+
+/*
+ * Actually does the Rx. Rx side checksumming supported.
+ */
+static int titan_ge_rx(struct net_device *netdev, int port_num,
+			titan_ge_port_info * titan_ge_port,
+		       titan_ge_packet * packet)
+{
+	int rx_curr_desc, rx_used_desc;
+	volatile titan_ge_rx_desc *rx_desc;
+
+	rx_curr_desc = titan_ge_port->rx_curr_desc_q;
+	rx_used_desc = titan_ge_port->rx_used_desc_q;
+
+	if (((rx_curr_desc + 1) % TITAN_GE_RX_QUEUE) == rx_used_desc)
+		return TITAN_ERROR;
+
+	rx_desc = &(titan_ge_port->rx_desc_area[rx_curr_desc]);
+
+	if (rx_desc->cmd_sts & TITAN_GE_RX_BUFFER_OWNED)
+		return TITAN_ERROR;
+
+	packet->skb = titan_ge_port->rx_skb[rx_curr_desc];
+	packet->len = (rx_desc->cmd_sts & 0x7fff);
+
+	/*
+	 * At this point, we don't know if the checksumming
+	 * actually helps relieve the CPU, so keep it for
+	 * port 0 only.
+	 */
+	packet->checksum = ntohs((rx_desc->buffer & 0xffff0000) >> 16);
+	packet->cmd_sts = rx_desc->cmd_sts;
+
+	titan_ge_port->rx_curr_desc_q = (rx_curr_desc + 1) % TITAN_GE_RX_QUEUE;
+
+	/* Prefetch the next descriptor */
+	prefetch((const void *)
+	       &titan_ge_port->rx_desc_area[titan_ge_port->rx_curr_desc_q + 1]);
+
+	return TITAN_OK;
+}
+
+/*
+ * Free the Tx queue of the used SKBs
+ */
+static int titan_ge_free_tx_queue(titan_ge_port_info *titan_ge_eth)
+{
+	unsigned long flags;
+
+	/* Take the lock */
+	spin_lock_irqsave(&(titan_ge_eth->lock), flags);
+
+	while (titan_ge_return_tx_desc(titan_ge_eth, titan_ge_eth->port_num) == 0)
+		if (titan_ge_eth->tx_ring_skbs != 1)
+			titan_ge_eth->tx_ring_skbs--;
+
+	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+
+	return TITAN_OK;
+}
+
+/*
+ * Thresholds beyond which we clean up the Tx
+ * queue and allocate new buffers for the Rx
+ * queue
+ */
+#define	TX_THRESHOLD	4
+#define	RX_THRESHOLD	10
+
+/*
+ * Receive the packets and send them up to the kernel.
+ */
+static int titan_ge_receive_queue(struct net_device *netdev, unsigned int max)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	titan_ge_packet packet;
+	struct net_device_stats *stats;
+	struct sk_buff *skb;
+	unsigned long received_packets = 0;
+	unsigned int ack;
+
+	stats = &titan_ge_eth->stats;
+
+	while ((--max)
+	       && (titan_ge_rx(netdev, port_num, titan_ge_eth, &packet) == TITAN_OK)) {
+		skb = (struct sk_buff *) packet.skb;
+
+		titan_ge_eth->rx_ring_skbs--;
+
+		if (--titan_ge_eth->rx_work_limit < 0)
+			break;
+		received_packets++;
+
+		stats->rx_packets++;
+		stats->rx_bytes += packet.len;
+
+		if ((packet.cmd_sts & TITAN_GE_RX_PERR) ||
+			(packet.cmd_sts & TITAN_GE_RX_OVERFLOW_ERROR) ||
+			(packet.cmd_sts & TITAN_GE_RX_TRUNC) ||
+			(packet.cmd_sts & TITAN_GE_RX_CRC_ERROR)) {
+				stats->rx_dropped++;
+				dev_kfree_skb_any(skb);
+
+				continue;
+		}
+		/*
+		 * Either support fast path or slow path. Decision
+		 * making can really slow down the performance. The
+		 * idea is to cut down the number of checks and improve
+		 * the fastpath.
+		 */
+
+		skb_put(skb, packet.len - 2);
+
+		/*
+		 * Increment the data pointer by two since that's where
+		 * the MAC header starts
+		 */
+		skb_reserve(skb, 2);
+		skb->protocol = eth_type_trans(skb, netdev);
+		netif_receive_skb(skb);
+
+		if (titan_ge_eth->rx_threshold > RX_THRESHOLD) {
+			ack = titan_ge_rx_task(netdev, titan_ge_eth);
+			TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack);
+			titan_ge_eth->rx_threshold = 0;
+		} else
+			titan_ge_eth->rx_threshold++;
+
+		if (titan_ge_eth->tx_threshold > TX_THRESHOLD) {
+			titan_ge_eth->tx_threshold = 0;
+			titan_ge_free_tx_queue(titan_ge_eth);
+		}
+		else
+			titan_ge_eth->tx_threshold++;
+
+	}
+	return received_packets;
+}
+
+
+/*
+ * Enable the Rx side interrupts
+ */
+static void titan_ge_enable_int(unsigned int port_num,
+			titan_ge_port_info *titan_ge_eth,
+			struct net_device *netdev)
+{
+	unsigned long reg_data = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
+
+	if (port_num == 0)
+		reg_data |= 0x3;
+	if (port_num == 1)
+		reg_data |= 0x300;
+	if (port_num == 2)
+		reg_data |= 0x30000;
+
+	/* Re-enable interrupts */
+	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data);
+}
+
+/*
+ * Main function to handle the polling for Rx side NAPI.
+ * Receive interrupts have been disabled at this point.
+ * The poll schedules the transmit followed by receive.
+ */
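+/*
+ * Note added for clarity: this uses the original 2.6.19-era NAPI interface,
+ * where dev->poll(dev, budget) must decrement both *budget and dev->quota
+ * by the amount of work done, return 1 while more packets remain, and
+ * return 0 only after calling netif_rx_complete() and re-enabling the
+ * device's Rx interrupts, as done below.
+ */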
+static int titan_ge_poll(struct net_device *netdev, int *budget)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	int port_num = titan_ge_eth->port_num;
+	int work_done = 0;
+	unsigned long flags, status;
+
+	titan_ge_eth->rx_work_limit = *budget;
+	if (titan_ge_eth->rx_work_limit > netdev->quota)
+		titan_ge_eth->rx_work_limit = netdev->quota;
+
+	do {
+		/* Do the transmit cleaning work here */
+		titan_ge_free_tx_queue(titan_ge_eth);
+
+		/* Ack the Rx interrupts */
+		if (port_num == 0)
+			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x3);
+		if (port_num == 1)
+			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x300);
+		if (port_num == 2)
+			TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x30000);
+
+		work_done += titan_ge_receive_queue(netdev, 0);
+
+		/* Out of quota and there is work to be done */
+		if (titan_ge_eth->rx_work_limit < 0)
+			goto not_done;
+
+		/* Receive alloc_skb could lead to OOM */
+		if (oom_flag == 1) {
+			oom_flag = 0;
+			goto oom;
+		}
+
+		status = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
+	} while (status & 0x30300);
+
+	/* If we are here, then no more interrupts to process */
+	goto done;
+
+not_done:
+	*budget -= work_done;
+	netdev->quota -= work_done;
+	return 1;
+
+oom:
+	printk(KERN_ERR "OOM \n");
+	netif_rx_complete(netdev);
+	return 0;
+
+done:
+	/*
+	 * No more packets on the poll list. Turn the interrupts
+	 * back on and we should be able to catch the new
+	 * packets in the interrupt handler
+	 */
+	if (!work_done)
+		work_done = 1;
+
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	spin_lock_irqsave(&titan_ge_eth->lock, flags);
+
+	/* Remove us from the poll list */
+	netif_rx_complete(netdev);
+
+	/* Re-enable interrupts */
+	titan_ge_enable_int(port_num, titan_ge_eth, netdev);
+
+	spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Close the network device
+ */
+int titan_ge_stop(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+
+	spin_lock_irq(&(titan_ge_eth->lock));
+	titan_ge_eth_stop(netdev);
+	free_irq(netdev->irq, netdev);
+	spin_unlock_irq(&titan_ge_eth->lock);
+
+	return TITAN_OK;
+}
+
+/*
+ * Free the Tx ring
+ */
+static void titan_ge_free_tx_rings(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	unsigned int curr;
+	unsigned long reg_data;
+
+	/* Stop the Tx DMA */
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
+				(port_num << 8));
+	reg_data |= 0xc0000000;
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
+			(port_num << 8)), reg_data);
+
+	/* Disable the TMAC */
+	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
+				(port_num << 12));
+	reg_data &= ~(0x00000001);
+	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
+			(port_num << 12)), reg_data);
+
+	for (curr = 0;
+	     (titan_ge_eth->tx_ring_skbs) && (curr < TITAN_GE_TX_QUEUE);
+	     curr++) {
+		if (titan_ge_eth->tx_skb[curr]) {
+			dev_kfree_skb(titan_ge_eth->tx_skb[curr]);
+			titan_ge_eth->tx_ring_skbs--;
+		}
+	}
+
+	if (titan_ge_eth->tx_ring_skbs != 0)
+		printk(KERN_ERR
+		       "%s: Error on Tx descriptor free - could not free %d"
+		       " descriptors\n", netdev->name,
+		       titan_ge_eth->tx_ring_skbs);
+
+#ifndef TITAN_RX_RING_IN_SRAM
+	dma_free_coherent(&titan_ge_device[port_num]->dev,
+			  titan_ge_eth->tx_desc_area_size,
+			  (void *) titan_ge_eth->tx_desc_area,
+			  titan_ge_eth->tx_dma);
+#endif
+}
+
+/*
+ * Free the Rx ring
+ */
+static void titan_ge_free_rx_rings(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	unsigned int curr;
+	unsigned long reg_data;
+
+	/* Stop the Rx DMA */
+	reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
+				(port_num << 8));
+	reg_data |= 0x000c0000;
+	TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
+			(port_num << 8)), reg_data);
+
+	/* Disable the RMAC */
+	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
+				(port_num << 12));
+	reg_data &= ~(0x00000001);
+	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
+			(port_num << 12)), reg_data);
+
+	for (curr = 0;
+	     titan_ge_eth->rx_ring_skbs && (curr < TITAN_GE_RX_QUEUE);
+	     curr++) {
+		if (titan_ge_eth->rx_skb[curr]) {
+			dev_kfree_skb(titan_ge_eth->rx_skb[curr]);
+			titan_ge_eth->rx_ring_skbs--;
+		}
+	}
+
+	if (titan_ge_eth->rx_ring_skbs != 0)
+		printk(KERN_ERR
+		       "%s: Error in freeing Rx Ring. %d skb's still"
+		       " stuck in RX Ring - ignoring them\n", netdev->name,
+		       titan_ge_eth->rx_ring_skbs);
+
+#ifndef TITAN_RX_RING_IN_SRAM
+	dma_free_coherent(&titan_ge_device[port_num]->dev,
+			  titan_ge_eth->rx_desc_area_size,
+			  (void *) titan_ge_eth->rx_desc_area,
+			  titan_ge_eth->rx_dma);
+#endif
+}
+
+/*
+ * Actually does the stop of the Ethernet device
+ */
+static void titan_ge_eth_stop(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+
+	titan_ge_port_reset(titan_ge_eth->port_num);
+
+	titan_ge_free_tx_rings(netdev);
+	titan_ge_free_rx_rings(netdev);
+
+	/* Disable the Tx and Rx Interrupts for all channels */
+	TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, 0x0);
+}
+
+/*
+ * Update the MAC address. Note that we have to write the
+ * address in three station registers, 16 bits each. And this
+ * has to be done for TMAC and RMAC
+ */
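+/*
+ * Worked example (added for clarity): for a MAC address of
+ * 00:11:22:33:44:55 stored in dev_addr[0]..dev_addr[5], the byte pairing
+ * used below gives
+ *
+ *	STATION_HI  = (0x55 << 8) | 0x44 = 0x5544
+ *	STATION_MID = (0x33 << 8) | 0x22 = 0x3322
+ *	STATION_LOW = (0x11 << 8) | 0x00 = 0x1100
+ *
+ * written identically to the TMAC and RMAC station address registers.
+ */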
+static void titan_ge_update_mac_address(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+	unsigned int port_num = titan_ge_eth->port_num;
+	u8 p_addr[6];
+
+	memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);
+	memcpy(p_addr, netdev->dev_addr, 6);
+
+	/* Update the Address Filtering Match tables */
+	titan_ge_update_afx(titan_ge_eth);
+
+	printk("Station MAC : %d %d %d %d %d %d  \n",
+		p_addr[5], p_addr[4], p_addr[3],
+		p_addr[2], p_addr[1], p_addr[0]);
+
+	/* Set the MAC address here for TMAC and RMAC */
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port_num << 12)),
+		       ((p_addr[5] << 8) | p_addr[4]));
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port_num << 12)),
+		       ((p_addr[3] << 8) | p_addr[2]));
+	TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port_num << 12)),
+		       ((p_addr[1] << 8) | p_addr[0]));
+
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port_num << 12)),
+		       ((p_addr[5] << 8) | p_addr[4]));
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port_num << 12)),
+		       ((p_addr[3] << 8) | p_addr[2]));
+	TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port_num << 12)),
+		       ((p_addr[1] << 8) | p_addr[0]));
+}
+
+/*
+ * Set the MAC address of the Ethernet device
+ */
+static int titan_ge_set_mac_address(struct net_device *dev, void *addr)
+{
+	titan_ge_port_info *tp = netdev_priv(dev);
+	struct sockaddr *sa = addr;
+
+	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+	spin_lock_irq(&tp->lock);
+	titan_ge_update_mac_address(dev);
+	spin_unlock_irq(&tp->lock);
+
+	return 0;
+}
+
+/*
+ * Get the Ethernet device stats
+ */
+static struct net_device_stats *titan_ge_get_stats(struct net_device *netdev)
+{
+	titan_ge_port_info *titan_ge_eth = netdev_priv(netdev);
+
+	return &titan_ge_eth->stats;
+}
+
+/*
+ * Initialize the Rx descriptor ring for the Titan GE
+ */
+static int titan_ge_init_rx_desc_ring(titan_ge_port_info * titan_eth_port,
+				      int rx_desc_num,
+				      int rx_buff_size,
+				      unsigned long rx_desc_base_addr,
+				      unsigned long rx_buff_base_addr,
+				      unsigned long rx_dma)
+{
+	volatile titan_ge_rx_desc *rx_desc;
+	unsigned long buffer_addr;
+	int index;
+	unsigned long titan_ge_rx_desc_bus = rx_dma;
+
+	buffer_addr = rx_buff_base_addr;
+	rx_desc = (titan_ge_rx_desc *) rx_desc_base_addr;
+
+	/* Check alignment */
+	if (rx_buff_base_addr & 0xF)
+		return 0;
+
+	/* Check Rx buffer size */
+	if ((rx_buff_size < 8) || (rx_buff_size > TITAN_GE_MAX_RX_BUFFER))
+		return 0;
+
+	/* 64-bit alignment
+	if ((rx_buff_base_addr + rx_buff_size) & 0x7)
+		return 0; */
+
+	/* Initialize the Rx desc ring */
+	for (index = 0; index < rx_desc_num; index++) {
+		titan_ge_rx_desc_bus += sizeof(titan_ge_rx_desc);
+		rx_desc[index].cmd_sts = 0;
+		rx_desc[index].buffer_addr = buffer_addr;
+		titan_eth_port->rx_skb[index] = NULL;
+		buffer_addr += rx_buff_size;
+	}
+
+	titan_eth_port->rx_curr_desc_q = 0;
+	titan_eth_port->rx_used_desc_q = 0;
+
+	titan_eth_port->rx_desc_area = (titan_ge_rx_desc *) rx_desc_base_addr;
+	titan_eth_port->rx_desc_area_size =
+	    rx_desc_num * sizeof(titan_ge_rx_desc);
+
+	titan_eth_port->rx_dma = rx_dma;
+
+	return TITAN_OK;
+}
+
+/*
+ * Initialize the Tx descriptor ring. Descriptors in the SRAM
+ */
+static int titan_ge_init_tx_desc_ring(titan_ge_port_info * titan_ge_port,
+				      int tx_desc_num,
+				      unsigned long tx_desc_base_addr,
+				      unsigned long tx_dma)
+{
+	titan_ge_tx_desc *tx_desc;
+	int index;
+	unsigned long titan_ge_tx_desc_bus = tx_dma;
+
+	if (tx_desc_base_addr & 0xF)
+		return 0;
+
+	tx_desc = (titan_ge_tx_desc *) tx_desc_base_addr;
+
+	for (index = 0; index < tx_desc_num; index++) {
+		titan_ge_port->tx_dma_array[index] =
+		    (dma_addr_t) titan_ge_tx_desc_bus;
+		titan_ge_tx_desc_bus += sizeof(titan_ge_tx_desc);
+		tx_desc[index].cmd_sts = 0x0000;
+		tx_desc[index].buffer_len = 0;
+		tx_desc[index].buffer_addr = 0x00000000;
+		titan_ge_port->tx_skb[index] = NULL;
+	}
+
+	titan_ge_port->tx_curr_desc_q = 0;
+	titan_ge_port->tx_used_desc_q = 0;
+
+	titan_ge_port->tx_desc_area = (titan_ge_tx_desc *) tx_desc_base_addr;
+	titan_ge_port->tx_desc_area_size =
+	    tx_desc_num * sizeof(titan_ge_tx_desc);
+
+	titan_ge_port->tx_dma = tx_dma;
+	return TITAN_OK;
+}
+
+/*
+ * Initialize the device as an Ethernet device
+ */
+static int __init titan_ge_probe(struct device *device)
+{
+	titan_ge_port_info *titan_ge_eth;
+	struct net_device *netdev;
+	int port = to_platform_device(device)->id;
+	int err;
+
+	netdev = alloc_etherdev(sizeof(titan_ge_port_info));
+	if (!netdev) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	netdev->open = titan_ge_open;
+	netdev->stop = titan_ge_stop;
+	netdev->hard_start_xmit = titan_ge_start_xmit;
+	netdev->get_stats = titan_ge_get_stats;
+	netdev->set_multicast_list = titan_ge_set_multi;
+	netdev->set_mac_address = titan_ge_set_mac_address;
+
+	/* Tx timeout */
+	netdev->tx_timeout = titan_ge_tx_timeout;
+	netdev->watchdog_timeo = 2 * HZ;
+
+	/* Set these to very high values */
+	netdev->poll = titan_ge_poll;
+	netdev->weight = 64;
+
+	netdev->tx_queue_len = TITAN_GE_TX_QUEUE;
+	netif_carrier_off(netdev);
+	netdev->base_addr = 0;
+
+	netdev->change_mtu = titan_ge_change_mtu;
+
+	titan_ge_eth = netdev_priv(netdev);
+	/* Allocation of memory for the driver structures */
+
+	titan_ge_eth->port_num = port;
+
+	/* Configure the Tx timeout handler */
+	INIT_WORK(&titan_ge_eth->tx_timeout_task,
+		  (void (*)(void *)) titan_ge_tx_timeout_task, netdev);
+
+	spin_lock_init(&titan_ge_eth->lock);
+
+	/* set MAC addresses */
+	memcpy(netdev->dev_addr, titan_ge_mac_addr_base, 6);
+	netdev->dev_addr[5] += port;
+
+	err = register_netdev(netdev);
+
+	if (err)
+		goto out_free_netdev;
+
+	printk(KERN_NOTICE
+	       "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       netdev->name, port, netdev->dev_addr[0],
+	       netdev->dev_addr[1], netdev->dev_addr[2],
+	       netdev->dev_addr[3], netdev->dev_addr[4],
+	       netdev->dev_addr[5]);
+
+	printk(KERN_NOTICE "Rx NAPI supported, Tx Coalescing ON \n");
+
+	return 0;
+
+out_free_netdev:
+	free_netdev(netdev);
+
+out:
+	return err;
+}
+
+static int __devexit titan_device_remove(struct device *device)
+{
+	return 0;
+}
+
+/*
+ * Reset the Ethernet port
+ */
+static void titan_ge_port_reset(unsigned int port_num)
+{
+	unsigned int reg_data;
+
+	/* Stop the Tx port activity */
+	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
+				(port_num << 12));
+	reg_data &= ~(0x0001);
+	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
+			(port_num << 12)), reg_data);
+
+	/* Stop the Rx port activity */
+	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
+				(port_num << 12));
+	reg_data &= ~(0x0001);
+	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
+			(port_num << 12)), reg_data);
+
+	return;
+}
+
+/*
+ * Return the Tx desc after use by the XDMA
+ */
+static int titan_ge_return_tx_desc(titan_ge_port_info * titan_ge_eth, int port)
+{
+	int tx_desc_used;
+	struct sk_buff *skb;
+
+	tx_desc_used = titan_ge_eth->tx_used_desc_q;
+
+	/* return right away */
+	if (tx_desc_used == titan_ge_eth->tx_curr_desc_q)
+		return TITAN_ERROR;
+
+	/* Now the critical stuff */
+	skb = titan_ge_eth->tx_skb[tx_desc_used];
+
+	dev_kfree_skb_any(skb);
+
+	titan_ge_eth->tx_skb[tx_desc_used] = NULL;
+	titan_ge_eth->tx_used_desc_q =
+	    (tx_desc_used + 1) % TITAN_GE_TX_QUEUE;
+
+	return 0;
+}
+
+/*
+ * Coalescing for the Tx path
+ */
+static unsigned long titan_ge_tx_coal(unsigned long delay, int port)
+{
+	unsigned long rx_delay;
+
+	rx_delay = TITAN_GE_READ(TITAN_GE_INT_COALESCING);
+	delay = (delay << 16) | rx_delay;
+
+	TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay);
+	TITAN_GE_WRITE(0x5038, delay);
+
+	return delay;
+}
+
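+/*
+ * Note added for clarity: the (delay << 16) | rx_delay packing above
+ * suggests that TITAN_GE_INT_COALESCING holds the Rx delay in bits [15:0]
+ * and the Tx delay in bits [31:16]; the extra write to register 0x5038 is
+ * left as in the original driver since its meaning is not documented here.
+ */
+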
+static struct device_driver titan_soc_driver = {
+	.name   = titan_string,
+	.bus    = &platform_bus_type,
+	.probe  = titan_ge_probe,
+	.remove = __devexit_p(titan_device_remove),
+};
+
+static void titan_platform_release(struct device *device)
+{
+	struct platform_device *pldev;
+
+	/* Free the platform device structure */
+	pldev = to_platform_device(device);
+	kfree(pldev);
+}
+
+/*
+ * Register the Titan GE with the kernel
+ */
+static int __init titan_ge_init_module(void)
+{
+	struct platform_device *pldev;
+	unsigned int version, device;
+	int i;
+
+	printk(KERN_NOTICE
+	       "PMC-Sierra TITAN 10/100/1000 Ethernet Driver \n");
+
+	titan_ge_base = (unsigned long) ioremap(TITAN_GE_BASE, TITAN_GE_SIZE);
+	if (!titan_ge_base) {
+		printk("Mapping Titan GE failed\n");
+		goto out;
+	}
+
+	device = TITAN_GE_READ(TITAN_GE_DEVICE_ID);
+	version = (device & 0x000f0000) >> 16;
+	device &= 0x0000ffff;
+
+	printk(KERN_NOTICE "Device Id : %x,  Version : %x \n", device, version);
+
+#ifdef TITAN_RX_RING_IN_SRAM
+	titan_ge_sram = (unsigned long) ioremap(TITAN_SRAM_BASE,
+						TITAN_SRAM_SIZE);
+	if (!titan_ge_sram) {
+		printk("Mapping Titan SRAM failed\n");
+		goto out_unmap_ge;
+	}
+#endif
+
+	if (driver_register(&titan_soc_driver)) {
+		printk(KERN_ERR "Driver registration failed\n");
+		goto out_unmap_sram;
+	}
+
+	for (i = 0; i < 3; i++) {
+		titan_ge_device[i] = NULL;
+
+	        if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL)))
+	                continue;
+
+                memset (pldev, 0, sizeof (*pldev));
+                pldev->name		= titan_string;
+                pldev->id		= i;
+                pldev->dev.release	= titan_platform_release;
+                titan_ge_device[i]	= pldev;
+
+                if (platform_device_register (pldev)) {
+                        kfree (pldev);
+                        titan_ge_device[i] = NULL;
+                        continue;
+                }
+                                                                                
+                if (!pldev->dev.driver) {
+	                /*
+			 * The driver was not bound to this device, there was
+	                 * no hardware at this address. Unregister it, as the
+	                 * release fuction will take care of freeing the
+	                 * allocated structure
+			 */
+                        titan_ge_device[i] = NULL;
+                        platform_device_unregister (pldev);
+                }
+        }
+
+	return 0;
+
+out_unmap_sram:
+	iounmap((void *)titan_ge_sram);
+
+out_unmap_ge:
+	iounmap((void *)titan_ge_base);
+
+out:
+	return -ENOMEM;
+}
+
+/*
+ * Unregister the Titan GE from the kernel
+ */
+static void __exit titan_ge_cleanup_module(void)
+{
+	int i;
+
+	driver_unregister(&titan_soc_driver);
+
+	for (i = 0; i < 3; i++) {
+		if (titan_ge_device[i]) {
+			platform_device_unregister (titan_ge_device[i]);
+			titan_ge_device[i] = NULL;
+		}
+	}
+
+	iounmap((void *)titan_ge_sram);
+	iounmap((void *)titan_ge_base);
+}
+
+MODULE_AUTHOR("Manish Lachwani <lachwani@pmc-sierra.com>");
+MODULE_DESCRIPTION("Titan GE Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_init(titan_ge_init_module);
+module_exit(titan_ge_cleanup_module);
diff -Naur linux-2.6.19/drivers/net/titan_ge.h linux-mips-2.6.19/drivers/net/titan_ge.h
--- linux-2.6.19/drivers/net/titan_ge.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/titan_ge.h	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,415 @@
+#ifndef _TITAN_GE_H_
+#define _TITAN_GE_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <asm/byteorder.h>
+
+/*
+ * These definitions should later be moved to a more generic location, since
+ * other code will need to access them as well.
+ */
+
+/*
+ * This is the way it works: LKB5 Base is at 0x0128. TITAN_BASE is defined in
+ * include/asm/titan_dep.h. TITAN_GE_BASE is the value in the TITAN_GE_LKB5
+ * register.
+ */
+
+#define	TITAN_GE_BASE	0xfe000000UL
+#define	TITAN_GE_SIZE	0x10000UL
+
+extern unsigned long titan_ge_base;
+
+#define	TITAN_GE_WRITE(offset, data) \
+		*(volatile u32 *)(titan_ge_base + (offset)) = (data)
+
+#define TITAN_GE_READ(offset) *(volatile u32 *)(titan_ge_base + (offset))
+
+#ifndef msec_delay
+#define msec_delay(x)   do { if(in_interrupt()) { \
+				/* Don't mdelay in interrupt context! */ \
+				BUG(); \
+			} else { \
+				set_current_state(TASK_UNINTERRUPTIBLE); \
+				schedule_timeout((x * HZ)/1000); \
+			} } while(0)
+#endif
+
+#define TITAN_GE_PORT_0
+
+#define	TITAN_SRAM_BASE		((OCD_READ(RM9000x2_OCD_LKB13) & ~1) << 4)
+#define	TITAN_SRAM_SIZE		0x2000UL
+
+/*
+ * We may need these constants
+ */
+#define TITAN_BIT0    0x00000001
+#define TITAN_BIT1    0x00000002
+#define TITAN_BIT2    0x00000004
+#define TITAN_BIT3    0x00000008
+#define TITAN_BIT4    0x00000010
+#define TITAN_BIT5    0x00000020
+#define TITAN_BIT6    0x00000040
+#define TITAN_BIT7    0x00000080
+#define TITAN_BIT8    0x00000100
+#define TITAN_BIT9    0x00000200
+#define TITAN_BIT10   0x00000400
+#define TITAN_BIT11   0x00000800
+#define TITAN_BIT12   0x00001000
+#define TITAN_BIT13   0x00002000
+#define TITAN_BIT14   0x00004000
+#define TITAN_BIT15   0x00008000
+#define TITAN_BIT16   0x00010000
+#define TITAN_BIT17   0x00020000
+#define TITAN_BIT18   0x00040000
+#define TITAN_BIT19   0x00080000
+#define TITAN_BIT20   0x00100000
+#define TITAN_BIT21   0x00200000
+#define TITAN_BIT22   0x00400000
+#define TITAN_BIT23   0x00800000
+#define TITAN_BIT24   0x01000000
+#define TITAN_BIT25   0x02000000
+#define TITAN_BIT26   0x04000000
+#define TITAN_BIT27   0x08000000
+#define TITAN_BIT28   0x10000000
+#define TITAN_BIT29   0x20000000
+#define TITAN_BIT30   0x40000000
+#define TITAN_BIT31   0x80000000
+
+/* Flow Control */
+#define	TITAN_GE_FC_NONE	0x0
+#define	TITAN_GE_FC_FULL	0x1
+#define	TITAN_GE_FC_TX_PAUSE	0x2
+#define	TITAN_GE_FC_RX_PAUSE	0x3
+
+/* Duplex Settings */
+#define	TITAN_GE_FULL_DUPLEX	0x1
+#define	TITAN_GE_HALF_DUPLEX	0x2
+
+/* Speed settings */
+#define	TITAN_GE_SPEED_1000	0x1
+#define	TITAN_GE_SPEED_100	0x2
+#define	TITAN_GE_SPEED_10	0x3
+
+/* Debugging info only */
+#undef TITAN_DEBUG
+
+/* Keep the rings in the Titan's SSRAM */
+#define TITAN_RX_RING_IN_SRAM
+
+#ifdef CONFIG_64BIT
+#define	TITAN_GE_IE_MASK	0xfffffffffb001b64
+#define	TITAN_GE_IE_STATUS	0xfffffffffb001b60
+#else
+#define	TITAN_GE_IE_MASK	0xfb001b64
+#define	TITAN_GE_IE_STATUS	0xfb001b60
+#endif
+
+/* Support for Jumbo Frames */
+#undef TITAN_GE_JUMBO_FRAMES
+
+/* Rx buffer size */
+#ifdef TITAN_GE_JUMBO_FRAMES
+#define	TITAN_GE_JUMBO_BUFSIZE	9080
+#else
+#define	TITAN_GE_STD_BUFSIZE	1580
+#endif
+
+/*
+ * Tx and Rx interrupt coalescing parameters. These values are
+ * for a 1 GHz processor. Rx coalescing can be taken care of
+ * by NAPI, which is adaptive and hence useful. Tx coalescing
+ * is not adaptive, so these values need to be adjusted
+ * based on load, CPU speed, etc.
+ */
+#define	TITAN_GE_RX_COAL	150
+#define	TITAN_GE_TX_COAL	300
+
+#if defined(__BIG_ENDIAN)
+
+/* Define the Rx descriptor */
+typedef struct eth_rx_desc {
+	u32     reserved;	/* Unused 		*/
+	u32     buffer_addr;	/* CPU buffer address 	*/
+	u32	cmd_sts;	/* Command and Status	*/
+	u32	buffer;		/* XDMA buffer address	*/
+} titan_ge_rx_desc;
+
+/* Define the Tx descriptor */
+typedef struct eth_tx_desc {
+	u16     cmd_sts;	/* Command, Status and Buffer count */
+	u16	buffer_len;	/* Length of the buffer	*/
+	u32     buffer_addr;	/* Physical address of the buffer */
+} titan_ge_tx_desc;
+
+#elif defined(__LITTLE_ENDIAN)
+
+/* Define the Rx descriptor */
+typedef struct eth_rx_desc {
+	u32	buffer_addr;	/* CPU buffer address   */
+	u32	reserved;	/* Unused               */
+	u32	buffer;		/* XDMA buffer address  */
+	u32	cmd_sts;	/* Command and Status   */
+} titan_ge_rx_desc;
+
+/* Define the Tx descriptor */
+typedef struct eth_tx_desc {
+	u32     buffer_addr;	/* Physical address of the buffer */
+	u16     buffer_len;     /* Length of the buffer */
+	u16     cmd_sts;        /* Command, Status and Buffer count */
+} titan_ge_tx_desc;
+#endif
+
+/* Default Tx Queue Size */
+#define	TITAN_GE_TX_QUEUE	128
+#define TITAN_TX_RING_BYTES	(TITAN_GE_TX_QUEUE * sizeof(struct eth_tx_desc))
+
+/* Default Rx Queue Size */
+#define	TITAN_GE_RX_QUEUE	64
+#define TITAN_RX_RING_BYTES	(TITAN_GE_RX_QUEUE * sizeof(struct eth_rx_desc))
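+
+/*
+ * Sizing example (added for clarity): with the descriptor layouts above
+ * (8 bytes per Tx descriptor, 16 bytes per Rx descriptor, assuming no
+ * padding), TITAN_TX_RING_BYTES = 128 * 8 = 1024 bytes and
+ * TITAN_RX_RING_BYTES = 64 * 16 = 1024 bytes per port.  titan_ge_eth_open()
+ * places the Tx rings at titan_ge_sram + TITAN_TX_RING_BYTES * port and the
+ * Rx rings at titan_ge_sram + 0x1000 + TITAN_RX_RING_BYTES * port, so three
+ * ports of each fit within the 0x2000-byte SRAM window defined above.
+ */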
+
+/* Packet Structure */
+typedef struct _pkt_info {
+	unsigned int           len;
+	unsigned int            cmd_sts;
+	unsigned int            buffer;
+	struct sk_buff          *skb;
+	unsigned int		checksum;
+} titan_ge_packet;
+
+
+#define	PHYS_CNT	3
+
+/* Titan Port specific data structure */
+typedef struct _eth_port_ctrl {
+	unsigned int		port_num;
+	u8			port_mac_addr[6];
+
+	/* Rx descriptor pointers */
+	int 			rx_curr_desc_q, rx_used_desc_q;
+
+	/* Tx descriptor pointers */
+	int 			tx_curr_desc_q, tx_used_desc_q;
+
+	/* Rx descriptor area */
+	volatile titan_ge_rx_desc	*rx_desc_area;
+	unsigned int			rx_desc_area_size;
+	struct sk_buff*			rx_skb[TITAN_GE_RX_QUEUE];
+
+	/* Tx Descriptor area */
+	volatile titan_ge_tx_desc	*tx_desc_area;
+	unsigned int                    tx_desc_area_size;
+	struct sk_buff*                 tx_skb[TITAN_GE_TX_QUEUE];
+
+	/* Timeout task */
+	struct work_struct		tx_timeout_task;
+
+	/* DMA structures and handles */
+	dma_addr_t			tx_dma;
+	dma_addr_t			rx_dma;
+	dma_addr_t			tx_dma_array[TITAN_GE_TX_QUEUE];
+
+	/* Device lock */
+	spinlock_t			lock;
+
+	unsigned int			tx_ring_skbs;
+	unsigned int			rx_ring_size;
+	unsigned int			tx_ring_size;
+	unsigned int			rx_ring_skbs;
+
+	struct net_device_stats		stats;
+
+	/* Tx and Rx coalescing */
+	unsigned long			rx_int_coal;
+	unsigned long			tx_int_coal;
+
+	/* Threshold for replenishing the Rx and Tx rings */
+	unsigned int			tx_threshold;
+	unsigned int			rx_threshold;
+
+	/* NAPI work limit */
+	unsigned int			rx_work_limit;
+} titan_ge_port_info;
+
+/* Titan specific constants */
+#define	TITAN_ETH_PORT_IRQ		3
+
+/* Max Rx buffer */
+#define	TITAN_GE_MAX_RX_BUFFER		65536
+
+/* Tx and Rx Error */
+#define	TITAN_GE_ERROR
+
+/* Rx Descriptor Command and Status */
+
+#define	TITAN_GE_RX_CRC_ERROR		TITAN_BIT27	/* crc error */
+#define	TITAN_GE_RX_OVERFLOW_ERROR	TITAN_BIT15	/* overflow */
+#define TITAN_GE_RX_BUFFER_OWNED	TITAN_BIT21	/* buffer ownership */
+#define	TITAN_GE_RX_STP			TITAN_BIT31	/* start of packet */
+#define	TITAN_GE_RX_BAM			TITAN_BIT30	/* broadcast address match */
+#define TITAN_GE_RX_PAM			TITAN_BIT28	/* physical address match */
+#define TITAN_GE_RX_LAFM		TITAN_BIT29	/* logical address filter match */
+#define TITAN_GE_RX_VLAN		TITAN_BIT26	/* virtual lans */
+#define TITAN_GE_RX_PERR		TITAN_BIT19	/* packet error */
+#define TITAN_GE_RX_TRUNC		TITAN_BIT20	/* packet size greater than 32 buffers */
+
+/* Tx Descriptor Command */
+#define	TITAN_GE_TX_BUFFER_OWNED	TITAN_BIT5	/* buffer ownership */
+#define	TITAN_GE_TX_ENABLE_INTERRUPT	TITAN_BIT15	/* Interrupt Enable */
+
+/* Return Status */
+#define	TITAN_OK	0x1	/* Good Status */
+#define	TITAN_ERROR	0x2	/* Error Status */
+
+/* MIB specific register offset */
+#define TITAN_GE_MSTATX_STATS_BASE_LOW       0x0800  /* MSTATX COUNTL[15:0] */
+#define TITAN_GE_MSTATX_STATS_BASE_MID       0x0804  /* MSTATX COUNTM[15:0] */
+#define TITAN_GE_MSTATX_STATS_BASE_HI        0x0808  /* MSTATX COUNTH[7:0] */
+#define TITAN_GE_MSTATX_CONTROL              0x0828  /* MSTATX Control */
+#define TITAN_GE_MSTATX_VARIABLE_SELECT      0x082C  /* MSTATX Variable Select */
+
+/* MIB counter offsets, add to the TITAN_GE_MSTATX_STATS_BASE_XXX */
+#define TITAN_GE_MSTATX_RXFRAMESOK                   0x0040
+#define TITAN_GE_MSTATX_RXOCTETSOK                   0x0050
+#define TITAN_GE_MSTATX_RXFRAMES                     0x0060
+#define TITAN_GE_MSTATX_RXOCTETS                     0x0070
+#define TITAN_GE_MSTATX_RXUNICASTFRAMESOK            0x0080
+#define TITAN_GE_MSTATX_RXBROADCASTFRAMESOK          0x0090
+#define TITAN_GE_MSTATX_RXMULTICASTFRAMESOK          0x00A0
+#define TITAN_GE_MSTATX_RXTAGGEDFRAMESOK             0x00B0
+#define TITAN_GE_MSTATX_RXMACPAUSECONTROLFRAMESOK    0x00C0
+#define TITAN_GE_MSTATX_RXMACCONTROLFRAMESOK         0x00D0
+#define TITAN_GE_MSTATX_RXFCSERROR                   0x00E0
+#define TITAN_GE_MSTATX_RXALIGNMENTERROR             0x00F0
+#define TITAN_GE_MSTATX_RXSYMBOLERROR                0x0100
+#define TITAN_GE_MSTATX_RXLAYER1ERROR                0x0110
+#define TITAN_GE_MSTATX_RXINRANGELENGTHERROR         0x0120
+#define TITAN_GE_MSTATX_RXLONGLENGTHERROR            0x0130
+#define TITAN_GE_MSTATX_RXLONGLENGTHCRCERROR         0x0140
+#define TITAN_GE_MSTATX_RXSHORTLENGTHERROR           0x0150
+#define TITAN_GE_MSTATX_RXSHORTLLENGTHCRCERROR       0x0160
+#define TITAN_GE_MSTATX_RXFRAMES64OCTETS             0x0170
+#define TITAN_GE_MSTATX_RXFRAMES65TO127OCTETS        0x0180
+#define TITAN_GE_MSTATX_RXFRAMES128TO255OCTETS       0x0190
+#define TITAN_GE_MSTATX_RXFRAMES256TO511OCTETS       0x01A0
+#define TITAN_GE_MSTATX_RXFRAMES512TO1023OCTETS      0x01B0
+#define TITAN_GE_MSTATX_RXFRAMES1024TO1518OCTETS     0x01C0
+#define TITAN_GE_MSTATX_RXFRAMES1519TOMAXSIZE        0x01D0
+#define TITAN_GE_MSTATX_RXSTATIONADDRESSFILTERED     0x01E0
+#define TITAN_GE_MSTATX_RXVARIABLE                   0x01F0
+#define TITAN_GE_MSTATX_GENERICADDRESSFILTERED       0x0200
+#define TITAN_GE_MSTATX_UNICASTFILTERED              0x0210
+#define TITAN_GE_MSTATX_MULTICASTFILTERED            0x0220
+#define TITAN_GE_MSTATX_BROADCASTFILTERED            0x0230
+#define TITAN_GE_MSTATX_HASHFILTERED                 0x0240
+#define TITAN_GE_MSTATX_TXFRAMESOK                   0x0250
+#define TITAN_GE_MSTATX_TXOCTETSOK                   0x0260
+#define TITAN_GE_MSTATX_TXOCTETS                     0x0270
+#define TITAN_GE_MSTATX_TXTAGGEDFRAMESOK             0x0280
+#define TITAN_GE_MSTATX_TXMACPAUSECONTROLFRAMESOK    0x0290
+#define TITAN_GE_MSTATX_TXFCSERROR                   0x02A0
+#define TITAN_GE_MSTATX_TXSHORTLENGTHERROR           0x02B0
+#define TITAN_GE_MSTATX_TXLONGLENGTHERROR            0x02C0
+#define TITAN_GE_MSTATX_TXSYSTEMERROR                0x02D0
+#define TITAN_GE_MSTATX_TXMACERROR                   0x02E0
+#define TITAN_GE_MSTATX_TXCARRIERSENSEERROR          0x02F0
+#define TITAN_GE_MSTATX_TXSQETESTERROR               0x0300
+#define TITAN_GE_MSTATX_TXUNICASTFRAMESOK            0x0310
+#define TITAN_GE_MSTATX_TXBROADCASTFRAMESOK          0x0320
+#define TITAN_GE_MSTATX_TXMULTICASTFRAMESOK          0x0330
+#define TITAN_GE_MSTATX_TXUNICASTFRAMESATTEMPTED     0x0340
+#define TITAN_GE_MSTATX_TXBROADCASTFRAMESATTEMPTED   0x0350
+#define TITAN_GE_MSTATX_TXMULTICASTFRAMESATTEMPTED   0x0360
+#define TITAN_GE_MSTATX_TXFRAMES64OCTETS             0x0370
+#define TITAN_GE_MSTATX_TXFRAMES65TO127OCTETS        0x0380
+#define TITAN_GE_MSTATX_TXFRAMES128TO255OCTETS       0x0390
+#define TITAN_GE_MSTATX_TXFRAMES256TO511OCTETS       0x03A0
+#define TITAN_GE_MSTATX_TXFRAMES512TO1023OCTETS      0x03B0
+#define TITAN_GE_MSTATX_TXFRAMES1024TO1518OCTETS     0x03C0
+#define TITAN_GE_MSTATX_TXFRAMES1519TOMAXSIZE        0x03D0
+#define TITAN_GE_MSTATX_TXVARIABLE                   0x03E0
+#define TITAN_GE_MSTATX_RXSYSTEMERROR                0x03F0
+#define TITAN_GE_MSTATX_SINGLECOLLISION              0x0400
+#define TITAN_GE_MSTATX_MULTIPLECOLLISION            0x0410
+#define TITAN_GE_MSTATX_DEFERREDXMISSIONS            0x0420
+#define TITAN_GE_MSTATX_LATECOLLISIONS               0x0430
+#define TITAN_GE_MSTATX_ABORTEDDUETOXSCOLLS          0x0440
+
+/* Interrupt specific defines */
+#define TITAN_GE_DEVICE_ID         0x0000  /* Device ID */
+#define TITAN_GE_RESET             0x0004  /* Reset reg */
+#define TITAN_GE_TSB_CTRL_0        0x000C  /* TSB Control reg 0 */
+#define TITAN_GE_TSB_CTRL_1        0x0010  /* TSB Control reg 1 */
+#define TITAN_GE_INTR_GRP0_STATUS  0x0040  /* General Interrupt Group 0 Status */
+#define TITAN_GE_INTR_XDMA_CORE_A  0x0048  /* XDMA Channel Interrupt Status, Core A*/
+#define TITAN_GE_INTR_XDMA_CORE_B  0x004C  /* XDMA Channel Interrupt Status, Core B*/
+#define	TITAN_GE_INTR_XDMA_IE	   0x0058  /* XDMA Channel Interrupt Enable */
+#define TITAN_GE_SDQPF_ECC_INTR    0x480C  /* SDQPF ECC Interrupt Status */
+#define TITAN_GE_SDQPF_RXFIFO_CTL  0x4828  /* SDQPF RxFifo Control and Interrupt Enb*/
+#define TITAN_GE_SDQPF_RXFIFO_INTR 0x482C  /* SDQPF RxFifo Interrupt Status */
+#define TITAN_GE_SDQPF_TXFIFO_CTL  0x4928  /* SDQPF TxFifo Control and Interrupt Enb*/
+#define TITAN_GE_SDQPF_TXFIFO_INTR 0x492C  /* SDQPF TxFifo Interrupt Status */
+#define	TITAN_GE_SDQPF_RXFIFO_0	   0x4840  /* SDQPF RxFIFO Enable */
+#define	TITAN_GE_SDQPF_TXFIFO_0	   0x4940  /* SDQPF TxFIFO Enable */
+#define TITAN_GE_XDMA_CONFIG       0x5000  /* XDMA Global Configuration */
+#define TITAN_GE_XDMA_INTR_SUMMARY 0x5010  /* XDMA Interrupt Summary */
+#define TITAN_GE_XDMA_BUFADDRPRE   0x5018  /* XDMA Buffer Address Prefix */
+#define TITAN_GE_XDMA_DESCADDRPRE  0x501C  /* XDMA Descriptor Address Prefix */
+#define TITAN_GE_XDMA_PORTWEIGHT   0x502C  /* XDMA Port Weight Configuration */
+
+/* Rx MAC defines */
+#define TITAN_GE_RMAC_CONFIG_1               0x1200  /* RMAC Configuration 1 */
+#define TITAN_GE_RMAC_CONFIG_2               0x1204  /* RMAC Configuration 2 */
+#define TITAN_GE_RMAC_MAX_FRAME_LEN          0x1208  /* RMAC Max Frame Length */
+#define TITAN_GE_RMAC_STATION_HI             0x120C  /* Rx Station Address High */
+#define TITAN_GE_RMAC_STATION_MID            0x1210  /* Rx Station Address Middle */
+#define TITAN_GE_RMAC_STATION_LOW            0x1214  /* Rx Station Address Low */
+#define TITAN_GE_RMAC_LINK_CONFIG            0x1218  /* RMAC Link Configuration */
+
+/* Tx MAC defines */
+#define TITAN_GE_TMAC_CONFIG_1               0x1240  /* TMAC Configuration 1 */
+#define TITAN_GE_TMAC_CONFIG_2               0x1244  /* TMAC Configuration 2 */
+#define TITAN_GE_TMAC_IPG                    0x1248  /* TMAC Inter-Packet Gap */
+#define TITAN_GE_TMAC_STATION_HI             0x124C  /* Tx Station Address High */
+#define TITAN_GE_TMAC_STATION_MID            0x1250  /* Tx Station Address Middle */
+#define TITAN_GE_TMAC_STATION_LOW            0x1254  /* Tx Station Address Low */
+#define TITAN_GE_TMAC_MAX_FRAME_LEN          0x1258  /* TMAC Max Frame Length */
+#define TITAN_GE_TMAC_MIN_FRAME_LEN          0x125C  /* TMAC Min Frame Length */
+#define TITAN_GE_TMAC_PAUSE_FRAME_TIME       0x1260  /* TMAC Pause Frame Time */
+#define TITAN_GE_TMAC_PAUSE_FRAME_INTERVAL   0x1264  /* TMAC Pause Frame Interval */
+
+/* GMII register */
+#define TITAN_GE_GMII_INTERRUPT_STATUS       0x1348  /* GMII Interrupt Status */
+#define TITAN_GE_GMII_CONFIG_GENERAL         0x134C  /* GMII Configuration General */
+#define TITAN_GE_GMII_CONFIG_MODE            0x1350  /* GMII Configuration Mode */
+
+/* Tx and Rx XDMA defines */
+#define	TITAN_GE_INT_COALESCING		     0x5030 /* Interrupt Coalescing */
+#define	TITAN_GE_CHANNEL0_CONFIG	     0x5040 /* Channel 0 XDMA config */
+#define	TITAN_GE_CHANNEL0_INTERRUPT	     0x504c /* Channel 0 Interrupt Status */
+#define	TITAN_GE_GDI_INTERRUPT_ENABLE        0x5050 /* IE for the GDI Errors */
+#define	TITAN_GE_CHANNEL0_PACKET	     0x5060 /* Channel 0 Packet count */
+#define	TITAN_GE_CHANNEL0_BYTE		     0x5064 /* Channel 0 Byte count */
+#define	TITAN_GE_CHANNEL0_TX_DESC	     0x5054 /* Channel 0 Tx first desc */
+#define	TITAN_GE_CHANNEL0_RX_DESC	     0x5058 /* Channel 0 Rx first desc */
+
+/* AFX (Address Filter Exact) register offsets for Slice 0 */
+#define TITAN_GE_AFX_EXACT_MATCH_LOW         0x1100  /* AFX Exact Match Address Low*/
+#define TITAN_GE_AFX_EXACT_MATCH_MID         0x1104  /* AFX Exact Match Address Mid*/
+#define TITAN_GE_AFX_EXACT_MATCH_HIGH        0x1108  /* AFX Exact Match Address Hi */
+#define TITAN_GE_AFX_EXACT_MATCH_VID         0x110C  /* AFX Exact Match VID */
+#define TITAN_GE_AFX_MULTICAST_HASH_LOW      0x1110  /* AFX Multicast HASH Low */
+#define TITAN_GE_AFX_MULTICAST_HASH_MIDLOW   0x1114  /* AFX Multicast HASH MidLow */
+#define TITAN_GE_AFX_MULTICAST_HASH_MIDHI    0x1118  /* AFX Multicast HASH MidHi */
+#define TITAN_GE_AFX_MULTICAST_HASH_HI       0x111C  /* AFX Multicast HASH Hi */
+#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_0     0x1120  /* AFX Address Filter Ctrl 0 */
+#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_1     0x1124  /* AFX Address Filter Ctrl 1 */
+#define TITAN_GE_AFX_ADDRS_FILTER_CTRL_2     0x1128  /* AFX Address Filter Ctrl 2 */
+
+/* Traffic Groomer block */
+#define        TITAN_GE_TRTG_CONFIG	     0x1000  /* TRTG Config */
+
+#endif 				/* _TITAN_GE_H_ */
+
diff -Naur linux-2.6.19/drivers/net/titan_mdio.c linux-mips-2.6.19/drivers/net/titan_mdio.c
--- linux-2.6.19/drivers/net/titan_mdio.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/titan_mdio.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,217 @@
+/*
+ * drivers/net/titan_mdio.c - Driver for Titan ethernet ports
+ *
+ * Copyright (C) 2003 PMC-Sierra Inc.
+ * Author : Manish Lachwani (lachwani@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Management Data IO (MDIO) driver for the Titan GMII. Interacts with the Marvell PHY
+ * on the Titan. No support for the TBI as yet.
+ *
+ */
+
+#include	"titan_mdio.h"
+
+#define MDIO_DEBUG
+
+/*
+ * Local constants
+ */
+#define MAX_CLKA            1023
+#define MAX_PHY_DEV         31
+#define MAX_PHY_REG         31
+#define WRITEADDRS_OPCODE   0x0
+#define	READ_OPCODE	    0x2
+#define WRITE_OPCODE        0x1
+#define MAX_MDIO_POLL       100
+
+/*
+ * Titan MDIO and SCMB registers
+ */
+#define TITAN_GE_SCMB_CONTROL                0x01c0  /* SCMB Control */
+#define TITAN_GE_SCMB_CLKA	             0x01c4  /* SCMB Clock A */
+#define TITAN_GE_MDIO_COMMAND                0x01d0  /* MDIO Command */
+#define TITAN_GE_MDIO_DEVICE_PORT_ADDRESS    0x01d4  /* MDIO Device and Port addrs */
+#define TITAN_GE_MDIO_DATA                   0x01d8  /* MDIO Data */
+#define TITAN_GE_MDIO_INTERRUPTS             0x01dC  /* MDIO Interrupts */
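+
+/*
+ * Register bit fields as used by the accessors below (inferred from the
+ * masks in this file): DEVICE_PORT_ADDRESS carries the PHY/device address
+ * in bits 12:8, the register address in bits 4:0 and the direct-mode
+ * select in bit 14; COMMAND takes the opcode in bits 9:8 and preamble
+ * suppression in bit 0, and reports busy in bit 15, which is what
+ * titan_ge_mdio_poll() waits on.
+ */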
+
+/*
+ * Function to poll the MDIO
+ */
+static int titan_ge_mdio_poll(void)
+{
+	int	i, val;
+
+	for (i = 0; i < MAX_MDIO_POLL; i++) {
+		val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND);
+
+		if (!(val & 0x8000))
+			return TITAN_GE_MDIO_GOOD;
+	}
+
+	return TITAN_GE_MDIO_ERROR;
+}
+
+
+/*
+ * Initialize and configure the MDIO
+ */
+int titan_ge_mdio_setup(titan_ge_mdio_config *titan_mdio)
+{
+	unsigned long	val;
+
+	/* Reset the SCMB and program into MDIO mode */
+	TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CONTROL, 0x9000);
+	TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CONTROL, 0x1000);
+
+	/* CLK A */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_SCMB_CLKA);
+	val = ( (val & ~(0x03ff)) | (titan_mdio->clka & 0x03ff));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_SCMB_CLKA, val);
+
+	/* Preamble Suppression */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND);
+	val = ( (val & ~(0x0001)) | (titan_mdio->mdio_spre & 0x0001));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val);
+
+	/* MDIO mode */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS);
+	val = ( (val & ~(0x4000)) | (titan_mdio->mdio_mode & 0x4000));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val);
+
+	return TITAN_GE_MDIO_GOOD;
+}
+
+/*
+ * Set the PHY address in indirect mode
+ */
+int titan_ge_mdio_inaddrs(int dev_addr, int reg_addr)
+{
+	volatile unsigned long	val;
+
+	/* Setup the PHY device */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS);
+	val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00));
+	val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val);
+
+	/* Write the new address */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND);
+	val = ( (val & ~(0x0300)) | ( (WRITEADDRS_OPCODE << 8) & 0x0300));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val);
+
+	return TITAN_GE_MDIO_GOOD;
+}
+
+/*
+ * Read the MDIO register. This is what the individual parameters mean:
+ *
+ * dev_addr : PHY ID
+ * reg_addr : register offset
+ *
+ * See the spec for the Titan MAC. We operate in the Direct Mode.
+ */
+
+#define MAX_RETRIES	2
+
+int titan_ge_mdio_read(int dev_addr, int reg_addr, unsigned int *pdata)
+{
+	volatile unsigned long	val;
+	int retries = 0;
+
+	/* Setup the PHY device */
+
+again:
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS);
+	val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00));
+	val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f));
+	val |= 0x4000;
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val);
+
+	udelay(30);
+
+	/* Issue the read command */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND);
+	val = ( (val & ~(0x0300)) | ( (READ_OPCODE << 8) & 0x0300));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val);
+
+	udelay(30);
+
+	if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD)
+		return TITAN_GE_MDIO_ERROR;
+
+	*pdata = (unsigned int)TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DATA);
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_INTERRUPTS);
+
+	udelay(30);
+
+	if (val & 0x2) {
+		if (retries == MAX_RETRIES)
+			return TITAN_GE_MDIO_ERROR;
+		else {
+			retries++;
+			goto again;
+		}
+	}
+
+	return TITAN_GE_MDIO_GOOD;
+}
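+
+/*
+ * Illustrative use only (the caller and phy_addr below are hypothetical):
+ * reading the standard MII PHY identifier registers 2 and 3 through this
+ * interface would look like
+ *
+ *	unsigned int id1, id2;
+ *
+ *	if (titan_ge_mdio_read(phy_addr, 2, &id1) == TITAN_GE_MDIO_GOOD &&
+ *	    titan_ge_mdio_read(phy_addr, 3, &id2) == TITAN_GE_MDIO_GOOD)
+ *		printk(KERN_DEBUG "PHY ID %04x:%04x\n", id1, id2);
+ */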
+
+/*
+ * Write to the MDIO register
+ *
+ * dev_addr : PHY ID
+ * reg_addr : register that needs to be written to
+ *
+ */
+int titan_ge_mdio_write(int dev_addr, int reg_addr, unsigned int data)
+{
+	volatile unsigned long	val;
+
+	if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD)
+		return TITAN_GE_MDIO_ERROR;
+
+	/* Setup the PHY device */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS);
+	val = ( (val & ~(0x1f00)) | ( (dev_addr << 8) & 0x1f00));
+	val = ( (val & ~(0x001f)) | ( reg_addr & 0x001f));
+	val |= 0x4000;
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DEVICE_PORT_ADDRESS, val);
+
+	udelay(30);
+
+	/* Setup the data to write */
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_DATA, data);
+
+	udelay(30);
+
+	/* Issue the write command */
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_COMMAND);
+	val = ( (val & ~(0x0300)) | ( (WRITE_OPCODE << 8) & 0x0300));
+	TITAN_GE_MDIO_WRITE(TITAN_GE_MDIO_COMMAND, val);
+
+	udelay(30);
+
+	if (titan_ge_mdio_poll() != TITAN_GE_MDIO_GOOD)
+		return TITAN_GE_MDIO_ERROR;
+
+	val = TITAN_GE_MDIO_READ(TITAN_GE_MDIO_INTERRUPTS);
+	if (val & 0x2)
+		return TITAN_GE_MDIO_ERROR;
+
+	return TITAN_GE_MDIO_GOOD;
+}
+
diff -Naur linux-2.6.19/drivers/net/titan_mdio.h linux-mips-2.6.19/drivers/net/titan_mdio.h
--- linux-2.6.19/drivers/net/titan_mdio.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/net/titan_mdio.h	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,56 @@
+/*
+ * MDIO used to interact with the PHY when using GMII/MII
+ */
+#ifndef _TITAN_MDIO_H
+#define _TITAN_MDIO_H
+
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "titan_ge.h"
+
+
+#define	TITAN_GE_MDIO_ERROR	(-9000)
+#define	TITAN_GE_MDIO_GOOD	0
+
+#define	TITAN_GE_MDIO_BASE		titan_ge_base
+
+#define	TITAN_GE_MDIO_READ(offset)	\
+	*(volatile u32 *)(titan_ge_base + (offset))
+
+#define	TITAN_GE_MDIO_WRITE(offset, data)	\
+	*(volatile u32 *)(titan_ge_base + (offset)) = (data)
+
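+/*
+ * Raw 32-bit loads and stores at an offset from the mapped Titan GE
+ * register base; note that no read/write barriers are implied here.
+ */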
+
+/* GMII specific registers */
+#define	TITAN_GE_MARVEL_PHY_ID		0x00
+#define	TITAN_PHY_AUTONEG_ADV		0x04
+#define	TITAN_PHY_LP_ABILITY		0x05
+#define	TITAN_GE_MDIO_MII_CTRL		0x09
+#define	TITAN_GE_MDIO_MII_EXTENDED	0x0f
+#define	TITAN_GE_MDIO_PHY_CTRL		0x10
+#define	TITAN_GE_MDIO_PHY_STATUS	0x11
+#define	TITAN_GE_MDIO_PHY_IE		0x12
+#define	TITAN_GE_MDIO_PHY_IS		0x13
+#define	TITAN_GE_MDIO_PHY_LED		0x18
+#define	TITAN_GE_MDIO_PHY_LED_OVER	0x19
+#define	PHY_ANEG_TIME_WAIT		45	/* 45 seconds wait time */
+
+/*
+ * MDIO Config Structure
+ */
+typedef struct {
+	unsigned int		clka;
+	int			mdio_spre;
+	int			mdio_mode;
+} titan_ge_mdio_config;
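+
+/*
+ * titan_ge_mdio_setup() masks these fields straight onto the hardware:
+ * clka goes into the low ten bits of the SCMB clock register, mdio_spre
+ * into bit 0 of the MDIO command register and mdio_mode into bit 14
+ * (0x4000) of the MDIO device/port address register, so callers pass
+ * already-positioned bit values rather than simple booleans.
+ */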
+
+/*
+ * Function Prototypes
+ */
+int titan_ge_mdio_setup(titan_ge_mdio_config *);
+int titan_ge_mdio_inaddrs(int, int);
+int titan_ge_mdio_read(int, int, unsigned int *);
+int titan_ge_mdio_write(int, int, unsigned int);
+
+#endif /* _TITAN_MDIO_H */
diff -Naur linux-2.6.19/drivers/scsi/NCR53C9x.h linux-mips-2.6.19/drivers/scsi/NCR53C9x.h
--- linux-2.6.19/drivers/scsi/NCR53C9x.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/scsi/NCR53C9x.h	2006-11-29 15:23:09.000000000 -0800
@@ -144,12 +144,7 @@
 
 #ifndef MULTIPLE_PAD_SIZES
 
-#ifdef CONFIG_CPU_HAS_WB
-#include <asm/wbflush.h>
-#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
-#else
-#define esp_write(__reg, __val) ((__reg) = (__val))
-#endif
+#define esp_write(__reg, __val) do{(__reg) = (__val); iob();} while(0)
 #define esp_read(__reg) (__reg)
 
 struct ESP_regs {
diff -Naur linux-2.6.19/drivers/scsi/dec_esp.c linux-mips-2.6.19/drivers/scsi/dec_esp.c
--- linux-2.6.19/drivers/scsi/dec_esp.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/scsi/dec_esp.c	2006-11-29 15:23:09.000000000 -0800
@@ -55,7 +55,7 @@
 
 static int  dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
 static void dma_drain(struct NCR_ESP *esp);
-static int  dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
+static int  dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp);
 static void dma_dump_state(struct NCR_ESP *esp);
 static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
 static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
@@ -230,7 +230,7 @@
 			mem_start = get_tc_base_addr(slot);
 
 			/* Store base addr into esp struct */
-			esp->slot = CPHYSADDR(mem_start);
+			esp->slot = mem_start;
 
 			esp->dregs = 0;
 			esp->eregs = (void *)CKSEG1ADDR(mem_start +
diff -Naur linux-2.6.19/drivers/scsi/sgiwd93.c linux-mips-2.6.19/drivers/scsi/sgiwd93.c
--- linux-2.6.19/drivers/scsi/sgiwd93.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/scsi/sgiwd93.c	2006-11-29 15:23:09.000000000 -0800
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -197,6 +198,7 @@
 	udelay(50);
 	hregs->ctrl = 0;
 }
+EXPORT_SYMBOL_GPL(sgiwd93_reset);
 
 static inline void init_hpc_chain(struct hpc_data *hd)
 {
diff -Naur linux-2.6.19/drivers/serial/Kconfig linux-mips-2.6.19/drivers/serial/Kconfig
--- linux-2.6.19/drivers/serial/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/serial/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -657,6 +657,25 @@
 	depends on SERIAL_SH_SCI=y
 	select SERIAL_CORE_CONSOLE
 
+config SERIAL_IP3106
+	bool "Enable IP3106 UART Support (Philips PNX 8xx0 SoCs)"
+	depends on MIPS && (SOC_PNX8550 || SOC_PNX8330)
+	select SERIAL_CORE
+	help
+	  If you have a Philips SoC with an IP 3106 UART in it, such as
+	  the PNX8550 or PNX8330 (MIPS based) and you want to use
+	  serial ports, say Y.  Otherwise, say N.
+
+config SERIAL_IP3106_CONSOLE
+	bool "Enable PNX8XX0 serial console"
+	depends on SERIAL_IP3106
+	select SERIAL_CORE_CONSOLE
+	help
+	  If you have a Philips SoC with an IP 3106 UART in it, such as
+	  the PNX8550 or PNX8330 (MIPS based) and you want to use
+	  a serial console, say Y.
+	  Otherwise, say N.
+
 config SERIAL_CORE
 	tristate
 
diff -Naur linux-2.6.19/drivers/serial/Makefile linux-mips-2.6.19/drivers/serial/Makefile
--- linux-2.6.19/drivers/serial/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/serial/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -39,6 +39,7 @@
 obj-$(CONFIG_V850E_UART) += v850e_uart.o
 obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
 obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
+obj-$(CONFIG_SERIAL_IP3106) += ip3106_uart.o
 obj-$(CONFIG_SERIAL_DZ) += dz.o
 obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
 obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
diff -Naur linux-2.6.19/drivers/serial/ip22zilog.c linux-mips-2.6.19/drivers/serial/ip22zilog.c
--- linux-2.6.19/drivers/serial/ip22zilog.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/serial/ip22zilog.c	2006-11-29 15:23:09.000000000 -0800
@@ -863,6 +863,7 @@
 	up->cflag = termios->c_cflag;
 
 	ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
+	uart_update_timeout(port, termios->c_cflag, baud);
 
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
@@ -1024,6 +1025,8 @@
 	}
 
 	con->cflag = cflag | CS8;			/* 8N1 */
+
+	uart_update_timeout(&ip22zilog_port_table[con->index].port, cflag, baud);
 }
 
 static int __init ip22zilog_console_setup(struct console *con, char *options)
diff -Naur linux-2.6.19/drivers/serial/ip3106_uart.c linux-mips-2.6.19/drivers/serial/ip3106_uart.c
--- linux-2.6.19/drivers/serial/ip3106_uart.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/serial/ip3106_uart.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,909 @@
+/*
+ * Initially based on linux-2.4.20_mvl31-pnx8xx0/drivers/char/serial_pnx8550.c
+ *
+ * Complete rewrite to drivers/serial/pnx8550_uart.c by
+ * Embedded Alley Solutions, source@embeddedalley.com as part of the
+ * PNX8550 2.6 port, and then drivers/serial/ip3106_uart.c to work
+ * with other Philips SoCs.
+ *
+ * Existing copyrights from files used to write this driver:
+ * Author: Per Hallsmark per.hallsmark@mvista.com
+ *
+ * and
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ */
+
+#if defined(CONFIG_SERIAL_IP3106_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/serial_ip3106.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <uart.h>
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_IP3106_MAJOR	204
+#define MINOR_START		5
+
+#define NR_PORTS		2
+
+#define IP3106_ISR_PASS_LIMIT	256
+
+/*
+ * Convert from ignore_status_mask or read_status_mask to FIFO
+ * and interrupt status bits
+ */
+#define SM_TO_FIFO(x)	((x) >> 10)
+#define SM_TO_ISTAT(x)	((x) & 0x000001ff)
+#define FIFO_TO_SM(x)	((x) << 10)
+#define ISTAT_TO_SM(x)	((x) & 0x000001ff)
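+
+/*
+ * FIFO status flags live in bits 10 and up of the combined status word
+ * while the nine interrupt-status bits stay in bits 8:0, so a single
+ * read_status_mask/ignore_status_mask value can describe both registers;
+ * e.g. FIFO_TO_SM(IP3106_UART_FIFO_RXFE) is the frame-error flag shifted
+ * up by ten bits.
+ */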
+
+/*
+ * This is the size of our serial port register set.
+ */
+#define UART_PORT_SIZE	0x1000
+
+/*
+ * This determines how often we check the modem status signals
+ * for any change.  They generally aren't connected to an IRQ
+ * so we have to poll them.  We also check immediately before
+ * filling the TX FIFO in case CTS has been dropped.
+ */
+#define MCTRL_TIMEOUT	(250*HZ/1000)
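+/* i.e. 250 ms expressed in jiffies: 25 ticks at HZ=100, 250 at HZ=1000 */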
+
+
+extern struct ip3106_port ip3106_ports[];
+
+static inline int serial_in(struct ip3106_port *sport, int offset)
+{
+	return (__raw_readl(sport->port.membase + offset));
+}
+
+static inline void serial_out(struct ip3106_port *sport, int offset, int value)
+{
+	__raw_writel(value, sport->port.membase + offset);
+}
+
+/*
+ * Handle any change of modem status signal since we were last called.
+ */
+static void ip3106_mctrl_check(struct ip3106_port *sport)
+{
+	unsigned int status, changed;
+
+	status = sport->port.ops->get_mctrl(&sport->port);
+	changed = status ^ sport->old_status;
+
+	if (changed == 0)
+		return;
+
+	sport->old_status = status;
+
+	if (changed & TIOCM_RI)
+		sport->port.icount.rng++;
+	if (changed & TIOCM_DSR)
+		sport->port.icount.dsr++;
+	if (changed & TIOCM_CAR)
+		uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
+	if (changed & TIOCM_CTS)
+		uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
+
+	wake_up_interruptible(&sport->port.info->delta_msr_wait);
+}
+
+/*
+ * This is our per-port timeout handler, for checking the
+ * modem status signals.
+ */
+static void ip3106_timeout(unsigned long data)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)data;
+	unsigned long flags;
+
+	if (sport->port.info) {
+		spin_lock_irqsave(&sport->port.lock, flags);
+		ip3106_mctrl_check(sport);
+		spin_unlock_irqrestore(&sport->port.lock, flags);
+
+		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+	}
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void ip3106_stop_tx(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	u32 ien;
+
+	/* Disable TX intr */
+	ien = serial_in(sport, IP3106_IEN);
+	serial_out(sport, IP3106_IEN, ien & ~IP3106_UART_INT_ALLTX);
+
+	/* Clear all pending TX intr */
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_ALLTX);
+}
+
+/*
+ * interrupts may not be disabled on entry
+ */
+static void ip3106_start_tx(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+
+	/* Clear all pending TX intr */
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_ALLTX);
+
+	/* Enable TX intr */
+	ien = serial_in(sport, IP3106_IEN);
+	serial_out(sport, IP3106_IEN, ien | IP3106_UART_INT_ALLTX);
+
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+/*
+ * Interrupts enabled
+ */
+static void ip3106_stop_rx(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	u32 ien;
+
+	/* Disable RX intr */
+	ien = serial_in(sport, IP3106_IEN);
+	serial_out(sport, IP3106_IEN, ien & ~IP3106_UART_INT_ALLRX);
+
+	/* Clear all pending RX intr */
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_ALLRX);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void ip3106_enable_ms(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	mod_timer(&sport->timer, jiffies);
+}
+
+static void ip3106_rx_chars(struct ip3106_port *sport)
+{
+	struct tty_struct *tty = sport->port.info->tty;
+	unsigned int status, ch, flg, ignored = 0;
+
+	status = FIFO_TO_SM(serial_in(sport, IP3106_FIFO)) |
+		 ISTAT_TO_SM(serial_in(sport, IP3106_ISTAT));
+	while (status & FIFO_TO_SM(IP3106_UART_FIFO_RXFIFO)) {
+		ch = serial_in(sport, IP3106_FIFO);
+
+		sport->port.icount.rx++;
+
+		flg = TTY_NORMAL;
+
+		/*
+		 * note that the error handling code is
+		 * out of the main execution path
+		 */
+		if (status & FIFO_TO_SM(IP3106_UART_FIFO_RXFE |
+					IP3106_UART_FIFO_RXPAR))
+			goto handle_error;
+
+		if (uart_handle_sysrq_char(&sport->port, ch))
+			goto ignore_char;
+
+	error_return:
+		tty_insert_flip_char(tty, ch, flg);
+	ignore_char:
+		serial_out(sport, IP3106_LCR, serial_in(sport, IP3106_LCR) |
+				IP3106_UART_LCR_RX_NEXT);
+		status = FIFO_TO_SM(serial_in(sport, IP3106_FIFO)) |
+			 ISTAT_TO_SM(serial_in(sport, IP3106_ISTAT));
+	}
+ out:
+	tty_flip_buffer_push(tty);
+	return;
+
+ handle_error:
+	if (status & FIFO_TO_SM(IP3106_UART_FIFO_RXPAR))
+		sport->port.icount.parity++;
+	else if (status & FIFO_TO_SM(IP3106_UART_FIFO_RXFE))
+		sport->port.icount.frame++;
+	if (status & ISTAT_TO_SM(IP3106_UART_INT_RXOVRN))
+		sport->port.icount.overrun++;
+
+	if (status & sport->port.ignore_status_mask) {
+		if (++ignored > 100)
+			goto out;
+		goto ignore_char;
+	}
+
+//	status &= sport->port.read_status_mask;
+
+	if (status & FIFO_TO_SM(IP3106_UART_FIFO_RXPAR))
+		flg = TTY_PARITY;
+	else if (status & FIFO_TO_SM(IP3106_UART_FIFO_RXFE))
+		flg = TTY_FRAME;
+
+	if (status & ISTAT_TO_SM(IP3106_UART_INT_RXOVRN)) {
+		/*
+		 * overrun does *not* affect the character
+		 * we read from the FIFO
+		 */
+		tty_insert_flip_char(tty, ch, flg);
+		ch = 0;
+		flg = TTY_OVERRUN;
+	}
+#ifdef SUPPORT_SYSRQ
+	sport->port.sysrq = 0;
+#endif
+	goto error_return;
+}
+
+static void ip3106_tx_chars(struct ip3106_port *sport)
+{
+	struct circ_buf *xmit = &sport->port.info->xmit;
+
+	if (sport->port.x_char) {
+		serial_out(sport, IP3106_FIFO, sport->port.x_char);
+		sport->port.icount.tx++;
+		sport->port.x_char = 0;
+		return;
+	}
+
+	/*
+	 * Check the modem control lines before
+	 * transmitting anything.
+	 */
+	ip3106_mctrl_check(sport);
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
+		ip3106_stop_tx(&sport->port);
+		return;
+	}
+
+	/*
+	 * TX while bytes are available and the TX FIFO level (the TXFIFO
+	 * field of the FIFO register, shifted down by 16) is below 16
+	 */
+	while (((serial_in(sport, IP3106_FIFO) &
+					IP3106_UART_FIFO_TXFIFO) >> 16) < 16) {
+		serial_out(sport, IP3106_FIFO, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		sport->port.icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&sport->port);
+
+	if (uart_circ_empty(xmit))
+		ip3106_stop_tx(&sport->port);
+}
+
+static irqreturn_t ip3106_int(int irq, void *dev_id)
+{
+	struct ip3106_port *sport = dev_id;
+	unsigned int status;
+
+	spin_lock(&sport->port.lock);
+	/* Get the interrupts */
+	status  = serial_in(sport, IP3106_ISTAT) & serial_in(sport, IP3106_IEN);
+
+	/* RX Receiver Holding Register Overrun */
+	if (status & IP3106_UART_INT_RXOVRN) {
+		sport->port.icount.overrun++;
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_RXOVRN);
+	}
+
+	/* RX Frame Error */
+	if (status & IP3106_UART_INT_FRERR) {
+		sport->port.icount.frame++;
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_FRERR);
+	}
+
+	/* Break signal received */
+	if (status & IP3106_UART_INT_BREAK) {
+		sport->port.icount.brk++;
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_BREAK);
+	}
+
+	/* RX Parity Error */
+	if (status & IP3106_UART_INT_PARITY) {
+		sport->port.icount.parity++;
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_PARITY);
+	}
+
+	/* Byte received */
+	if (status & IP3106_UART_INT_RX) {
+		ip3106_rx_chars(sport);
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_RX);
+	}
+
+	/* TX holding register empty - transmit a byte */
+	if (status & IP3106_UART_INT_TX) {
+		ip3106_tx_chars(sport);
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_TX);
+	}
+
+	/* TX shift register and holding register empty  */
+	if (status & IP3106_UART_INT_EMPTY) {
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_EMPTY);
+	}
+
+	/* Receiver time out */
+	if (status & IP3106_UART_INT_RCVTO) {
+		serial_out(sport, IP3106_ICLR, IP3106_UART_INT_RCVTO);
+	}
+	spin_unlock(&sport->port.lock);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int ip3106_tx_empty(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	return serial_in(sport, IP3106_FIFO) & IP3106_UART_FIFO_TXFIFO_STA ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int ip3106_get_mctrl(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	unsigned int mctrl = TIOCM_DSR;
+	unsigned int msr;
+
+	/* REVISIT */
+
+	msr = serial_in(sport, IP3106_MCR);
+
+	mctrl |= msr & IP3106_UART_MCR_CTS ? TIOCM_CTS : 0;
+	mctrl |= msr & IP3106_UART_MCR_DCD ? TIOCM_CAR : 0;
+
+	return mctrl;
+}
+
+static void ip3106_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+#if	0	/* FIXME */
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	unsigned int msr;
+#endif
+}
+
+/*
+ * Interrupts always disabled.
+ */
+static void ip3106_break_ctl(struct uart_port *port, int break_state)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	unsigned long flags;
+	unsigned int lcr;
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+	lcr = serial_in(sport, IP3106_LCR);
+	if (break_state == -1)
+		lcr |= IP3106_UART_LCR_TXBREAK;
+	else
+		lcr &= ~IP3106_UART_LCR_TXBREAK;
+	serial_out(sport, IP3106_LCR, lcr);
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int ip3106_startup(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	int retval;
+
+	/*
+	 * Allocate the IRQ
+	 */
+	retval = request_irq(sport->port.irq, ip3106_int, 0,
+			     "ip3106-uart", sport);
+	if (retval)
+		return retval;
+
+	/*
+	 * Finally, clear and enable interrupts
+	 */
+
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_ALLRX |
+			     IP3106_UART_INT_ALLTX);
+
+	serial_out(sport, IP3106_IEN, serial_in(sport, IP3106_IEN) |
+			    IP3106_UART_INT_ALLRX |
+			    IP3106_UART_INT_ALLTX);
+
+	/*
+	 * Enable modem status interrupts
+	 */
+	spin_lock_irq(&sport->port.lock);
+	ip3106_enable_ms(&sport->port);
+	spin_unlock_irq(&sport->port.lock);
+
+	return 0;
+}
+
+static void ip3106_shutdown(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	/*
+	 * Stop our timer.
+	 */
+	del_timer_sync(&sport->timer);
+
+	/*
+	 * Disable all interrupts, port and break condition.
+	 */
+	serial_out(sport, IP3106_IEN, 0);
+
+	/*
+	 * Reset the Tx and Rx FIFOS
+	 */
+	serial_out(sport, IP3106_LCR, serial_in(sport, IP3106_LCR) |
+			    IP3106_UART_LCR_TX_RST |
+			    IP3106_UART_LCR_RX_RST);
+
+	/*
+	 * Clear all interrupts
+	 */
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_ALLRX |
+			     IP3106_UART_INT_ALLTX);
+
+	/*
+	 * Free the interrupt
+	 */
+	free_irq(sport->port.irq, sport);
+}
+
+static void
+ip3106_set_termios(struct uart_port *port, struct termios *termios,
+		   struct termios *old)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	unsigned long flags;
+	unsigned int lcr_fcr, old_ien, baud, quot;
+	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+
+	/*
+	 * We only support CS7 and CS8.
+	 */
+	while ((termios->c_cflag & CSIZE) != CS7 &&
+	       (termios->c_cflag & CSIZE) != CS8) {
+		termios->c_cflag &= ~CSIZE;
+		termios->c_cflag |= old_csize;
+		old_csize = CS8;
+	}
+
+	if ((termios->c_cflag & CSIZE) == CS8)
+		lcr_fcr = IP3106_UART_LCR_8BIT;
+	else
+		lcr_fcr = 0;
+
+	if (termios->c_cflag & CSTOPB)
+		lcr_fcr |= IP3106_UART_LCR_2STOPB;
+	if (termios->c_cflag & PARENB) {
+		lcr_fcr |= IP3106_UART_LCR_PAREN;
+		if (!(termios->c_cflag & PARODD))
+			lcr_fcr |= IP3106_UART_LCR_PAREVN;
+	}
+
+	/*
+	 * Ask the core to calculate the divisor for us.
+	 */
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+	quot = uart_get_divisor(port, baud);
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+
+#if	0	/* REVISIT */
+	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
+	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
+	if (termios->c_iflag & INPCK)
+		sport->port.read_status_mask |=
+				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		sport->port.read_status_mask |=
+				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
+
+	/*
+	 * Characters to ignore
+	 */
+	sport->port.ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		sport->port.ignore_status_mask |=
+				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
+	if (termios->c_iflag & IGNBRK) {
+		sport->port.ignore_status_mask |=
+				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			sport->port.ignore_status_mask |=
+				UTSR1_TO_SM(UTSR1_ROR);
+	}
+#endif
+
+	del_timer_sync(&sport->timer);
+
+	/*
+	 * Update the per-port timeout.
+	 */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	/*
+	 * disable interrupts and drain transmitter
+	 */
+	old_ien = serial_in(sport, IP3106_IEN);
+	serial_out(sport, IP3106_IEN, old_ien & ~(IP3106_UART_INT_ALLTX |
+					IP3106_UART_INT_ALLRX));
+
+	while (serial_in(sport, IP3106_FIFO) & IP3106_UART_FIFO_TXFIFO_STA)
+		barrier();
+
+	/* then, disable everything */
+	serial_out(sport, IP3106_IEN, 0);
+
+	/* Reset the Rx and Tx FIFOs too */
+	lcr_fcr |= IP3106_UART_LCR_TX_RST;
+	lcr_fcr |= IP3106_UART_LCR_RX_RST;
+
+	/* set the parity, stop bits and data size */
+	serial_out(sport, IP3106_LCR, lcr_fcr);
+
+	/* set the baud rate; the BAUD register appears to take (divisor - 1) */
+	quot -= 1;
+	serial_out(sport, IP3106_BAUD, quot);
+
+	serial_out(sport, IP3106_ICLR, -1);
+
+	serial_out(sport, IP3106_IEN, old_ien);
+
+	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+		ip3106_enable_ms(&sport->port);
+
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static const char *ip3106_type(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	return sport->port.type == PORT_IP3106 ? "IP3106" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void ip3106_release_port(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int ip3106_request_port(struct uart_port *port)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
+			"ip3106-uart") != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void ip3106_config_port(struct uart_port *port, int flags)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+
+	if (flags & UART_CONFIG_TYPE &&
+	    ip3106_request_port(&sport->port) == 0)
+		sport->port.type = PORT_IP3106;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only change we allow are to the flags and type, and
+ * even then only between PORT_IP3106 and PORT_UNKNOWN
+ */
+static int
+ip3106_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	struct ip3106_port *sport = (struct ip3106_port *)port;
+	int ret = 0;
+
+	if (ser->type != PORT_UNKNOWN && ser->type != PORT_IP3106)
+		ret = -EINVAL;
+	if (sport->port.irq != ser->irq)
+		ret = -EINVAL;
+	if (ser->io_type != SERIAL_IO_MEM)
+		ret = -EINVAL;
+	if (sport->port.uartclk / 16 != ser->baud_base)
+		ret = -EINVAL;
+	if ((void *)sport->port.mapbase != ser->iomem_base)
+		ret = -EINVAL;
+	if (sport->port.iobase != ser->port)
+		ret = -EINVAL;
+	if (ser->hub6 != 0)
+		ret = -EINVAL;
+	return ret;
+}
+
+struct uart_ops ip3106_pops = {
+	.tx_empty	= ip3106_tx_empty,
+	.set_mctrl	= ip3106_set_mctrl,
+	.get_mctrl	= ip3106_get_mctrl,
+	.stop_tx	= ip3106_stop_tx,
+	.start_tx	= ip3106_start_tx,
+	.stop_rx	= ip3106_stop_rx,
+	.enable_ms	= ip3106_enable_ms,
+	.break_ctl	= ip3106_break_ctl,
+	.startup	= ip3106_startup,
+	.shutdown	= ip3106_shutdown,
+	.set_termios	= ip3106_set_termios,
+	.type		= ip3106_type,
+	.release_port	= ip3106_release_port,
+	.request_port	= ip3106_request_port,
+	.config_port	= ip3106_config_port,
+	.verify_port	= ip3106_verify_port,
+};
+
+
+/*
+ * Setup the IP3106 serial ports.
+ *
+ * Note also that we support "console=ttySx" where "x" is either 0 or 1.
+ */
+static void __init ip3106_init_ports(void)
+{
+	static int first = 1;
+	int i;
+
+	if (!first)
+		return;
+	first = 0;
+
+	for (i = 0; i < NR_PORTS; i++) {
+		init_timer(&ip3106_ports[i].timer);
+		ip3106_ports[i].timer.function = ip3106_timeout;
+		ip3106_ports[i].timer.data     = (unsigned long)&ip3106_ports[i];
+	}
+}
+
+#ifdef CONFIG_SERIAL_IP3106_CONSOLE
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+ip3106_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct ip3106_port *sport = &ip3106_ports[co->index];
+	unsigned int old_ien, status, i;
+
+	/*
+	 *	First, save IEN and then disable interrupts
+	 */
+	old_ien = serial_in(sport, IP3106_IEN);
+	serial_out(sport, IP3106_IEN, old_ien & ~(IP3106_UART_INT_ALLTX |
+					IP3106_UART_INT_ALLRX));
+
+	/*
+	 *	Now, do each character
+	 */
+	for (i = 0; i < count; i++) {
+		do {
+			/* Wait for UART_TX register to empty */
+			status = serial_in(sport, IP3106_FIFO);
+		} while (status & IP3106_UART_FIFO_TXFIFO);
+		serial_out(sport, IP3106_FIFO, s[i]);
+		if (s[i] == '\n') {
+			do {
+				status = serial_in(sport, IP3106_FIFO);
+			} while (status & IP3106_UART_FIFO_TXFIFO);
+			serial_out(sport, IP3106_FIFO, '\r');
+		}
+	}
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore IEN
+	 */
+	do {
+		/* Wait for UART_TX register to empty */
+		status = serial_in(sport, IP3106_FIFO);
+	} while (status & IP3106_UART_FIFO_TXFIFO);
+
+	/* Clear TX and EMPTY interrupt */
+	serial_out(sport, IP3106_ICLR, IP3106_UART_INT_TX |
+			     IP3106_UART_INT_EMPTY);
+
+	serial_out(sport, IP3106_IEN, old_ien);
+}
+
+static int __init
+ip3106_console_setup(struct console *co, char *options)
+{
+	struct ip3106_port *sport;
+	int baud = 38400;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	/*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, search for the first available port that does have
+	 * console support.
+	 */
+	if (co->index == -1 || co->index >= NR_PORTS)
+		co->index = 0;
+	sport = &ip3106_ports[co->index];
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ip3106_reg;
+static struct console ip3106_console = {
+	.name		= "ttyS",
+	.write		= ip3106_console_write,
+	.device		= uart_console_device,
+	.setup		= ip3106_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &ip3106_reg,
+};
+
+static int __init ip3106_rs_console_init(void)
+{
+	ip3106_init_ports();
+	register_console(&ip3106_console);
+	return 0;
+}
+console_initcall(ip3106_rs_console_init);
+
+#define IP3106_CONSOLE	&ip3106_console
+#else
+#define IP3106_CONSOLE	NULL
+#endif
+
+static struct uart_driver ip3106_reg = {
+	.owner			= THIS_MODULE,
+	.driver_name		= "ttyS",
+	.dev_name		= "ttyS",
+	.major			= SERIAL_IP3106_MAJOR,
+	.minor			= MINOR_START,
+	.nr			= NR_PORTS,
+	.cons			= IP3106_CONSOLE,
+};
+
+static int ip3106_serial_suspend(struct device *_dev, pm_message_t state)
+{
+	struct ip3106_port *sport = dev_get_drvdata(_dev);
+
+	if (sport)
+		uart_suspend_port(&ip3106_reg, &sport->port);
+
+	return 0;
+}
+
+static int ip3106_serial_resume(struct device *_dev)
+{
+	struct ip3106_port *sport = dev_get_drvdata(_dev);
+
+	if (sport)
+		uart_resume_port(&ip3106_reg, &sport->port);
+
+	return 0;
+}
+
+static int ip3106_serial_probe(struct device *_dev)
+{
+	struct platform_device *dev = to_platform_device(_dev);
+	struct resource *res = dev->resource;
+	int i, j;
+
+	for (i = 0; i < dev->num_resources; i++, res++) {
+		if (!(res->flags & IORESOURCE_MEM))
+			continue;
+
+		for (j = 0; j < NR_PORTS; j++) {
+			if (ip3106_ports[j].port.mapbase != res->start)
+				continue;
+
+			ip3106_ports[j].port.dev = _dev;
+			uart_add_one_port(&ip3106_reg, &ip3106_ports[j].port);
+			dev_set_drvdata(_dev, &ip3106_ports[j]);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int ip3106_serial_remove(struct device *_dev)
+{
+	struct ip3106_port *sport = dev_get_drvdata(_dev);
+
+	dev_set_drvdata(_dev, NULL);
+
+	if (sport)
+		uart_remove_one_port(&ip3106_reg, &sport->port);
+
+	return 0;
+}
+
+static struct device_driver ip3106_serial_driver = {
+	.name		= "ip3106-uart",
+	.bus		= &platform_bus_type,
+	.probe		= ip3106_serial_probe,
+	.remove		= ip3106_serial_remove,
+	.suspend	= ip3106_serial_suspend,
+	.resume		= ip3106_serial_resume,
+};
+
+static int __init ip3106_serial_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "Serial: IP3106 driver $Revision: 1.2 $\n");
+
+	ip3106_init_ports();
+
+	ret = uart_register_driver(&ip3106_reg);
+	if (ret == 0) {
+		ret = driver_register(&ip3106_serial_driver);
+		if (ret)
+			uart_unregister_driver(&ip3106_reg);
+	}
+	return ret;
+}
+
+static void __exit ip3106_serial_exit(void)
+{
+	driver_unregister(&ip3106_serial_driver);
+	uart_unregister_driver(&ip3106_reg);
+}
+
+module_init(ip3106_serial_init);
+module_exit(ip3106_serial_exit);
+
+MODULE_AUTHOR("Embedded Alley Solutions, Inc.");
+MODULE_DESCRIPTION("IP3106 generic serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_IP3106_MAJOR);
diff -Naur linux-2.6.19/drivers/usb/host/ohci-hcd.c linux-mips-2.6.19/drivers/usb/host/ohci-hcd.c
--- linux-2.6.19/drivers/usb/host/ohci-hcd.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/usb/host/ohci-hcd.c	2006-11-29 15:23:09.000000000 -0800
@@ -931,6 +931,10 @@
 #include "ohci-au1xxx.c"
 #endif
 
+#ifdef CONFIG_PNX8550
+#include "ohci-pnx8550.c"
+#endif
+
 #ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
 #include "ohci-ppc-soc.c"
 #endif
diff -Naur linux-2.6.19/drivers/usb/host/ohci-pnx8550.c linux-mips-2.6.19/drivers/usb/host/ohci-pnx8550.c
--- linux-2.6.19/drivers/usb/host/ohci-pnx8550.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/usb/host/ohci-pnx8550.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,277 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ * (C) Copyright 2005 Embedded Alley Solutions, Inc.
+ *
+ * Bus Glue for PNX8550
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * Modified for LH7A404 from ohci-sa1111.c
+ *  by Durgesh Pattamatta <pattamattad@sharpsec.com>
+ *
+ * Modified for pxa27x from ohci-lh7a404.c
+ *  by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
+ *
+ * Modified for PNX8550 from ohci-pxa27x.c
+ *  by Embedded Alley Solutions, Inc. 
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <asm/mach-pnx8550/usb.h>
+#include <asm/mach-pnx8550/int.h>
+#include <asm/mach-pnx8550/pci.h>
+
+#ifndef CONFIG_PNX8550
+#error "This file is PNX8550 bus glue.  CONFIG_PNX8550 must be defined."
+#endif
+
+extern int usb_disabled(void);
+
+/*-------------------------------------------------------------------------*/
+
+static void pnx8550_start_hc(struct platform_device *dev)
+{
+	/*
+	 * Set register CLK48CTL to enable the clock and select 48MHz
+	 */
+	outl(0x00000003, PCI_BASE | 0x0004770c);
+
+	/*
+	 * Set register CLK12CTL to enable the clock and select 12MHz
+	 */
+	outl(0x00000003, PCI_BASE | 0x00047710);
+
+	udelay(100);
+}
+
+static void pnx8550_stop_hc(struct platform_device *dev)
+{
+	udelay(10);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+
+/**
+ * usb_hcd_pnx8550_probe - initialize pnx8550-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_pnx8550_probe (const struct hc_driver *driver,
+			  struct platform_device *dev)
+{
+	int retval;
+	struct usb_hcd *hcd;
+
+	if (dev->resource[1].flags != IORESOURCE_IRQ) {
+		pr_debug ("resource[1] is not IORESOURCE_IRQ");
+		return -ENOMEM;
+	}
+
+	hcd = usb_create_hcd (driver, &dev->dev, "pnx8550");
+	if (!hcd)
+		return -ENOMEM;
+	hcd->rsrc_start = dev->resource[0].start;
+	hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		pr_debug("request_mem_region failed");
+		retval = -EBUSY;
+		goto err1;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		pr_debug("ioremap failed");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	pnx8550_start_hc(dev);
+
+	ohci_hcd_init(hcd_to_ohci(hcd));
+
+	retval = usb_add_hcd(hcd, dev->resource[1].start, SA_INTERRUPT);
+	if (retval == 0)
+		return retval;
+
+	pnx8550_stop_hc(dev);
+	iounmap(hcd->regs);
+ err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ err1:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * usb_hcd_pnx8550_remove - shutdown processing for pnx8550-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_pnx8550_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ */
+void usb_hcd_pnx8550_remove (struct usb_hcd *hcd, struct platform_device *dev)
+{
+	usb_remove_hcd(hcd);
+	pnx8550_stop_hc(dev);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __devinit
+ohci_pnx8550_start (struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	int		ret;
+
+	ohci_dbg (ohci, "ohci_pnx8550_start, ohci:%p", ohci);
+
+	if ((ret = ohci_init(ohci)) < 0)
+		return ret;
+
+	if ((ret = ohci_run (ohci)) < 0) {
+		err ("can't start %s", hcd->self.bus_name);
+		ohci_stop (hcd);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ohci_pnx8550_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"PNX8550 OHCI",
+	.hcd_priv_size =	sizeof(struct ohci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.start =		ohci_pnx8550_start,
+	.stop =			ohci_stop,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	ohci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_pnx8550_drv_probe(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int ret;
+
+	pr_debug ("In ohci_hcd_pnx8550_drv_probe");
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	ret = usb_hcd_pnx8550_probe(&ohci_pnx8550_hc_driver, pdev);
+	return ret;
+}
+
+static int ohci_hcd_pnx8550_drv_remove(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	usb_hcd_pnx8550_remove(hcd, pdev);
+	return 0;
+}
+
+static int ohci_hcd_pnx8550_drv_suspend(struct device *dev, pm_message_t state)
+{
+//	struct platform_device *pdev = to_platform_device(dev);
+//	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	printk("%s: not implemented yet\n", __FUNCTION__);
+
+	return 0;
+}
+
+static int ohci_hcd_pnx8550_drv_resume(struct device *dev)
+{
+//	struct platform_device *pdev = to_platform_device(dev);
+//	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	printk("%s: not implemented yet\n", __FUNCTION__);
+
+	return 0;
+}
+
+
+static struct device_driver ohci_hcd_pnx8550_driver = {
+	.name		= "pnx8550-ohci",
+	.bus		= &platform_bus_type,
+	.probe		= ohci_hcd_pnx8550_drv_probe,
+	.remove		= ohci_hcd_pnx8550_drv_remove,
+	.suspend	= ohci_hcd_pnx8550_drv_suspend, 
+	.resume		= ohci_hcd_pnx8550_drv_resume, 
+};
+
+static int __init ohci_hcd_pnx8550_init (void)
+{
+	pr_debug (DRIVER_INFO " (pnx8550)");
+	pr_debug ("block sizes: ed %d td %d\n",
+		sizeof (struct ed), sizeof (struct td));
+
+	return driver_register(&ohci_hcd_pnx8550_driver);
+}
+
+static void __exit ohci_hcd_pnx8550_cleanup (void)
+{
+	driver_unregister(&ohci_hcd_pnx8550_driver);
+}
+
+module_init (ohci_hcd_pnx8550_init);
+module_exit (ohci_hcd_pnx8550_cleanup);
diff -Naur linux-2.6.19/drivers/video/Kconfig linux-mips-2.6.19/drivers/video/Kconfig
--- linux-2.6.19/drivers/video/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/video/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -1277,6 +1277,17 @@
 	  Please read the <file:Documentation/fb/README-sstfb.txt> for supported
 	  options and other important info  support.
 
+config FB_SMIVGX
+	tristate "Silicon Motion VoyagerGX support"
+	depends on FB && PCI && (MIPS || EXPERIMENTAL)
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	---help---
+	  This driver supports SMI VoyagerGX 501 based PCI boards.
+	  The default settings drive both a CRT and an LCD.  The CRT
+	  can be turned off by passing in the no_crt option.
+
 config FB_CYBLA
 	tristate "Cyberblade/i1 support"
 	depends on FB && PCI && X86_32 && !64BIT
@@ -1341,7 +1352,25 @@
 
 config FB_AU1100
 	bool "Au1100 LCD Driver"
-	depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y
+	depends on FB && MIPS && SOC_AU1100
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  This is the framebuffer driver for the AMD Au1100 SOC.  It can drive
+	  various panels and CRTs by passing in kernel cmd line option
+	  au1100fb:panel=<name>.
+
+config FB_AU1200
+	bool "Au1200 LCD Driver"
+	depends on FB && MIPS && SOC_AU1200
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  This is the framebuffer driver for the AMD Au1200 SOC.  It can drive
+	  various panels and CRTs by passing in kernel cmd line option
+	  au1200fb:panel=<name>.
 
 config FB_AU1200
 	bool "Au1200 LCD Driver"
@@ -1457,8 +1486,8 @@
  	select FB_CFB_IMAGEBLIT
 	help
 	  Support for the PMAGB-B TURBOchannel framebuffer card used mainly
-	  in the MIPS-based DECstation series. The card is currently only
-	  supported in 1280x1024x8 mode.
+	  in the MIPS-based DECstation series. The card is currently only 
+	  supported in 1280x1024x8 mode.  
 
 config FB_MAXINE
 	bool "Maxine (Personal DECstation) onboard framebuffer support"
diff -Naur linux-2.6.19/drivers/video/Makefile linux-mips-2.6.19/drivers/video/Makefile
--- linux-2.6.19/drivers/video/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/video/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -96,6 +96,7 @@
 obj-$(CONFIG_FB_TX3912)		  += tx3912fb.o
 obj-$(CONFIG_FB_S1D13XXX)	  += s1d13xxxfb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
+obj-$(CONFIG_FB_SMIVGX)		  += smivgxfb.o
 obj-$(CONFIG_FB_S3C2410)	  += s3c2410fb.o
 obj-$(CONFIG_FB_PNX4008_DUM)	  += pnx4008/
 obj-$(CONFIG_FB_PNX4008_DUM_RGB)  += pnx4008/
diff -Naur linux-2.6.19/drivers/video/au1100fb.c linux-mips-2.6.19/drivers/video/au1100fb.c
--- linux-2.6.19/drivers/video/au1100fb.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/drivers/video/au1100fb.c	2006-11-29 15:23:09.000000000 -0800
@@ -41,6 +41,7 @@
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
diff -Naur linux-2.6.19/drivers/video/smivgxfb.c linux-mips-2.6.19/drivers/video/smivgxfb.c
--- linux-2.6.19/drivers/video/smivgxfb.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/drivers/video/smivgxfb.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,387 @@
+/***************************************************************************
+ *  Silicon Motion VoyagerGX framebuffer driver
+ *
+ * 	ported to 2.6 by Embedded Alley Solutions, Inc
+ * 	Copyright (C) 2005 Embedded Alley Solutions, Inc
+ *
+ * 		based on
+    copyright            : (C) 2001 by Szu-Tao Huang
+    email                : johuang@siliconmotion.com
+    Updated to SM501 by Eric.Devolder@amd.com and dan@embeddededge.com
+    for the AMD Mirage Portable Tablet.  20 Oct 2003
+ ***************************************************************************/
+
+/***************************************************************************
+ *                                                                         *
+ *   This program is free software; you can redistribute it and/or modify  *
+ *   it under the terms of the GNU General Public License as published by  *
+ *   the Free Software Foundation; either version 2 of the License, or     *
+ *   (at your option) any later version.                                   *
+ *                                                                         *
+ ***************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+static char __iomem *SMIRegs;	// points to the virtual address of the memory-mapped I/O registers
+static char __iomem *SMILFB;	// points to the virtual address of the start of video memory
+
+static struct fb_fix_screeninfo smifb_fix __devinitdata = {
+	.id =		"smivgx",
+	.type =		FB_TYPE_PACKED_PIXELS,
+	.visual =	FB_VISUAL_TRUECOLOR,
+	.ywrapstep = 	0,
+	.line_length	= 1024 * 2, /* (bpp * xres)/8 */
+	.accel =	FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo smifb_var __devinitdata = {
+	.xres           = 1024,
+	.yres           = 768,
+	.xres_virtual   = 1024,
+	.yres_virtual   = 768,
+	.bits_per_pixel = 16,
+	.red            = { 11, 5, 0 },
+	.green          = {  5, 6, 0 },
+	.blue           = {  0, 5, 0 },
+	.activate       = FB_ACTIVATE_NOW,
+	.height         = -1,
+	.width          = -1,
+	.vmode          = FB_VMODE_NONINTERLACED,
+};
+
+
+static struct fb_info info;
+
+#define smi_mmiowb(dat,reg)	writeb(dat, (SMIRegs + reg))
+#define smi_mmioww(dat,reg)	writew(dat, (SMIRegs + reg))
+#define smi_mmiowl(dat,reg)	writel(dat, (SMIRegs + reg))
+
+#define smi_mmiorb(reg)	        readb(SMIRegs + reg)
+#define smi_mmiorw(reg)	        readw(SMIRegs + reg)
+#define smi_mmiorl(reg)	        readl(SMIRegs + reg)
+
+/* Address space offsets for various control/status registers. */
+#define MISC_CTRL			0x000004
+#define GPIO_LO_CTRL			0x000008
+#define GPIO_HI_CTRL			0x00000c
+#define DRAM_CTRL			0x000010
+#define CURRENT_POWER_GATE		0x000038
+#define CURRENT_POWER_CLOCK		0x00003C
+#define POWER_MODE1_GATE                0x000048
+#define POWER_MODE1_CLOCK               0x00004C
+#define POWER_MODE_CTRL			0x000054
+
+#define GPIO_DATA_LO			0x010000
+#define GPIO_DATA_HI			0x010004
+#define GPIO_DATA_DIR_LO		0x010008
+#define GPIO_DATA_DIR_HI		0x01000c
+#define I2C_BYTE_COUNT			0x010040
+#define I2C_CONTROL			0x010041
+#define I2C_STATUS_RESET		0x010042
+#define I2C_SLAVE_ADDRESS		0x010043
+#define I2C_DATA			0x010044
+
+#define DE_COLOR_COMPARE		0x100020
+#define DE_COLOR_COMPARE_MASK		0x100024
+#define DE_MASKS			0x100028
+#define DE_WRAP				0x10004C
+
+#define PANEL_DISPLAY_CTRL              0x080000
+#define PANEL_PAN_CTRL                  0x080004
+#define PANEL_COLOR_KEY                 0x080008
+#define PANEL_FB_ADDRESS                0x08000C
+#define PANEL_FB_WIDTH                  0x080010
+#define PANEL_WINDOW_WIDTH              0x080014
+#define PANEL_WINDOW_HEIGHT             0x080018
+#define PANEL_PLANE_TL                  0x08001C
+#define PANEL_PLANE_BR                  0x080020
+#define PANEL_HORIZONTAL_TOTAL          0x080024
+#define PANEL_HORIZONTAL_SYNC           0x080028
+#define PANEL_VERTICAL_TOTAL            0x08002C
+#define PANEL_VERTICAL_SYNC             0x080030
+#define PANEL_CURRENT_LINE              0x080034
+#define VIDEO_DISPLAY_CTRL		0x080040
+#define VIDEO_DISPLAY_FB0		0x080044
+#define VIDEO_DISPLAY_FBWIDTH		0x080048
+#define VIDEO_DISPLAY_FB0LAST		0x08004C
+#define VIDEO_DISPLAY_TL		0x080050
+#define VIDEO_DISPLAY_BR		0x080054
+#define VIDEO_SCALE			0x080058
+#define VIDEO_INITIAL_SCALE		0x08005C
+#define VIDEO_YUV_CONSTANTS		0x080060
+#define VIDEO_DISPLAY_FB1		0x080064
+#define VIDEO_DISPLAY_FB1LAST		0x080068
+#define VIDEO_ALPHA_CTRL		0x080080
+#define PANEL_HWC_ADDRESS		0x0800F0
+#define CRT_DISPLAY_CTRL		0x080200
+#define CRT_FB_ADDRESS			0x080204
+#define CRT_FB_WIDTH			0x080208
+#define CRT_HORIZONTAL_TOTAL		0x08020c
+#define CRT_HORIZONTAL_SYNC		0x080210
+#define CRT_VERTICAL_TOTAL		0x080214
+#define CRT_VERTICAL_SYNC		0x080218
+#define CRT_HWC_ADDRESS			0x080230
+#define CRT_HWC_LOCATION		0x080234
+
+#define ZV_CAPTURE_CTRL			0x090000
+#define ZV_CAPTURE_CLIP			0x090004
+#define ZV_CAPTURE_SIZE			0x090008
+#define ZV_CAPTURE_BUF0			0x09000c
+#define ZV_CAPTURE_BUF1			0x090010
+#define ZV_CAPTURE_OFFSET		0x090014
+#define ZV_FIFO_CTRL			0x090018
+
+#define waitforvsync() udelay(400)
+
+static int initdone = 0;
+static int crt_out = 1;
+
+
+static int
+smi_setcolreg(unsigned regno, unsigned red, unsigned green,
+	unsigned blue, unsigned transp,
+	struct fb_info *info)
+{
+	/* Only 16 pseudo-palette entries are allocated in vgx_pci_probe() */
+	if (regno > 15)
+		return 1;
+
+	((u32 *)(info->pseudo_palette))[regno] =
+		    ((red & 0xf800) >> 0) |
+		    ((green & 0xfc00) >> 5) |
+		    ((blue & 0xf800) >> 11);
+
+	return 0;
+}
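
The shifts in smi_setcolreg() implement the 5:6:5 true-colour layout declared in smifb_var below (red offset 11, green offset 5, blue offset 0), keeping only the most significant bits of each 16-bit component. A minimal stand-alone sketch of the same packing, for illustration only (rgb565_pack is a made-up name, not part of the driver):

#include <stdint.h>

/* Pack 16-bit-per-component colour values into one RGB565 pixel,
 * exactly as the pseudo-palette entries above are built. */
static uint16_t rgb565_pack(uint16_t red, uint16_t green, uint16_t blue)
{
	return ((red   & 0xf800) >> 0)  |	/* red   -> bits 15..11 */
	       ((green & 0xfc00) >> 5)  |	/* green -> bits 10..5  */
	       ((blue  & 0xf800) >> 11);	/* blue  -> bits  4..0  */
}

/* Example: rgb565_pack(0xffff, 0, 0) == 0xf800 (pure red). */
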
+
+/* This function still needs lots of work to generically support
+ * different output devices (CRT or LCD) and resolutions.
+ * Currently hard-coded for 1024x768 LCD panel.
+ */
+static void smi_setmode(void)
+{
+	if (initdone)
+		return;
+
+	initdone = 1;
+
+	/* Just blast in some control values based upon the chip
+	 * documentation.  We use the internal memory; I don't yet know
+	 * how to determine the amount available.
+	 */
+	smi_mmiowl(0x07F127C2, DRAM_CTRL);
+	smi_mmiowl(0x02000020, PANEL_HWC_ADDRESS);
+	smi_mmiowl(0x007FF800, PANEL_HWC_ADDRESS);
+	smi_mmiowl(0x00021827, POWER_MODE1_GATE);
+	smi_mmiowl(0x011A0A09, POWER_MODE1_CLOCK);
+	smi_mmiowl(0x00000001, POWER_MODE_CTRL);
+	smi_mmiowl(0x80000000, PANEL_FB_ADDRESS);
+	smi_mmiowl(0x08000800, PANEL_FB_WIDTH);
+	smi_mmiowl(0x04000000, PANEL_WINDOW_WIDTH);
+	smi_mmiowl(0x03000000, PANEL_WINDOW_HEIGHT);
+	smi_mmiowl(0x00000000, PANEL_PLANE_TL);
+	smi_mmiowl(0x02FF03FF, PANEL_PLANE_BR);
+	smi_mmiowl(0x05D003FF, PANEL_HORIZONTAL_TOTAL);
+	smi_mmiowl(0x00C80424, PANEL_HORIZONTAL_SYNC);
+	smi_mmiowl(0x032502FF, PANEL_VERTICAL_TOTAL);
+	smi_mmiowl(0x00060302, PANEL_VERTICAL_SYNC);
+	smi_mmiowl(0x00013905, PANEL_DISPLAY_CTRL);
+	smi_mmiowl(0x01013105, PANEL_DISPLAY_CTRL);
+	waitforvsync();
+	smi_mmiowl(0x03013905, PANEL_DISPLAY_CTRL);
+	waitforvsync();
+	smi_mmiowl(0x07013905, PANEL_DISPLAY_CTRL);
+	waitforvsync();
+	smi_mmiowl(0x0F013905, PANEL_DISPLAY_CTRL);
+	smi_mmiowl(0x0002187F, POWER_MODE1_GATE);
+	smi_mmiowl(0x01011801, POWER_MODE1_CLOCK);
+	smi_mmiowl(0x00000001, POWER_MODE_CTRL);
+
+	smi_mmiowl(0x80000000, PANEL_FB_ADDRESS);
+	smi_mmiowl(0x00000000, PANEL_PAN_CTRL);
+	smi_mmiowl(0x00000000, PANEL_COLOR_KEY);
+
+	if (crt_out) {
+		/* Just send the panel output to the CRT for now.
+		*/
+		smi_mmiowl(0x80000000, CRT_FB_ADDRESS);
+		smi_mmiowl(0x08000800, CRT_FB_WIDTH);
+		smi_mmiowl(0x05D003FF, CRT_HORIZONTAL_TOTAL);
+		smi_mmiowl(0x00C80424, CRT_HORIZONTAL_SYNC);
+		smi_mmiowl(0x032502FF, CRT_VERTICAL_TOTAL);
+		smi_mmiowl(0x00060302, CRT_VERTICAL_SYNC);
+		smi_mmiowl(0x007FF800, CRT_HWC_ADDRESS);
+		smi_mmiowl(0x00010305, CRT_DISPLAY_CTRL);
+		smi_mmiowl(0x00000001, MISC_CTRL);
+	}
+}
+
+/*
+ * Unmap the memory-mapped I/O registers.
+ */
+
+static void __devinit smi_unmap_mmio(void)
+{
+	if (SMIRegs) {
+		iounmap(SMIRegs);
+		SMIRegs = NULL;
+	}
+}
+
+
+/*
+ * Unmap the screen memory.
+ */
+static void __devinit smi_unmap_smem(void)
+{
+	if (SMILFB) {
+		iounmap(SMILFB);
+		SMILFB = NULL;
+	}
+}
+
+static void vgxfb_setup(char *options)
+{
+
+	if (!options || !*options)
+		return;
+
+	/* The only thing I'm looking for right now is to disable a
+	 * CRT output that mirrors the panel display.
+	 */
+	if (strcmp(options, "no_crt") == 0)
+		crt_out = 0;
+
+	return;
+}
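
vgxfb_setup() receives whatever follows the driver name in the standard fbdev video= option string, as fetched by fb_get_options() in vgxfb_init() below, so the CRT mirror can be disabled from the kernel command line. For example:

video=vgxfb:no_crt
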
+
+static struct fb_ops smifb_ops = {
+	.owner =		THIS_MODULE,
+	.fb_setcolreg =		smi_setcolreg,
+	.fb_fillrect =		cfb_fillrect,
+	.fb_copyarea =		cfb_copyarea,
+	.fb_imageblit =		cfb_imageblit,
+};
+
+static int __devinit vgx_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int err;
+
+	/* Enable the chip.
+	*/
+	err = pci_enable_device(dev);
+	if (err)
+		return err;
+
+
+	/* Set up MMIO space.
+	*/
+	smifb_fix.mmio_start = pci_resource_start(dev,1);
+	smifb_fix.mmio_len = 0x00200000;
+	SMIRegs = ioremap(smifb_fix.mmio_start, smifb_fix.mmio_len);
+
+	/* Set up the framebuffer.  It's a 64M space with a varying amount
+	 * of internal memory.  I don't know how to determine the real
+	 * amount of memory (yet).
+	 */
+	smifb_fix.smem_start = pci_resource_start(dev,0);
+	smifb_fix.smem_len = 0x00800000;
+	SMILFB = ioremap(smifb_fix.smem_start, smifb_fix.smem_len);
+
+	memset_io(SMILFB, 0, smifb_fix.smem_len);
+
+	info.screen_base = SMILFB;
+	info.fbops = &smifb_ops;
+	info.fix = smifb_fix;
+
+	info.flags = FBINFO_FLAG_DEFAULT;
+
+	info.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+	if (!info.pseudo_palette) {
+		err = -ENOMEM;
+		goto failed;
+	}
+	memset(info.pseudo_palette, 0, sizeof(u32) * 16);
+
+	fb_alloc_cmap(&info.cmap,256,0);
+
+	smi_setmode();
+
+	info.var = smifb_var;
+
+	if (register_framebuffer(&info) < 0) {
+		err = -EINVAL;
+		goto failed;
+	}
+
+	return 0;
+
+failed:
+	smi_unmap_smem();
+	smi_unmap_mmio();
+
+	return err;
+}
+
+static void __devexit vgx_pci_remove(struct pci_dev *dev)
+{
+	unregister_framebuffer(&info);
+	smi_unmap_smem();
+	smi_unmap_mmio();
+}
+
+static struct pci_device_id vgx_devices[] = {
+	{PCI_VENDOR_ID_SILICON_MOTION, PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_AA,
+	 PCI_ANY_ID, PCI_ANY_ID},
+	{PCI_VENDOR_ID_SILICON_MOTION, PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_B,
+	 PCI_ANY_ID, PCI_ANY_ID},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, vgx_devices);
+
+static struct pci_driver vgxfb_pci_driver = {
+	.name	= "vgxfb",
+	.id_table= vgx_devices,
+	.probe	= vgx_pci_probe,
+	.remove	= __devexit_p(vgx_pci_remove),
+};
+
+static int __init vgxfb_init(void)
+{
+	char *option = NULL;
+
+	if (fb_get_options("vgxfb", &option))
+		return -ENODEV;
+	vgxfb_setup(option);
+
+	printk("Silicon Motion Inc. VOYAGER Init complete.\n");
+	return pci_module_init(&vgxfb_pci_driver);
+}
+
+static void __exit vgxfb_exit(void)
+{
+	pci_unregister_driver(&vgxfb_pci_driver);
+}
+
+module_init(vgxfb_init);
+module_exit(vgxfb_exit);
+
+MODULE_AUTHOR("");
+MODULE_DESCRIPTION("Framebuffer driver for SMI Voyager");
+MODULE_LICENSE("GPL");
diff -Naur linux-2.6.19/include/asm-mips/compat.h linux-mips-2.6.19/include/asm-mips/compat.h
--- linux-2.6.19/include/asm-mips/compat.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/asm-mips/compat.h	2006-11-29 15:23:09.000000000 -0800
@@ -32,6 +32,7 @@
 	s32	val[2];
 } compat_fsid_t;
 typedef s32		compat_timer_t;
+typedef s32		compat_key_t;
 
 typedef s32		compat_int_t;
 typedef s32		compat_long_t;
@@ -146,4 +147,71 @@
 	return (void __user *) (regs->regs[29] - len);
 }
 
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid32_t uid;
+	__compat_gid32_t gid;
+	__compat_uid32_t cuid;
+	__compat_gid32_t cgid;
+	compat_mode_t mode;
+	unsigned short seq;
+	unsigned short __pad2;
+	compat_ulong_t __unused1;
+	compat_ulong_t __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t	sem_otime;
+	compat_time_t	sem_ctime;
+	compat_ulong_t	sem_nsems;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+	compat_time_t	msg_stime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+	compat_time_t	msg_rtime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_time_t	msg_ctime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_ulong_t	msg_cbytes;
+	compat_ulong_t	msg_qnum;
+	compat_ulong_t	msg_qbytes;
+	compat_pid_t	msg_lspid;
+	compat_pid_t	msg_lrpid;
+	compat_ulong_t	__unused4;
+	compat_ulong_t	__unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	compat_size_t	shm_segsz;
+	compat_time_t	shm_atime;
+	compat_time_t	shm_dtime;
+	compat_time_t	shm_ctime;
+	compat_pid_t	shm_cpid;
+	compat_pid_t	shm_lpid;
+	compat_ulong_t	shm_nattch;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
 #endif /* _ASM_COMPAT_H */
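
The CONFIG_CPU_LITTLE_ENDIAN conditionals in compat_msqid64_ds above mirror the 32-bit MIPS ABI layout: each 32-bit compat_time_t is paired with a pad word placed so that the time value sits in the low-order half of an aligned 64-bit slot on either endianness. A generic illustration of why the pad flips sides (the names here are illustrative, not from the patch):

/* On a big-endian CPU the high word of a 64-bit slot comes first in
 * memory, so the pad must precede the 32-bit value; on little-endian
 * it must follow it. */
union wide_slot {
	unsigned long long as64;	/* what a 64-bit reader of the slot sees */
	struct {
#ifdef __MIPSEB__
		unsigned int pad;	/* high half */
		unsigned int val;	/* low half: the real 32-bit value */
#else
		unsigned int val;	/* low half: the real 32-bit value */
		unsigned int pad;	/* high half */
#endif
	} as32;
};
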
diff -Naur linux-2.6.19/include/asm-mips/io.h linux-mips-2.6.19/include/asm-mips/io.h
--- linux-2.6.19/include/asm-mips/io.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/asm-mips/io.h	2006-11-29 15:23:09.000000000 -0800
@@ -518,34 +518,6 @@
 }
 
 /*
- * Memory Mapped I/O
- */
-#define ioread8(addr)		readb(addr)
-#define ioread16(addr)		readw(addr)
-#define ioread32(addr)		readl(addr)
-
-#define iowrite8(b,addr)	writeb(b,addr)
-#define iowrite16(w,addr)	writew(w,addr)
-#define iowrite32(l,addr)	writel(l,addr)
-
-#define ioread8_rep(a,b,c)	readsb(a,b,c)
-#define ioread16_rep(a,b,c)	readsw(a,b,c)
-#define ioread32_rep(a,b,c)	readsl(a,b,c)
-
-#define iowrite8_rep(a,b,c)	writesb(a,b,c)
-#define iowrite16_rep(a,b,c)	writesw(a,b,c)
-#define iowrite32_rep(a,b,c)	writesl(a,b,c)
-
-/* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
-
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-
-/*
  * ISA space is 'always mapped' on currently supported MIPS systems, no need
  * to explicitly ioremap() it. The fact that the ISA IO space is mapped
  * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
diff -Naur linux-2.6.19/include/asm-mips/mach-au1x00/au1000.h linux-mips-2.6.19/include/asm-mips/mach-au1x00/au1000.h
--- linux-2.6.19/include/asm-mips/mach-au1x00/au1000.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/asm-mips/mach-au1x00/au1000.h	2006-11-29 15:23:09.000000000 -0800
@@ -39,6 +39,7 @@
 #ifndef _LANGUAGE_ASSEMBLY
 
 #include <linux/delay.h>
+#include <linux/types.h>
 #include <asm/io.h>
 
 /* cpu pipeline flush */
@@ -1664,12 +1665,12 @@
  * addresses.  For PCI IO, it's simpler because we get to do the ioremap
  * ourselves and then adjust the device's resources.
  */
-#define Au1500_EXT_CFG            0x600000000ULL
-#define Au1500_EXT_CFG_TYPE1      0x680000000ULL
-#define Au1500_PCI_IO_START       0x500000000ULL
-#define Au1500_PCI_IO_END         0x5000FFFFFULL
-#define Au1500_PCI_MEM_START      0x440000000ULL
-#define Au1500_PCI_MEM_END        0x44FFFFFFFULL
+#define Au1500_EXT_CFG            ((resource_size_t) 0x600000000ULL)
+#define Au1500_EXT_CFG_TYPE1      ((resource_size_t) 0x680000000ULL)
+#define Au1500_PCI_IO_START       ((resource_size_t) 0x500000000ULL)
+#define Au1500_PCI_IO_END         ((resource_size_t) 0x5000FFFFFULL)
+#define Au1500_PCI_MEM_START      ((resource_size_t) 0x440000000ULL)
+#define Au1500_PCI_MEM_END        ((resource_size_t) 0x44FFFFFFFULL)
 
 #define PCI_IO_START    (Au1500_PCI_IO_START + 0x1000)
 #define PCI_IO_END      (Au1500_PCI_IO_END)
diff -Naur linux-2.6.19/include/asm-mips/mach-au1x00/au1xxx_ide.h linux-mips-2.6.19/include/asm-mips/mach-au1x00/au1xxx_ide.h
--- linux-2.6.19/include/asm-mips/mach-au1x00/au1xxx_ide.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/asm-mips/mach-au1x00/au1xxx_ide.h	2006-11-29 15:23:09.000000000 -0800
@@ -83,6 +83,7 @@
 } _auide_hwif;
 
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+
 /* HD white list */
 static const struct drive_list_entry dma_white_list [] = {
 /*
diff -Naur linux-2.6.19/include/asm-mips/page.h linux-mips-2.6.19/include/asm-mips/page.h
--- linux-2.6.19/include/asm-mips/page.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/asm-mips/page.h	2006-11-29 15:23:09.000000000 -0800
@@ -59,16 +59,13 @@
 		flush_data_cache_page((unsigned long)addr);
 }
 
-static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *to)
-{
-	extern void (*flush_data_cache_page)(unsigned long addr);
-
-	copy_page(vto, vfrom);
-	if (!cpu_has_ic_fills_f_dc ||
-	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		flush_data_cache_page((unsigned long)vto);
-}
+extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *to);
+struct vm_area_struct;
+extern void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
 
 /*
  * These are used to make use of C type-checking..
diff -Naur linux-2.6.19/include/linux/highmem.h linux-mips-2.6.19/include/linux/highmem.h
--- linux-2.6.19/include/linux/highmem.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/linux/highmem.h	2006-11-29 15:23:09.000000000 -0800
@@ -94,7 +94,10 @@
 	kunmap_atomic(kaddr, KM_USER0);
 }
 
-static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+static inline void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	char *vfrom, *vto;
 
@@ -107,6 +110,8 @@
 	smp_wmb();
 }
 
+#endif
+
 static inline void copy_highpage(struct page *to, struct page *from)
 {
 	char *vfrom, *vto;
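
Together, the page.h and highmem.h hunks above follow the usual arch-override pattern: the architecture header defines a feature macro and declares its own implementation, and the generic header compiles its inline fallback only when that macro is absent. Schematically (frob_page is a made-up name used only to show the shape):

struct page;

/* arch header */
#define __HAVE_ARCH_FROB_PAGE
extern void frob_page(struct page *page);

/* generic header */
#ifndef __HAVE_ARCH_FROB_PAGE
static inline void frob_page(struct page *page)
{
	/* portable default implementation */
}
#endif
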
diff -Naur linux-2.6.19/include/linux/pci_ids.h linux-mips-2.6.19/include/linux/pci_ids.h
--- linux-2.6.19/include/linux/pci_ids.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/linux/pci_ids.h	2006-11-29 15:23:09.000000000 -0800
@@ -1604,6 +1604,9 @@
 #define PCI_VENDOR_ID_SATSAGEM		0x1267
 #define PCI_DEVICE_ID_SATSAGEM_NICCY	0x1016
 
+#define PCI_VENDOR_ID_SILICON_MOTION		0x126f
+#define PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_AA	0x0501
+#define PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_B	0x0510
 
 #define PCI_VENDOR_ID_ENSONIQ		0x1274
 #define PCI_DEVICE_ID_ENSONIQ_CT5880	0x5880
diff -Naur linux-2.6.19/include/linux/serial.h linux-mips-2.6.19/include/linux/serial.h
--- linux-2.6.19/include/linux/serial.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/linux/serial.h	2006-11-29 15:23:09.000000000 -0800
@@ -76,7 +76,8 @@
 #define PORT_16654	11
 #define PORT_16850	12
 #define PORT_RSA	13	/* RSA-DV II/S card */
-#define PORT_MAX	13
+#define PORT_SB1250	14
+#define PORT_MAX	14
 
 #define SERIAL_IO_PORT	0
 #define SERIAL_IO_HUB6	1
diff -Naur linux-2.6.19/include/linux/serial_ip3106.h linux-mips-2.6.19/include/linux/serial_ip3106.h
--- linux-2.6.19/include/linux/serial_ip3106.h	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/include/linux/serial_ip3106.h	2006-11-29 15:23:09.000000000 -0800
@@ -78,4 +78,16 @@
 #define IP3106_UART_FIFO_RXFIFO		0x00001F00
 #define IP3106_UART_FIFO_RBRTHR		0x000000FF
 
+#define ip3106_lcr(base, port)   (*(volatile u32 *)((base) + ((port) * 0x1000) + 0x000))
+#define ip3106_mcr(base, port)   (*(volatile u32 *)((base) + ((port) * 0x1000) + 0x004))
+#define ip3106_baud(base, port)  (*(volatile u32 *)((base) + ((port) * 0x1000) + 0x008))
+#define ip3106_cfg(base, port)   (*(volatile u32 *)((base) + ((port) * 0x1000) + 0x00C))
+#define ip3106_fifo(base, port)  (*(volatile u32 *)((base) + ((port) * 0x1000) + 0x028))
+#define ip3106_istat(base, port) (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFE0))
+#define ip3106_ien(base, port)   (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFE4))
+#define ip3106_iclr(base, port)  (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFE8))
+#define ip3106_iset(base, port)  (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFEC))
+#define ip3106_pd(base, port)    (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFF4))
+#define ip3106_mid(base, port)   (*(volatile u32 *)((base) + ((port) * 0x1000) + 0xFFC))
+
 #endif
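
The ip3106_* accessors above expand to dereferenced volatile pointers, so they can be used on either side of an assignment. A hypothetical read-modify-write of port 0's line control register; the physical base, size and bit value are assumptions for illustration only:

	void __iomem *uart_base = ioremap(phys_base, 0x4000);	/* phys_base, size: assumed */
	u32 lcr;

	lcr = ip3106_lcr(uart_base, 0);		/* read port 0 LCR */
	ip3106_lcr(uart_base, 0) = lcr | 0x1;	/* write it back with bit 0 set (illustrative) */
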
diff -Naur linux-2.6.19/mm/memory.c linux-mips-2.6.19/mm/memory.c
--- linux-2.6.19/mm/memory.c	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/mm/memory.c	2006-11-29 15:23:09.000000000 -0800
@@ -1431,7 +1431,7 @@
 	return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
 	 * If the source page was a PFN mapping, we don't have
@@ -1454,9 +1454,9 @@
 		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(dst);
 		return;
-		
+
 	}
-	copy_user_highpage(dst, src, va);
+	copy_user_highpage(dst, src, va, vma);
 }
 
 /*
@@ -1567,7 +1567,7 @@
 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 		if (!new_page)
 			goto oom;
-		cow_user_page(new_page, old_page, address);
+		cow_user_page(new_page, old_page, address, vma);
 	}
 
 	/*
@@ -2191,7 +2191,7 @@
 			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 			if (!page)
 				goto oom;
-			copy_user_highpage(page, new_page, address);
+			copy_user_highpage(page, new_page, address, vma);
 			page_cache_release(new_page);
 			new_page = page;
 			anon = 1;
diff -Naur linux-2.6.19/sound/oss/Kconfig linux-mips-2.6.19/sound/oss/Kconfig
--- linux-2.6.19/sound/oss/Kconfig	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/sound/oss/Kconfig	2006-11-29 15:23:09.000000000 -0800
@@ -128,6 +128,13 @@
 	select SND_AC97_CODEC
 	depends on SOUND_PRIME && (SOC_AU1550 || SOC_AU1200)
 
+config SOUND_AU1550_I2S
+	tristate "Au1550 I2S Sound"
+	depends on SOUND_PRIME && SOC_AU1550
+	# Weird I2S driver needs I2C driver to talk to the codec...
+	select I2C
+	select I2C_AU1550
+
 config SOUND_TRIDENT
 	tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core"
 	depends on SOUND_PRIME && PCI
diff -Naur linux-2.6.19/sound/oss/Makefile linux-mips-2.6.19/sound/oss/Makefile
--- linux-2.6.19/sound/oss/Makefile	2006-11-29 13:57:37.000000000 -0800
+++ linux-mips-2.6.19/sound/oss/Makefile	2006-11-29 15:23:09.000000000 -0800
@@ -45,6 +45,7 @@
 obj-$(CONFIG_SOUND_ES1371)	+= es1371.o ac97_codec.o
 obj-$(CONFIG_SOUND_VRC5477)	+= nec_vrc5477.o ac97_codec.o
 obj-$(CONFIG_SOUND_AU1550_AC97)	+= au1550_ac97.o ac97_codec.o
+obj-$(CONFIG_SOUND_AU1550_I2S)	+= au1550_i2s.o
 obj-$(CONFIG_SOUND_FUSION)	+= cs46xx.o ac97_codec.o
 obj-$(CONFIG_SOUND_TRIDENT)	+= trident.o ac97_codec.o
 obj-$(CONFIG_SOUND_EMU10K1)	+= ac97_codec.o
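
With the Kconfig and Makefile hunks above, enabling the new option pulls in the I2C pieces it needs via select and builds the driver added below. For example, in a board .config:

CONFIG_SOUND_PRIME=y
CONFIG_SOC_AU1550=y
CONFIG_SOUND_AU1550_I2S=m
# I2C and I2C_AU1550 are selected automatically by SOUND_AU1550_I2S
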
diff -Naur linux-2.6.19/sound/oss/au1550_i2s.c linux-mips-2.6.19/sound/oss/au1550_i2s.c
--- linux-2.6.19/sound/oss/au1550_i2s.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.19/sound/oss/au1550_i2s.c	2006-11-29 15:23:09.000000000 -0800
@@ -0,0 +1,2029 @@
+/*
+ *      au1550_i2s.c  --  Sound driver for Alchemy Au1550 MIPS
+ *			Internet Edge Processor.
+ *
+ * Copyright 2004 Embedded Edge, LLC
+ *	dan@embeddededge.com
+ * Copyright 2005 Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Mostly copied from the au1550_psc.c driver and some from the
+ * PowerMac dbdma driver.
+ * We assume the processor can do memory coherent DMA.
+ *
+ * WM8731 mixer support, codec framework, cleanup, and 2.6 port
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * The SMBus (I2C) is required for control of the codec, which
+ * appears at I2C address 0x36 (I2C binary 0011011).  The Pb1550
+ * uses the Wolfson WM8731 codec, which is controlled over the I2C.
+ * It's connected to a 12MHz clock, so we can only reliably support
+ * 96KHz, 48KHz, 32KHz, and 8KHz data rates.  Variable rate audio is
+ * unsupported; we currently force it to 48KHz.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/hardirq.h>
+#include <linux/sound.h>
+#include <linux/slab.h>
+#include <linux/soundcard.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/hardirq.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-pb1x00/pb1550.h>
+
+#undef OSS_DOCUMENTED_MIXER_SEMANTICS
+
+#define AU1550_MODULE_NAME "Au1550 I2S Audio"
+#define PFX AU1550_MODULE_NAME
+
+/* Define this if you want to try running at the 44.1 KHz rate.
+ * It's just a little off; I think it's actually 44117 or something.
+ * I did this for debugging, since many programs, including this
+ * driver, will try to upsample from 44.1 to 48 KHz.
+ * It seems to work well, so we'll just leave it this way.
+ */
+#define TRY_441KHz
+
+#ifdef TRY_441KHz
+#define SAMP_RATE	44100
+#else
+#define SAMP_RATE	48000
+#endif
+
+/* The number of DBDMA ring descriptors to allocate.  No sense making
+ * this too large; if you can't keep up with a few, you aren't likely
+ * to keep up with lots of them either.
+ */
+#define NUM_DBDMA_DESCRIPTORS 4
+
+#define pr_error(format, arg...) printk(KERN_ERR PFX ": " format "\n" , ## arg)
+
+static void
+au1550_delay(int msec)
+{
+	unsigned long   tmo;
+	signed long     tmo2;
+
+	if (in_interrupt())
+		return;
+
+	tmo = jiffies + (msec * HZ) / 1000;
+	for (;;) {
+		tmo2 = tmo - jiffies;
+		if (tmo2 <= 0)
+			break;
+		schedule_timeout(tmo2);
+	}
+}
+
+/*
+ * Codec framework. If somebody supports another codec, they
+ * should hopefully be able to define another struct i2s_codec
+ * definition, and #ifdef the support for it and the WM8731 so
+ * they can be selected via a CONFIG option. For now, we just
+ * hardcode WM8731_CODEC.
+ */
+#define i2s_supported_mixer(CODEC,FOO) ((FOO >= 0) && \
+                                    (FOO < SOUND_MIXER_NRDEVICES) && \
+                                    (CODEC)->supported_mixers & (1<<FOO) )
+
+struct i2s_codec {
+	int			modcnt;
+	int			supported_mixers;
+	int			stereo_mixers;
+	int			record_sources;
+	unsigned int		mixer_state[SOUND_MIXER_NRDEVICES];
+	void			*data;
+	int			(*set_mixer) (struct i2s_codec *codec, unsigned int oss_mixer, unsigned int val);
+	void			(*init_codec) (struct i2s_codec *codec);
+};
+
+#define WM8731_CODEC
+#ifdef WM8731_CODEC
+/*
+ * WM8731 codec support
+ */
+#define WM8731_SUPPORTED_MASK (WM8731_STEREO_MASK|WM8731_RECORD_MASK)
+#define WM8731_STEREO_MASK (SOUND_MASK_VOLUME|SOUND_MASK_LINE)
+#define WM8731_RECORD_MASK (SOUND_MASK_MIC|SOUND_MASK_LINE)
+
+static struct codec_data {
+	u16			audio_path;
+} wm8731_data;
+
+static void
+wm8731_wrcodec(u8 ctlreg, u8 val)
+{
+	int	rcnt;
+	extern int pb1550_wm_codec_write(u8 addr, u8 reg, u8 val);
+
+	/* The codec is a write only device, with a 16-bit control/data
+	 * word.  Although it is written as two bytes on the I2C, the
+	 * format is actually 7 bits of register and 9 bits of data.
+	 * The ls bit of the first byte is the ms bit of the data.
+	 */
+	rcnt = 0;
+	while ((pb1550_wm_codec_write((0x36 >> 1), ctlreg, val) != 1)
+							&& (rcnt < 50)) {
+		rcnt++;
+	}
+}
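
The comment in wm8731_wrcodec() describes the wire format: 7 bits of register address and 9 bits of data spread across the two I2C payload bytes, with data bit 8 riding in bit 0 of the first byte. A small illustrative helper for that packing (wm8731_pack is a made-up name; the driver itself passes the already-shifted first byte as ctlreg, e.g. 0x1e is register 0x0f):

/* Build the two I2C payload bytes from a 7-bit register number and a
 * 9-bit value. */
static void wm8731_pack(u8 reg7, u16 val9, u8 *byte0, u8 *byte1)
{
	*byte0 = (reg7 << 1) | ((val9 >> 8) & 0x01);	/* register + data bit 8 */
	*byte1 = val9 & 0xff;				/* data bits 7..0 */
}
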
+
+static int
+wm8731_set_mixer(struct i2s_codec *codec, unsigned int oss_mixer, unsigned int val)
+{
+	unsigned int lvol, rvol;
+	struct codec_data *cdata = (struct codec_data *)codec->data;
+
+	switch (oss_mixer) {
+		case SOUND_MIXER_VOLUME:
+			/* normalize OSS range to fit codec volume control */
+			lvol = ((((val & 0x7f00) >> 8) * 0x60) / 0x64) + 0x1f;
+			rvol = (((val & 0x7f) * 0x60) / 0x64) + 0x1f;
+			lvol |= 0x80;
+			rvol |= 0x80;
+			wm8731_wrcodec(0x04, lvol);
+			au1550_delay(10);
+			wm8731_wrcodec(0x06, rvol);
+			au1550_delay(10);
+			codec->mixer_state[oss_mixer] = val;
+			break;
+		case SOUND_MIXER_LINE:
+			/* normalize OSS range to fit codec line control */
+			lvol = ((((val & 0x7f00) >> 8) * 0x1f) / 0x64);
+			rvol = (((val & 0x7f) * 0x1f) / 0x64);
+			if (!(val & 0x1f00))
+				lvol |= 0x80;
+			else
+				lvol &= ~0x80;
+			if (!(val & 0x001f))
+				rvol |= 0x80;
+			else
+				rvol &= ~0x80;
+			wm8731_wrcodec(0x00, lvol);
+			au1550_delay(10);
+			wm8731_wrcodec(0x02, rvol);
+			au1550_delay(10);
+			codec->mixer_state[oss_mixer] = val;
+			break;
+		case SOUND_MIXER_MIC:
+			if (!val)
+				cdata->audio_path |= 0x02;
+			else {
+				if (val >= 0x32)
+					cdata->audio_path |= 0x01;
+				else
+					cdata->audio_path &= ~0x01;
+				cdata->audio_path &= ~0x02;
+			}
+			wm8731_wrcodec(0x08, cdata->audio_path);
+			au1550_delay(10);
+			codec->mixer_state[oss_mixer] = val;
+			break;
+		case SOUND_MIXER_RECSRC:
+			if (val & SOUND_MASK_LINE)
+				cdata->audio_path &= ~0x04;
+			else
+				cdata->audio_path |= 0x04;
+			wm8731_wrcodec(0x08, cdata->audio_path);
+			au1550_delay(10);
+			codec->mixer_state[oss_mixer] = val;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	return 0;
+}
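
The SOUND_MIXER_VOLUME case above maps each OSS channel value (0..100, i.e. 0..0x64) onto the 0x1f..0x7f range before setting the top bit and writing registers 0x04/0x06. Worked example for an OSS left-channel value of 80:

/*
 *   left  = (val & 0x7f00) >> 8           = 80
 *   lvol  = ((80 * 0x60) / 0x64) + 0x1f   = 76 + 31 = 0x6b
 *   lvol |= 0x80                          -> 0xeb, sent with wm8731_wrcodec(0x04, lvol)
 */
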
+
+void
+wm8731_init_codec(struct i2s_codec *codec)
+{
+	struct codec_data *cdata = (struct codec_data *)codec->data;
+
+	wm8731_wrcodec(0x1e, 0x00);	/* Reset */
+	au1550_delay(200);
+	wm8731_wrcodec(0x0c, 0x00);	/* Power up everything */
+	au1550_delay(10);
+	wm8731_wrcodec(0x12, 0x00);	/* Deactivate codec */
+	au1550_delay(10);
+	cdata->audio_path = 0x10;
+	/* Select DAC outputs to line out */
+	wm8731_wrcodec(0x08, cdata->audio_path);
+	au1550_delay(10);
+	wm8731_wrcodec(0x0a, 0x00);	/* Disable output mute */
+	au1550_delay(10);
+	wm8731_wrcodec(0x0e, 0x02);	/* Set slave, 16-bit, I2S modes */
+	au1550_delay(10);
+	wm8731_wrcodec(0x10, 0x01);	/* 12MHz (USB), 250fs */
+	au1550_delay(10);
+	wm8731_wrcodec(0x12, 0x01);	/* Activate codec */
+	au1550_delay(10);
+
+	codec->set_mixer(codec, SOUND_MIXER_VOLUME, 0x5050);
+	codec->set_mixer(codec, SOUND_MIXER_LINE, 0x0000);
+	codec->set_mixer(codec, SOUND_MIXER_MIC, 0x00);
+	codec->mixer_state[SOUND_MIXER_RECSRC] = SOUND_MASK_LINE;
+}
+
+static struct i2s_codec au1550_i2s_codec = {
+	.supported_mixers	= WM8731_SUPPORTED_MASK,
+	.stereo_mixers		= WM8731_STEREO_MASK,
+	.record_sources		= WM8731_RECORD_MASK,
+	.init_codec		= &wm8731_init_codec,
+	.set_mixer		= &wm8731_set_mixer,
+	.data			= &wm8731_data,
+};
+#endif /* WM8731_CODEC */
+
+static struct au1550_state {
+	/* soundcore stuff */
+	int             dev_audio;
+	int		dev_mixer;
+
+	spinlock_t		lock;
+	struct semaphore	open_sem;
+	struct semaphore	sem;
+	mode_t			open_mode;
+	wait_queue_head_t	open_wait;
+	volatile psc_i2s_t	*psc_addr;
+	struct i2s_codec	*codec;
+
+	struct dmabuf {
+		u32		dmanr;
+		unsigned        sample_rate;
+		unsigned	src_factor;
+		unsigned        sample_size;
+		int             num_channels;
+		int		dma_bytes_per_sample;
+		int		user_bytes_per_sample;
+		int		cnt_factor;
+
+		void		*rawbuf;
+		unsigned        buforder;
+		unsigned	numfrag;
+		unsigned        fragshift;
+		void		*nextIn;
+		void		*nextOut;
+		int		count;
+		unsigned        total_bytes;
+		unsigned        error;
+		wait_queue_head_t wait;
+
+		/* redundant, but makes calculations easier */
+		unsigned	fragsize;
+		unsigned	dma_fragsize;
+		unsigned	dmasize;
+		unsigned	dma_qcount;
+
+		/* OSS stuff */
+		unsigned        mapped:1;
+		unsigned        ready:1;
+		unsigned        stopped:1;
+		unsigned        ossfragshift;
+		int             ossmaxfrags;
+		unsigned        subdivision;
+
+		/* Mixer stuff */
+		int		dev_mixer;
+	} dma_dac, dma_adc;
+} au1550_state;
+
+static unsigned
+ld2(unsigned int x)
+{
+	unsigned        r = 0;
+
+	if (x >= 0x10000) {
+		x >>= 16;
+		r += 16;
+	}
+	if (x >= 0x100) {
+		x >>= 8;
+		r += 8;
+	}
+	if (x >= 0x10) {
+		x >>= 4;
+		r += 4;
+	}
+	if (x >= 4) {
+		x >>= 2;
+		r += 2;
+	}
+	if (x >= 2)
+		r++;
+	return r;
+}
+
+/* stop the ADC before calling */
+static void
+set_adc_rate(struct au1550_state *s, unsigned rate)
+{
+	struct dmabuf  *adc = &s->dma_adc;
+
+	/* calc SRC factor */
+	adc->src_factor = (((SAMP_RATE*2) / rate) + 1) >> 1;
+	adc->sample_rate = SAMP_RATE / adc->src_factor;
+}
+
+/* stop the DAC before calling */
+static void
+set_dac_rate(struct au1550_state *s, unsigned rate)
+{
+	struct dmabuf  *dac = &s->dma_dac;
+
+	/* calc SRC factor */
+	dac->src_factor = (((SAMP_RATE*2) / rate) + 1) >> 1;
+	dac->sample_rate = SAMP_RATE / dac->src_factor;
+}
+
+static void
+stop_dac(struct au1550_state *s)
+{
+	struct dmabuf  *db = &s->dma_dac;
+	unsigned long   flags;
+	uint	stat;
+	volatile psc_i2s_t *ip;
+
+	if (db->stopped)
+		return;
+
+	ip = s->psc_addr;
+	spin_lock_irqsave(&s->lock, flags);
+
+	ip->psc_i2spcr = PSC_I2SPCR_TP;
+	au_sync();
+
+	/* Wait for Transmit Busy to show disabled.
+	*/
+	do {
+		stat = ip->psc_i2sstat;
+		au_sync();
+	} while ((stat & PSC_I2SSTAT_TB) != 0);
+
+	au1xxx_dbdma_reset(db->dmanr);
+
+	db->stopped = 1;
+
+	spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static void
+stop_adc(struct au1550_state *s)
+{
+	struct dmabuf  *db = &s->dma_adc;
+	unsigned long   flags;
+	uint	stat;
+	volatile psc_i2s_t *ip;
+
+	if (db->stopped)
+		return;
+
+	ip = s->psc_addr;
+	spin_lock_irqsave(&s->lock, flags);
+
+	ip->psc_i2spcr = PSC_I2SPCR_RP;
+	au_sync();
+
+	/* Wait for Receive Busy to show disabled.
+	*/
+	do {
+		stat = ip->psc_i2sstat;
+		au_sync();
+	} while ((stat & PSC_I2SSTAT_RB) != 0);
+
+	au1xxx_dbdma_reset(db->dmanr);
+
+	db->stopped = 1;
+
+	spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static void
+set_xmit_slots(int num_channels)
+{
+	/* This is here just as a place holder.  The WM8731 only
+	 * supports two fixed channels.
+	 */
+}
+
+static void
+set_recv_slots(int num_channels)
+{
+	/* This is here just as a place holder.  The WM8731 only
+	 * supports two fixed channels.
+	 */
+}
+
+static void
+start_dac(struct au1550_state *s)
+{
+	struct dmabuf  *db = &s->dma_dac;
+	unsigned long   flags;
+	volatile psc_i2s_t *ip;
+
+	if (!db->stopped)
+		return;
+
+	spin_lock_irqsave(&s->lock, flags);
+
+	ip = s->psc_addr;
+	set_xmit_slots(db->num_channels);
+	ip->psc_i2spcr = PSC_I2SPCR_TC;
+	au_sync();
+	ip->psc_i2spcr = PSC_I2SPCR_TS;
+	au_sync();
+
+	au1xxx_dbdma_start(db->dmanr);
+
+	db->stopped = 0;
+
+	spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static void
+start_adc(struct au1550_state *s)
+{
+	struct dmabuf  *db = &s->dma_adc;
+	int	i;
+	volatile psc_i2s_t *ip;
+
+	if (!db->stopped)
+		return;
+
+	/* Put two buffers on the ring to get things started.
+	*/
+	for (i=0; i<2; i++) {
+		au1xxx_dbdma_put_dest(db->dmanr, db->nextIn, db->dma_fragsize);
+
+		db->nextIn += db->dma_fragsize;
+		if (db->nextIn >= db->rawbuf + db->dmasize)
+			db->nextIn -= db->dmasize;
+	}
+
+	ip = s->psc_addr;
+	set_recv_slots(db->num_channels);
+	au1xxx_dbdma_start(db->dmanr);
+	ip->psc_i2spcr = PSC_I2SPCR_RC;
+	au_sync();
+	ip->psc_i2spcr = PSC_I2SPCR_RS;
+	au_sync();
+
+	db->stopped = 0;
+}
+
+static int
+prog_dmabuf(struct au1550_state *s, struct dmabuf *db)
+{
+	unsigned user_bytes_per_sec;
+	unsigned        bufs;
+	unsigned        rate = db->sample_rate;
+
+	if (!db->rawbuf) {
+		db->ready = db->mapped = 0;
+		db->buforder = 5;	/* 32 * PAGE_SIZE */
+		db->rawbuf = kmalloc((PAGE_SIZE << db->buforder), GFP_KERNEL);
+		if (!db->rawbuf)
+			return -ENOMEM;
+	}
+
+	db->cnt_factor = 1;
+	if (db->sample_size == 8)
+		db->cnt_factor *= 2;
+	if (db->num_channels == 1)
+		db->cnt_factor *= 2;
+	db->cnt_factor *= db->src_factor;
+
+	db->count = 0;
+	db->dma_qcount = 0;
+	db->nextIn = db->nextOut = db->rawbuf;
+
+	db->user_bytes_per_sample = (db->sample_size>>3) * db->num_channels;
+	db->dma_bytes_per_sample = 2 * ((db->num_channels == 1) ?
+					2 : db->num_channels);
+
+	user_bytes_per_sec = rate * db->user_bytes_per_sample;
+	bufs = PAGE_SIZE << db->buforder;
+	if (db->ossfragshift) {
+		if ((1000 << db->ossfragshift) < user_bytes_per_sec)
+			db->fragshift = ld2(user_bytes_per_sec/1000);
+		else
+			db->fragshift = db->ossfragshift;
+	} else {
+		db->fragshift = ld2(user_bytes_per_sec / 100 /
+				    (db->subdivision ? db->subdivision : 1));
+		if (db->fragshift < 3)
+			db->fragshift = 3;
+	}
+
+	db->fragsize = 1 << db->fragshift;
+	db->dma_fragsize = db->fragsize * db->cnt_factor;
+	db->numfrag = bufs / db->dma_fragsize;
+
+	while (db->numfrag < 4 && db->fragshift > 3) {
+		db->fragshift--;
+		db->fragsize = 1 << db->fragshift;
+		db->dma_fragsize = db->fragsize * db->cnt_factor;
+		db->numfrag = bufs / db->dma_fragsize;
+	}
+
+	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
+		db->numfrag = db->ossmaxfrags;
+
+	db->dmasize = db->dma_fragsize * db->numfrag;
+	memset(db->rawbuf, 0, bufs);
+
+#ifdef AU1000_VERBOSE_DEBUG
+	dbg("rate=%d, samplesize=%d, channels=%d",
+	    rate, db->sample_size, db->num_channels);
+	dbg("fragsize=%d, cnt_factor=%d, dma_fragsize=%d",
+	    db->fragsize, db->cnt_factor, db->dma_fragsize);
+	dbg("numfrag=%d, dmasize=%d", db->numfrag, db->dmasize);
+#endif
+
+	db->ready = 1;
+	return 0;
+}
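
As a concrete example of the fragment sizing above, assume 4 KiB pages, 16-bit stereo at 44.1 kHz, src_factor 1, and no OSS fragment hints (ossfragshift and subdivision both 0):

/*
 *   user_bytes_per_sample = (16 >> 3) * 2           = 4
 *   user_bytes_per_sec    = 44100 * 4               = 176400
 *   fragshift             = ld2(176400 / 100)       = ld2(1764) = 10
 *   fragsize              = 1 << 10                 = 1024 bytes
 *   dma_fragsize          = 1024 * cnt_factor (1)   = 1024 bytes
 *   numfrag               = (4096 << 5) / 1024      = 128 fragments
 */
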
+
+static int
+prog_dmabuf_adc(struct au1550_state *s)
+{
+	stop_adc(s);
+	return prog_dmabuf(s, &s->dma_adc);
+
+}
+
+static int
+prog_dmabuf_dac(struct au1550_state *s)
+{
+	stop_dac(s);
+	return prog_dmabuf(s, &s->dma_dac);
+}
+
+
+/* hold spinlock for the following */
+static void
+dac_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct au1550_state *s = (struct au1550_state *) dev_id;
+	struct dmabuf  *db = &s->dma_dac;
+	u32	i2s_stat;
+	volatile psc_i2s_t *ip;
+
+	ip = s->psc_addr;
+	i2s_stat = ip->psc_i2sstat;
+#ifdef AU1000_VERBOSE_DEBUG
+	if (i2s_stat & (PSC_I2SSTAT_TF | PSC_I2SSTAT_TR))
+		dbg("I2S status = 0x%08x", i2s_stat);
+#endif
+	db->dma_qcount--;
+
+	if (db->count >= db->fragsize) {
+		if (au1xxx_dbdma_put_source(db->dmanr, db->nextOut,
+							db->fragsize) == 0) {
+			pr_error("qcount < 2 and no ring room!");
+		}
+		db->nextOut += db->fragsize;
+		if (db->nextOut >= db->rawbuf + db->dmasize)
+			db->nextOut -= db->dmasize;
+		db->count -= db->fragsize;
+		db->total_bytes += db->dma_fragsize;
+		db->dma_qcount++;
+	}
+
+	/* wake up anybody listening */
+	if (waitqueue_active(&db->wait))
+		wake_up(&db->wait);
+}
+
+
+static void
+adc_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct	au1550_state *s = (struct au1550_state *)dev_id;
+	struct	dmabuf  *dp = &s->dma_adc;
+	u32	obytes;
+	char	*obuf;
+
+	/* Pull the buffer from the dma queue.
+	*/
+	au1xxx_dbdma_get_dest(dp->dmanr, (void *)(&obuf), &obytes);
+
+	if ((dp->count + obytes) > dp->dmasize) {
+		/* Overrun. Stop ADC and log the error
+		*/
+		stop_adc(s);
+		dp->error++;
+		pr_error("adc overrun");
+		return;
+	}
+
+	/* Put a new empty buffer on the destination DMA.
+	*/
+	au1xxx_dbdma_put_dest(dp->dmanr, dp->nextIn, dp->dma_fragsize);
+
+	dp->nextIn += dp->dma_fragsize;
+	if (dp->nextIn >= dp->rawbuf + dp->dmasize)
+		dp->nextIn -= dp->dmasize;
+
+	dp->count += obytes;
+	dp->total_bytes += obytes;
+
+	/* wake up anybody listening
+	*/
+	if (waitqueue_active(&dp->wait))
+		wake_up(&dp->wait);
+
+}
+
+static loff_t
+au1550_llseek(struct file *file, loff_t offset, int origin)
+{
+	return -ESPIPE;
+}
+
+static int
+au1550_open_mixdev(struct inode *inode, struct file *file)
+{
+	file->private_data = &au1550_state;
+	return 0;
+}
+
+static int
+au1550_release_mixdev(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+#define I2S_CODEC	"Wolfson WM8731"
+
+static int
+au1550_ioctl_mixdev(struct inode *inode, struct file *file,
+			       unsigned int cmd, unsigned long arg)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	struct i2s_codec *codec = s->codec;
+	int i, val = 0;
+
+	if (cmd == SOUND_MIXER_INFO) {
+		mixer_info info;
+		memset(&info, 0, sizeof(info));
+		strlcpy(info.id, I2S_CODEC, sizeof(info.id));
+		strlcpy(info.name, I2S_CODEC, sizeof(info.name));
+		info.modify_counter = codec->modcnt;
+		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+			return -EFAULT;
+		return 0;
+	}
+	if (cmd == SOUND_OLD_MIXER_INFO) {
+		_old_mixer_info info;
+		memset(&info, 0, sizeof(info));
+		strlcpy(info.id, I2S_CODEC, sizeof(info.id));
+		strlcpy(info.name, I2S_CODEC, sizeof(info.name));
+		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+			return -EFAULT;
+		return 0;
+	}
+
+	if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
+		return -EINVAL;
+
+	if (cmd == OSS_GETVERSION)
+		return put_user(SOUND_VERSION, (int __user *)arg);
+
+	if (_SIOC_DIR(cmd) == _SIOC_READ) {
+		switch (_IOC_NR(cmd)) {
+		case SOUND_MIXER_RECSRC: /* give them the current record source */
+			val = codec->mixer_state[SOUND_MIXER_RECSRC];
+			break;
+
+		case SOUND_MIXER_DEVMASK: /* give them the supported mixers */
+			val = codec->supported_mixers;
+			break;
+
+		case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
+			val = codec->record_sources;
+			break;
+
+		case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
+			val = codec->stereo_mixers;
+			break;
+
+		case SOUND_MIXER_CAPS:
+			val = SOUND_CAP_EXCL_INPUT;
+			break;
+
+		default: /* read a specific mixer */
+			i = _IOC_NR(cmd);
+
+			if (!i2s_supported_mixer(codec, i))
+				return -EINVAL;
+
+			val = codec->mixer_state[i];
+ 			break;
+		}
+		return put_user(val, (int __user *)arg);
+	}
+
+	if (_SIOC_DIR(cmd) == (_SIOC_WRITE|_SIOC_READ)) {
+		codec->modcnt++;
+		if (get_user(val, (int __user *)arg))
+			return -EFAULT;
+
+		switch (_IOC_NR(cmd)) {
+		case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
+			if (!val) return 0;
+			if (!(val &= codec->record_sources)) return -EINVAL;
+
+			codec->set_mixer(codec, SOUND_MIXER_RECSRC, val);
+
+			return 0;
+		default: /* write a specific mixer */
+			i = _IOC_NR(cmd);
+
+			if (!i2s_supported_mixer(codec, i))
+				return -EINVAL;
+
+			codec->set_mixer(codec, i, val);
+
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static struct file_operations au1550_mixer_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= au1550_llseek,
+	.ioctl		= au1550_ioctl_mixdev,
+	.open		= au1550_open_mixdev,
+	.release	= au1550_release_mixdev,
+};
+
+static int
+drain_dac(struct au1550_state *s, int nonblock)
+{
+	unsigned long   flags;
+	int             count, tmo;
+
+	if (s->dma_dac.mapped || !s->dma_dac.ready || s->dma_dac.stopped)
+		return 0;
+
+	for (;;) {
+		spin_lock_irqsave(&s->lock, flags);
+		count = s->dma_dac.count;
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count <= 0)
+			break;
+		if (signal_pending(current))
+			break;
+		if (nonblock)
+			return -EBUSY;
+		tmo = 1000 * count / SAMP_RATE;
+		tmo /= s->dma_dac.dma_bytes_per_sample;
+		au1550_delay(tmo);
+	}
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+	return 0;
+}
+
+static inline u8 S16_TO_U8(s16 ch)
+{
+	return (u8) (ch >> 8) + 0x80;
+}
+static inline s16 U8_TO_S16(u8 ch)
+{
+	return (s16) (ch - 0x80) << 8;
+}
+
+/*
+ * Translates user samples to dma buffer suitable for audio DAC data:
+ *     If mono, copy left channel to right channel in dma buffer.
+ *     If 8 bit samples, cvt to 16-bit before writing to dma buffer.
+ *     If interpolating (no VRA), duplicate every audio frame src_factor times.
+ */
+static int
+translate_from_user(struct dmabuf *db, char* dmabuf, char* userbuf,
+							       int dmacount)
+{
+	int             sample, i;
+	int             interp_bytes_per_sample;
+	int             num_samples;
+	int             mono = (db->num_channels == 1);
+	char            usersample[12];
+	s16             ch, dmasample[6];
+
+	if (db->sample_size == 16 && !mono && db->src_factor == 1) {
+		/* no translation necessary, just copy
+		*/
+		if (copy_from_user(dmabuf, userbuf, dmacount))
+			return -EFAULT;
+		return dmacount;
+	}
+
+	interp_bytes_per_sample = db->dma_bytes_per_sample * db->src_factor;
+	num_samples = dmacount / interp_bytes_per_sample;
+
+	for (sample = 0; sample < num_samples; sample++) {
+		if (copy_from_user(usersample, userbuf,
+				   db->user_bytes_per_sample)) {
+			return -EFAULT;
+		}
+
+		for (i = 0; i < db->num_channels; i++) {
+			if (db->sample_size == 8)
+				ch = U8_TO_S16(usersample[i]);
+			else
+				ch = *((s16 *) (&usersample[i * 2]));
+			dmasample[i] = ch;
+			if (mono)
+				dmasample[i + 1] = ch;	/* right channel */
+		}
+
+		/* duplicate every audio frame src_factor times
+		*/
+		for (i = 0; i < db->src_factor; i++)
+			memcpy(dmabuf + i * db->dma_bytes_per_sample,
+			       dmasample, db->dma_bytes_per_sample);
+
+		userbuf += db->user_bytes_per_sample;
+		dmabuf += interp_bytes_per_sample;
+	}
+
+	return num_samples * interp_bytes_per_sample;
+}
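
To make the translation concrete: for an 8-bit mono stream with src_factor 1, each user byte becomes one four-byte DMA frame, the sample being widened by U8_TO_S16() and copied into both channels. For example:

/*
 *   user byte 0xc0  ->  U8_TO_S16(0xc0) = (0xc0 - 0x80) << 8 = 0x4000
 *   DMA frame       ->  { 0x4000, 0x4000 }   (left, right; 4 bytes)
 *   with src_factor N, the same frame is written N times in a row
 */
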
+
+/*
+ * Translates audio ADC samples to user buffer:
+ *     If mono, send only left channel to user buffer.
+ *     If 8 bit samples, cvt from 16 to 8 bit before writing to user buffer.
+ *     If decimating (no VRA), skip over src_factor audio frames.
+ */
+static int
+translate_to_user(struct dmabuf *db, char* userbuf, char* dmabuf,
+							     int dmacount)
+{
+	int             sample, i;
+	int             interp_bytes_per_sample;
+	int             num_samples;
+	int             mono = (db->num_channels == 1);
+	char            usersample[12];
+
+	if (db->sample_size == 16 && !mono && db->src_factor == 1) {
+		/* no translation necessary, just copy
+		*/
+		if (copy_to_user(userbuf, dmabuf, dmacount))
+			return -EFAULT;
+		return dmacount;
+	}
+
+	interp_bytes_per_sample = db->dma_bytes_per_sample * db->src_factor;
+	num_samples = dmacount / interp_bytes_per_sample;
+
+	for (sample = 0; sample < num_samples; sample++) {
+		for (i = 0; i < db->num_channels; i++) {
+			if (db->sample_size == 8)
+				usersample[i] =
+					S16_TO_U8(*((s16 *) (&dmabuf[i * 2])));
+			else
+				*((s16 *) (&usersample[i * 2])) =
+					*((s16 *) (&dmabuf[i * 2]));
+		}
+
+		if (copy_to_user(userbuf, usersample,
+				 db->user_bytes_per_sample)) {
+			return -EFAULT;
+		}
+
+		userbuf += db->user_bytes_per_sample;
+		dmabuf += interp_bytes_per_sample;
+	}
+
+	return num_samples * interp_bytes_per_sample;
+}
+
+/*
+ * Copy audio data to/from user buffer from/to dma buffer, taking care
+ * that we wrap when reading/writing the dma buffer. Returns actual byte
+ * count written to or read from the dma buffer.
+ */
+static int
+copy_dmabuf_user(struct dmabuf *db, char* userbuf, int count, int to_user)
+{
+	char           *bufptr = to_user ? db->nextOut : db->nextIn;
+	char           *bufend = db->rawbuf + db->dmasize;
+	int             cnt, ret;
+
+	if (bufptr + count > bufend) {
+		int             partial = (int) (bufend - bufptr);
+		if (to_user) {
+			if ((cnt = translate_to_user(db, userbuf,
+						     bufptr, partial)) < 0)
+				return cnt;
+			ret = cnt;
+			if ((cnt = translate_to_user(db, userbuf + partial,
+						     db->rawbuf,
+						     count - partial)) < 0)
+				return cnt;
+			ret += cnt;
+		} else {
+			if ((cnt = translate_from_user(db, bufptr, userbuf,
+						       partial)) < 0)
+				return cnt;
+			ret = cnt;
+			if ((cnt = translate_from_user(db, db->rawbuf,
+						       userbuf + partial,
+						       count - partial)) < 0)
+				return cnt;
+			ret += cnt;
+		}
+	} else {
+		if (to_user)
+			ret = translate_to_user(db, userbuf, bufptr, count);
+		else
+			ret = translate_from_user(db, bufptr, userbuf, count);
+	}
+
+	return ret;
+}
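
copy_dmabuf_user() splits a request that would run past the end of the ring into two translate calls. For instance, in the straight-copy case (16-bit stereo, src_factor 1) with dmasize = 4096, nextOut at offset 3800 and a 500-byte request:

/*
 *   partial   = bufend - bufptr = 4096 - 3800 = 296 bytes (translated first)
 *   remainder = 500 - 296       = 204 bytes, translated from rawbuf[0]
 *   return value                = 296 + 204  = 500
 */
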
+
+static ssize_t
+au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	struct dmabuf  *db = &s->dma_adc;
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t         ret;
+	unsigned long   flags;
+	int             cnt, usercnt, avail;
+
+	if (db->mapped)
+		return -ENXIO;
+	if (!access_ok(VERIFY_WRITE, buffer, count))
+		return -EFAULT;
+	ret = 0;
+
+	count *= db->cnt_factor;
+
+	down(&s->sem);
+	add_wait_queue(&db->wait, &wait);
+
+	while (count > 0) {
+		/* wait for samples in ADC dma buffer
+		*/
+		do {
+			if (db->stopped)
+				start_adc(s);
+			spin_lock_irqsave(&s->lock, flags);
+			avail = db->count;
+			if (avail <= 0)
+				__set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irqrestore(&s->lock, flags);
+			if (avail <= 0) {
+				if (file->f_flags & O_NONBLOCK) {
+					if (!ret)
+						ret = -EAGAIN;
+					goto out;
+				}
+				up(&s->sem);
+				schedule();
+				if (signal_pending(current)) {
+					if (!ret)
+						ret = -ERESTARTSYS;
+					goto out2;
+				}
+				down(&s->sem);
+			}
+		} while (avail <= 0);
+
+		/* copy from nextOut to user
+		*/
+		if ((cnt = copy_dmabuf_user(db, buffer,
+					    count > avail ?
+					    avail : count, 1)) < 0) {
+			if (!ret)
+				ret = -EFAULT;
+			goto out;
+		}
+
+		spin_lock_irqsave(&s->lock, flags);
+		db->count -= cnt;
+		db->nextOut += cnt;
+		if (db->nextOut >= db->rawbuf + db->dmasize)
+			db->nextOut -= db->dmasize;
+		spin_unlock_irqrestore(&s->lock, flags);
+
+		count -= cnt;
+		usercnt = cnt / db->cnt_factor;
+		buffer += usercnt;
+		ret += usercnt;
+	}			/* while (count > 0) */
+
+out:
+	up(&s->sem);
+out2:
+	remove_wait_queue(&db->wait, &wait);
+	set_current_state(TASK_RUNNING);
+	return ret;
+}
+
+static ssize_t
+au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	struct dmabuf  *db = &s->dma_dac;
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t         ret = 0;
+	unsigned long   flags;
+	int             cnt, usercnt, avail;
+
+#ifdef AU1000_VERBOSE_DEBUG
+	dbg("write: count=%d", count);
+#endif
+
+	if (db->mapped)
+		return -ENXIO;
+	if (!access_ok(VERIFY_READ, buffer, count))
+		return -EFAULT;
+
+	count *= db->cnt_factor;
+
+	down(&s->sem);
+	add_wait_queue(&db->wait, &wait);
+
+	while (count > 0) {
+		/* wait for space in playback buffer
+		*/
+		do {
+			spin_lock_irqsave(&s->lock, flags);
+			avail = (int) db->dmasize - db->count;
+			if (avail <= 0)
+				__set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irqrestore(&s->lock, flags);
+			if (avail <= 0) {
+				if (file->f_flags & O_NONBLOCK) {
+					if (!ret)
+						ret = -EAGAIN;
+					goto out;
+				}
+				up(&s->sem);
+				schedule();
+				if (signal_pending(current)) {
+					if (!ret)
+						ret = -ERESTARTSYS;
+					goto out2;
+				}
+				down(&s->sem);
+			}
+		} while (avail <= 0);
+
+		/* copy from user to nextIn
+		*/
+		if ((cnt = copy_dmabuf_user(db, (char *) buffer,
+					    count > avail ?
+					    avail : count, 0)) < 0) {
+			if (!ret)
+				ret = -EFAULT;
+			goto out;
+		}
+
+		spin_lock_irqsave(&s->lock, flags);
+		db->count += cnt;
+		db->nextIn += cnt;
+		if (db->nextIn >= db->rawbuf + db->dmasize)
+			db->nextIn -= db->dmasize;
+
+		/* If the data is available, we want to keep two buffers
+		 * on the dma queue.  If the queue count reaches zero,
+		 * we know the dma has stopped.
+		 */
+		while ((db->dma_qcount < 2) && (db->count >= db->fragsize)) {
+			if (au1xxx_dbdma_put_source(db->dmanr, db->nextOut,
+							db->fragsize) == 0) {
+				pr_error("qcount < 2 and no ring room!");
+			}
+			db->nextOut += db->fragsize;
+			if (db->nextOut >= db->rawbuf + db->dmasize)
+				db->nextOut -= db->dmasize;
+			db->count -= db->fragsize;
+			db->total_bytes += db->dma_fragsize;
+			if (db->dma_qcount == 0)
+				start_dac(s);
+			db->dma_qcount++;
+		}
+		spin_unlock_irqrestore(&s->lock, flags);
+
+		count -= cnt;
+		usercnt = cnt / db->cnt_factor;
+		buffer += usercnt;
+		ret += usercnt;
+	}			/* while (count > 0) */
+
+out:
+	up(&s->sem);
+out2:
+	remove_wait_queue(&db->wait, &wait);
+	set_current_state(TASK_RUNNING);
+	return ret;
+}
+
+
+/* No kernel lock - we have our own spinlock */
+static unsigned int
+au1550_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	unsigned long   flags;
+	unsigned int    mask = 0;
+
+	if (file->f_mode & FMODE_WRITE) {
+		if (!s->dma_dac.ready)
+			return 0;
+		poll_wait(file, &s->dma_dac.wait, wait);
+	}
+	if (file->f_mode & FMODE_READ) {
+		if (!s->dma_adc.ready)
+			return 0;
+		poll_wait(file, &s->dma_adc.wait, wait);
+	}
+
+	spin_lock_irqsave(&s->lock, flags);
+
+	if (file->f_mode & FMODE_READ) {
+		if (s->dma_adc.count >= (signed)s->dma_adc.dma_fragsize)
+			mask |= POLLIN | POLLRDNORM;
+	}
+	if (file->f_mode & FMODE_WRITE) {
+		if (s->dma_dac.mapped) {
+			if (s->dma_dac.count >=
+			    (signed)s->dma_dac.dma_fragsize)
+				mask |= POLLOUT | POLLWRNORM;
+		} else {
+			if ((signed) s->dma_dac.dmasize >=
+			    s->dma_dac.count + (signed)s->dma_dac.dma_fragsize)
+				mask |= POLLOUT | POLLWRNORM;
+		}
+	}
+	spin_unlock_irqrestore(&s->lock, flags);
+	return mask;
+}
+
+static int
+au1550_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	struct dmabuf  *db;
+	unsigned long   size;
+	int ret = 0;
+
+	lock_kernel();
+	down(&s->sem);
+	if (vma->vm_flags & VM_WRITE)
+		db = &s->dma_dac;
+	else if (vma->vm_flags & VM_READ)
+		db = &s->dma_adc;
+	else {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (vma->vm_pgoff != 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+	size = vma->vm_end - vma->vm_start;
+	if (size > (PAGE_SIZE << db->buforder)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (remap_pfn_range(vma, vma->vm_start,
+			     page_to_pfn(virt_to_page(db->rawbuf)),
+			     size, vma->vm_page_prot)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	vma->vm_flags &= ~VM_IO;
+	db->mapped = 1;
+out:
+	up(&s->sem);
+	unlock_kernel();
+	return ret;
+}
+
+
+#ifdef AU1000_VERBOSE_DEBUG
+static struct ioctl_str_t {
+	unsigned int    cmd;
+	const char     *str;
+} ioctl_str[] = {
+	{SNDCTL_DSP_RESET, "SNDCTL_DSP_RESET"},
+	{SNDCTL_DSP_SYNC, "SNDCTL_DSP_SYNC"},
+	{SNDCTL_DSP_SPEED, "SNDCTL_DSP_SPEED"},
+	{SNDCTL_DSP_STEREO, "SNDCTL_DSP_STEREO"},
+	{SNDCTL_DSP_GETBLKSIZE, "SNDCTL_DSP_GETBLKSIZE"},
+	{SNDCTL_DSP_SAMPLESIZE, "SNDCTL_DSP_SAMPLESIZE"},
+	{SNDCTL_DSP_CHANNELS, "SNDCTL_DSP_CHANNELS"},
+	{SOUND_PCM_WRITE_CHANNELS, "SOUND_PCM_WRITE_CHANNELS"},
+	{SOUND_PCM_WRITE_FILTER, "SOUND_PCM_WRITE_FILTER"},
+	{SNDCTL_DSP_POST, "SNDCTL_DSP_POST"},
+	{SNDCTL_DSP_SUBDIVIDE, "SNDCTL_DSP_SUBDIVIDE"},
+	{SNDCTL_DSP_SETFRAGMENT, "SNDCTL_DSP_SETFRAGMENT"},
+	{SNDCTL_DSP_GETFMTS, "SNDCTL_DSP_GETFMTS"},
+	{SNDCTL_DSP_SETFMT, "SNDCTL_DSP_SETFMT"},
+	{SNDCTL_DSP_GETOSPACE, "SNDCTL_DSP_GETOSPACE"},
+	{SNDCTL_DSP_GETISPACE, "SNDCTL_DSP_GETISPACE"},
+	{SNDCTL_DSP_NONBLOCK, "SNDCTL_DSP_NONBLOCK"},
+	{SNDCTL_DSP_GETCAPS, "SNDCTL_DSP_GETCAPS"},
+	{SNDCTL_DSP_GETTRIGGER, "SNDCTL_DSP_GETTRIGGER"},
+	{SNDCTL_DSP_SETTRIGGER, "SNDCTL_DSP_SETTRIGGER"},
+	{SNDCTL_DSP_GETIPTR, "SNDCTL_DSP_GETIPTR"},
+	{SNDCTL_DSP_GETOPTR, "SNDCTL_DSP_GETOPTR"},
+	{SNDCTL_DSP_MAPINBUF, "SNDCTL_DSP_MAPINBUF"},
+	{SNDCTL_DSP_MAPOUTBUF, "SNDCTL_DSP_MAPOUTBUF"},
+	{SNDCTL_DSP_SETSYNCRO, "SNDCTL_DSP_SETSYNCRO"},
+	{SNDCTL_DSP_SETDUPLEX, "SNDCTL_DSP_SETDUPLEX"},
+	{SNDCTL_DSP_GETODELAY, "SNDCTL_DSP_GETODELAY"},
+	{SNDCTL_DSP_GETCHANNELMASK, "SNDCTL_DSP_GETCHANNELMASK"},
+	{SNDCTL_DSP_BIND_CHANNEL, "SNDCTL_DSP_BIND_CHANNEL"},
+	{OSS_GETVERSION, "OSS_GETVERSION"},
+	{SOUND_PCM_READ_RATE, "SOUND_PCM_READ_RATE"},
+	{SOUND_PCM_READ_CHANNELS, "SOUND_PCM_READ_CHANNELS"},
+	{SOUND_PCM_READ_BITS, "SOUND_PCM_READ_BITS"},
+	{SOUND_PCM_READ_FILTER, "SOUND_PCM_READ_FILTER"}
+};
+#endif
+
+static int
+dma_count_done(struct dmabuf *db)
+{
+	if (db->stopped)
+		return 0;
+
+	return db->dma_fragsize - au1xxx_get_dma_residue(db->dmanr);
+}
+
+
+static int
+au1550_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+	unsigned long   flags;
+	audio_buf_info  abinfo;
+	count_info      cinfo;
+	int             count;
+	int             val, mapped, ret, diff;
+
+	mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
+		((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
+
+#ifdef AU1000_VERBOSE_DEBUG
+	for (count=0; count<sizeof(ioctl_str)/sizeof(ioctl_str[0]); count++) {
+		if (ioctl_str[count].cmd == cmd)
+			break;
+	}
+	if (count < sizeof(ioctl_str) / sizeof(ioctl_str[0]))
+		dbg("ioctl %s, arg=0x%lx", ioctl_str[count].str, arg);
+	else
+		dbg("ioctl 0x%x unknown, arg=0x%lx", cmd, arg);
+#endif
+
+	switch (cmd) {
+	case OSS_GETVERSION:
+		return put_user(SOUND_VERSION, (int *) arg);
+
+	case SNDCTL_DSP_SYNC:
+		if (file->f_mode & FMODE_WRITE)
+			return drain_dac(s, file->f_flags & O_NONBLOCK);
+		return 0;
+
+	case SNDCTL_DSP_SETDUPLEX:
+		return 0;
+
+	case SNDCTL_DSP_GETCAPS:
+		return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME |
+				DSP_CAP_TRIGGER | DSP_CAP_MMAP, (int *)arg);
+
+	case SNDCTL_DSP_RESET:
+		if (file->f_mode & FMODE_WRITE) {
+			stop_dac(s);
+			synchronize_irq();
+			s->dma_dac.count = s->dma_dac.total_bytes = 0;
+			s->dma_dac.nextIn = s->dma_dac.nextOut =
+				s->dma_dac.rawbuf;
+		}
+		if (file->f_mode & FMODE_READ) {
+			stop_adc(s);
+			synchronize_irq();
+			s->dma_adc.count = s->dma_adc.total_bytes = 0;
+			s->dma_adc.nextIn = s->dma_adc.nextOut =
+				s->dma_adc.rawbuf;
+		}
+		return 0;
+
+	case SNDCTL_DSP_SPEED:
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (val >= 0) {
+			if (file->f_mode & FMODE_READ) {
+				stop_adc(s);
+				set_adc_rate(s, val);
+			}
+			if (file->f_mode & FMODE_WRITE) {
+				stop_dac(s);
+				set_dac_rate(s, val);
+			}
+			if (s->open_mode & FMODE_READ)
+				if ((ret = prog_dmabuf_adc(s)))
+					return ret;
+			if (s->open_mode & FMODE_WRITE)
+				if ((ret = prog_dmabuf_dac(s)))
+					return ret;
+		}
+		return put_user((file->f_mode & FMODE_READ) ?
+				s->dma_adc.sample_rate :
+				s->dma_dac.sample_rate,
+				(int *)arg);
+
+	case SNDCTL_DSP_STEREO:
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (file->f_mode & FMODE_READ) {
+			stop_adc(s);
+			s->dma_adc.num_channels = val ? 2 : 1;
+			if ((ret = prog_dmabuf_adc(s)))
+				return ret;
+		}
+		if (file->f_mode & FMODE_WRITE) {
+			stop_dac(s);
+			s->dma_dac.num_channels = val ? 2 : 1;
+			if ((ret = prog_dmabuf_dac(s)))
+				return ret;
+		}
+		return 0;
+
+	case SNDCTL_DSP_CHANNELS:
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (val != 0) {
+			if (file->f_mode & FMODE_READ) {
+				if (val < 0 || val > 2)
+					return -EINVAL;
+				stop_adc(s);
+				s->dma_adc.num_channels = val;
+				if ((ret = prog_dmabuf_adc(s)))
+					return ret;
+			}
+			if (file->f_mode & FMODE_WRITE) {
+				switch (val) {
+				case 1:
+				case 2:
+					break;
+				default:
+					return -EINVAL;
+				}
+
+				stop_dac(s);
+				s->dma_dac.num_channels = val;
+				if ((ret = prog_dmabuf_dac(s)))
+					return ret;
+			}
+		}
+		return put_user(val, (int *) arg);
+
+	case SNDCTL_DSP_GETFMTS:	/* Returns a mask */
+		return put_user(AFMT_S16_LE | AFMT_U8, (int *) arg);
+
+	case SNDCTL_DSP_SETFMT:	/* Selects ONE fmt */
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (val != AFMT_QUERY) {
+			if (file->f_mode & FMODE_READ) {
+				stop_adc(s);
+				if (val == AFMT_S16_LE)
+					s->dma_adc.sample_size = 16;
+				else {
+					val = AFMT_U8;
+					s->dma_adc.sample_size = 8;
+				}
+				if ((ret = prog_dmabuf_adc(s)))
+					return ret;
+			}
+			if (file->f_mode & FMODE_WRITE) {
+				stop_dac(s);
+				if (val == AFMT_S16_LE)
+					s->dma_dac.sample_size = 16;
+				else {
+					val = AFMT_U8;
+					s->dma_dac.sample_size = 8;
+				}
+				if ((ret = prog_dmabuf_dac(s)))
+					return ret;
+			}
+		} else {
+			if (file->f_mode & FMODE_READ)
+				val = (s->dma_adc.sample_size == 16) ?
+					AFMT_S16_LE : AFMT_U8;
+			else
+				val = (s->dma_dac.sample_size == 16) ?
+					AFMT_S16_LE : AFMT_U8;
+		}
+		return put_user(val, (int *) arg);
+
+	case SNDCTL_DSP_POST:
+		return 0;
+
+	case SNDCTL_DSP_GETTRIGGER:
+		val = 0;
+		spin_lock_irqsave(&s->lock, flags);
+		if (file->f_mode & FMODE_READ && !s->dma_adc.stopped)
+			val |= PCM_ENABLE_INPUT;
+		if (file->f_mode & FMODE_WRITE && !s->dma_dac.stopped)
+			val |= PCM_ENABLE_OUTPUT;
+		spin_unlock_irqrestore(&s->lock, flags);
+		return put_user(val, (int *) arg);
+
+	case SNDCTL_DSP_SETTRIGGER:
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (file->f_mode & FMODE_READ) {
+			if (val & PCM_ENABLE_INPUT)
+				start_adc(s);
+			else
+				stop_adc(s);
+		}
+		if (file->f_mode & FMODE_WRITE) {
+			if (val & PCM_ENABLE_OUTPUT)
+				start_dac(s);
+			else
+				stop_dac(s);
+		}
+		return 0;
+
+	case SNDCTL_DSP_GETOSPACE:
+		if (!(file->f_mode & FMODE_WRITE))
+			return -EINVAL;
+		abinfo.fragsize = s->dma_dac.fragsize;
+		spin_lock_irqsave(&s->lock, flags);
+		count = s->dma_dac.count;
+		count -= dma_count_done(&s->dma_dac);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count < 0)
+			count = 0;
+		abinfo.bytes = (s->dma_dac.dmasize - count) /
+			s->dma_dac.cnt_factor;
+		abinfo.fragstotal = s->dma_dac.numfrag;
+		abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
+#ifdef AU1000_VERBOSE_DEBUG
+		dbg("bytes=%d, fragments=%d", abinfo.bytes, abinfo.fragments);
+#endif
+		return copy_to_user((void *) arg, &abinfo,
+				    sizeof(abinfo)) ? -EFAULT : 0;
+
+	case SNDCTL_DSP_GETISPACE:
+		if (!(file->f_mode & FMODE_READ))
+			return -EINVAL;
+		abinfo.fragsize = s->dma_adc.fragsize;
+		spin_lock_irqsave(&s->lock, flags);
+		count = s->dma_adc.count;
+		count += dma_count_done(&s->dma_adc);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count < 0)
+			count = 0;
+		abinfo.bytes = count / s->dma_adc.cnt_factor;
+		abinfo.fragstotal = s->dma_adc.numfrag;
+		abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
+		return copy_to_user((void *) arg, &abinfo,
+				    sizeof(abinfo)) ? -EFAULT : 0;
+
+	case SNDCTL_DSP_NONBLOCK:
+		file->f_flags |= O_NONBLOCK;
+		return 0;
+
+	case SNDCTL_DSP_GETODELAY:
+		if (!(file->f_mode & FMODE_WRITE))
+			return -EINVAL;
+		spin_lock_irqsave(&s->lock, flags);
+		count = s->dma_dac.count;
+		count -= dma_count_done(&s->dma_dac);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count < 0)
+			count = 0;
+		count /= s->dma_dac.cnt_factor;
+		return put_user(count, (int *) arg);
+
+	case SNDCTL_DSP_GETIPTR:
+		if (!(file->f_mode & FMODE_READ))
+			return -EINVAL;
+		spin_lock_irqsave(&s->lock, flags);
+		cinfo.bytes = s->dma_adc.total_bytes;
+		count = s->dma_adc.count;
+		if (!s->dma_adc.stopped) {
+			diff = dma_count_done(&s->dma_adc);
+			count += diff;
+			cinfo.bytes += diff;
+			cinfo.ptr = virt_to_phys(s->dma_adc.nextIn) + diff -
+				virt_to_phys(s->dma_adc.rawbuf);
+		} else
+			cinfo.ptr = virt_to_phys(s->dma_adc.nextIn) -
+				virt_to_phys(s->dma_adc.rawbuf);
+		if (s->dma_adc.mapped)
+			s->dma_adc.count &= (s->dma_adc.dma_fragsize-1);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count < 0)
+			count = 0;
+		cinfo.blocks = count >> s->dma_adc.fragshift;
+		return copy_to_user((void *) arg, &cinfo,
+				    sizeof(cinfo)) ? -EFAULT : 0;
+
+	case SNDCTL_DSP_GETOPTR:
+		if (!(file->f_mode & FMODE_WRITE))
+			return -EINVAL;
+		spin_lock_irqsave(&s->lock, flags);
+		cinfo.bytes = s->dma_dac.total_bytes;
+		count = s->dma_dac.count;
+		if (!s->dma_dac.stopped) {
+			diff = dma_count_done(&s->dma_dac);
+			count -= diff;
+			cinfo.bytes += diff;
+			cinfo.ptr = virt_to_phys(s->dma_dac.nextOut) + diff -
+				virt_to_phys(s->dma_dac.rawbuf);
+		} else
+			cinfo.ptr = virt_to_phys(s->dma_dac.nextOut) -
+				virt_to_phys(s->dma_dac.rawbuf);
+		if (s->dma_dac.mapped)
+			s->dma_dac.count &= (s->dma_dac.dma_fragsize-1);
+		spin_unlock_irqrestore(&s->lock, flags);
+		if (count < 0)
+			count = 0;
+		cinfo.blocks = count >> s->dma_dac.fragshift;
+		return copy_to_user((void *) arg, &cinfo,
+				    sizeof(cinfo)) ? -EFAULT : 0;
+
+	case SNDCTL_DSP_GETBLKSIZE:
+		if (file->f_mode & FMODE_WRITE)
+			return put_user(s->dma_dac.fragsize, (int *) arg);
+		else
+			return put_user(s->dma_adc.fragsize, (int *) arg);
+
+	case SNDCTL_DSP_SETFRAGMENT:
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (file->f_mode & FMODE_READ) {
+			stop_adc(s);
+			s->dma_adc.ossfragshift = val & 0xffff;
+			s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
+			if (s->dma_adc.ossfragshift < 4)
+				s->dma_adc.ossfragshift = 4;
+			if (s->dma_adc.ossfragshift > 15)
+				s->dma_adc.ossfragshift = 15;
+			if (s->dma_adc.ossmaxfrags < 4)
+				s->dma_adc.ossmaxfrags = 4;
+			if ((ret = prog_dmabuf_adc(s)))
+				return ret;
+		}
+		if (file->f_mode & FMODE_WRITE) {
+			stop_dac(s);
+			s->dma_dac.ossfragshift = val & 0xffff;
+			s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
+			if (s->dma_dac.ossfragshift < 4)
+				s->dma_dac.ossfragshift = 4;
+			if (s->dma_dac.ossfragshift > 15)
+				s->dma_dac.ossfragshift = 15;
+			if (s->dma_dac.ossmaxfrags < 4)
+				s->dma_dac.ossmaxfrags = 4;
+			if ((ret = prog_dmabuf_dac(s)))
+				return ret;
+		}
+		return 0;
+
+	case SNDCTL_DSP_SUBDIVIDE:
+		if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
+		    (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
+			return -EINVAL;
+		if (get_user(val, (int *) arg))
+			return -EFAULT;
+		if (val != 1 && val != 2 && val != 4)
+			return -EINVAL;
+		if (file->f_mode & FMODE_READ) {
+			stop_adc(s);
+			s->dma_adc.subdivision = val;
+			if ((ret = prog_dmabuf_adc(s)))
+				return ret;
+		}
+		if (file->f_mode & FMODE_WRITE) {
+			stop_dac(s);
+			s->dma_dac.subdivision = val;
+			if ((ret = prog_dmabuf_dac(s)))
+				return ret;
+		}
+		return 0;
+
+	case SOUND_PCM_READ_RATE:
+		return put_user((file->f_mode & FMODE_READ) ?
+				s->dma_adc.sample_rate :
+				s->dma_dac.sample_rate,
+				(int *)arg);
+
+	case SOUND_PCM_READ_CHANNELS:
+		if (file->f_mode & FMODE_READ)
+			return put_user(s->dma_adc.num_channels, (int *)arg);
+		else
+			return put_user(s->dma_dac.num_channels, (int *)arg);
+
+	case SOUND_PCM_READ_BITS:
+		if (file->f_mode & FMODE_READ)
+			return put_user(s->dma_adc.sample_size, (int *)arg);
+		else
+			return put_user(s->dma_dac.sample_size, (int *)arg);
+
+	case SOUND_PCM_WRITE_FILTER:
+	case SNDCTL_DSP_SETSYNCRO:
+	case SOUND_PCM_READ_FILTER:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
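+/* Illustrative user-space sketch (not part of this driver): a minimal
+ * OSS client exercising the ioctls handled above.  The device path,
+ * buffer and lack of error checking are assumptions for brevity.
+ *
+ *	int fd = open("/dev/dsp", O_WRONLY);
+ *	int fmt = AFMT_S16_LE, chans = 2, rate = 48000;
+ *
+ *	ioctl(fd, SNDCTL_DSP_SETFMT, &fmt);
+ *	ioctl(fd, SNDCTL_DSP_CHANNELS, &chans);
+ *	ioctl(fd, SNDCTL_DSP_SPEED, &rate);
+ *	write(fd, pcm_buf, pcm_len);
+ *	ioctl(fd, SNDCTL_DSP_SYNC, 0);
+ *	close(fd);
+ */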
+
+static int
+au1550_open(struct inode *inode, struct file *file)
+{
+	int             minor = MINOR(inode->i_rdev);
+	DECLARE_WAITQUEUE(wait, current);
+	struct au1550_state *s = &au1550_state;
+	int             ret;
+
+#ifdef AU1000_VERBOSE_DEBUG
+	if (file->f_flags & O_NONBLOCK)
+		dbg("%s: non-blocking", __FUNCTION__);
+	else
+		dbg("%s: blocking", __FUNCTION__);
+#endif
+
+	file->private_data = s;
+	/* wait for device to become free */
+	down(&s->open_sem);
+	while (s->open_mode & file->f_mode) {
+		if (file->f_flags & O_NONBLOCK) {
+			up(&s->open_sem);
+			return -EBUSY;
+		}
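+		/* Queue ourselves and go TASK_INTERRUPTIBLE before
+		 * dropping open_sem, so a wake_up() from release()
+		 * between the up() and schedule() below is not lost.
+		 */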
+		add_wait_queue(&s->open_wait, &wait);
+		__set_current_state(TASK_INTERRUPTIBLE);
+		up(&s->open_sem);
+		schedule();
+		remove_wait_queue(&s->open_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+		down(&s->open_sem);
+	}
+
+	stop_dac(s);
+	stop_adc(s);
+
+	if (file->f_mode & FMODE_READ) {
+		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
+			s->dma_adc.subdivision = s->dma_adc.total_bytes = 0;
+		s->dma_adc.num_channels = 1;
+		s->dma_adc.sample_size = 8;
+		set_adc_rate(s, 8000);
+		if ((minor & 0xf) == SND_DEV_DSP16)
+			s->dma_adc.sample_size = 16;
+	}
+
+	if (file->f_mode & FMODE_WRITE) {
+		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
+			s->dma_dac.subdivision = s->dma_dac.total_bytes = 0;
+		s->dma_dac.num_channels = 1;
+		s->dma_dac.sample_size = 8;
+		set_dac_rate(s, 8000);
+		if ((minor & 0xf) == SND_DEV_DSP16)
+			s->dma_dac.sample_size = 16;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		if ((ret = prog_dmabuf_adc(s)))
+			return ret;
+	}
+	if (file->f_mode & FMODE_WRITE) {
+		if ((ret = prog_dmabuf_dac(s)))
+			return ret;
+	}
+
+	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
+	up(&s->open_sem);
+	init_MUTEX(&s->sem);
+	return 0;
+}
+
+static int
+au1550_release(struct inode *inode, struct file *file)
+{
+	struct au1550_state *s = (struct au1550_state *)file->private_data;
+
+	lock_kernel();
+
+	if (file->f_mode & FMODE_WRITE) {
+		unlock_kernel();
+		drain_dac(s, file->f_flags & O_NONBLOCK);
+		lock_kernel();
+	}
+
+	down(&s->open_sem);
+	if (file->f_mode & FMODE_WRITE) {
+		stop_dac(s);
+		kfree(s->dma_dac.rawbuf);
+		s->dma_dac.rawbuf = NULL;
+	}
+	if (file->f_mode & FMODE_READ) {
+		stop_adc(s);
+		kfree(s->dma_adc.rawbuf);
+		s->dma_adc.rawbuf = NULL;
+	}
+	s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE));
+	up(&s->open_sem);
+	wake_up(&s->open_wait);
+	unlock_kernel();
+	return 0;
+}
+
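+/* OSS /dev/dsp entry points, registered from au1550_probe() through
+ * register_sound_dsp() below.
+ */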
+static struct file_operations au1550_audio_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= au1550_llseek,
+	.read		= au1550_read,
+	.write		= au1550_write,
+	.poll		= au1550_poll,
+	.ioctl		= au1550_ioctl,
+	.mmap		= au1550_mmap,
+	.open		= au1550_open,
+	.release	= au1550_release,
+};
+
+/* Set up an internal clock for the PSC3.  This will then get
+ * driven out of the Au1550 as the master.
+ */
+static void
+intclk_setup(void)
+{
+	uint clk, rate;
+
+	/* Wire up Freq4 as a clock for the PSC3.
+	 * We know SMBus uses Freq3.
+	 * By making changes to this rate, plus the word strobe
+	 * size, we can make fine adjustments to the actual data rate.
+	 */
+	rate = get_au1x00_speed();
+#ifdef TRY_441KHz
+	rate /= (11 * 1000000);
+#else
+	rate /= (12 * 1000000);
+#endif
+
+	/* The FRDIV in the frequency control is (FRDIV + 1) * 2
+	*/
+	rate /= 2;
+	rate--;
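+	/* Worked example (hypothetical numbers, not from the original
+	 * driver): with a 396 MHz core clock and the 12 MHz target
+	 * above, rate = 396 / 12 = 33, then 33 / 2 = 16 and minus one
+	 * gives FRDIV = 15, so Freq4 = 396 MHz / ((15 + 1) * 2), or
+	 * about 12.4 MHz.
+	 */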
+	clk = au_readl(SYS_FREQCTRL1);
+	au_sync();
+	clk &= ~(SYS_FC_FRDIV4_MASK | SYS_FC_FS4);
+	clk |= (rate << SYS_FC_FRDIV4_BIT);
+	clk |= SYS_FC_FE4;
+	au_writel(clk, SYS_FREQCTRL1);
+	au_sync();
+
+	/* Set up the clock source routing to get Freq4 to PSC3_intclk.
+	*/
+	clk = au_readl(SYS_CLKSRC);
+	au_sync();
+	clk &= ~0x01f00000;
+	clk |= (6 << 22);
+	au_writel(clk, SYS_CLKSRC);
+	au_sync();
+}
+
+static int __devinit
+au1550_probe(void)
+{
+	struct au1550_state *s = &au1550_state;
+	int val;
+	volatile psc_i2s_t *ip;
+#ifdef AU1550_DEBUG
+	char proc_str[80];
+#endif
+
+	memset(s, 0, sizeof(struct au1550_state));
+
+	init_waitqueue_head(&s->dma_adc.wait);
+	init_waitqueue_head(&s->dma_dac.wait);
+	init_waitqueue_head(&s->open_wait);
+	init_MUTEX(&s->open_sem);
+	spin_lock_init(&s->lock);
+
+	s->codec = &au1550_i2s_codec;
+	s->psc_addr = (volatile psc_i2s_t *)I2S_PSC_BASE;
+	ip = s->psc_addr;
+
+	if (!request_region(CPHYSADDR(ip), 0x30, AU1550_MODULE_NAME)) {
+		pr_error("I2S Audio ports in use");
+		return -EBUSY;
+	}
+
+	/* Allocate the DMA Channels
+	*/
+	if ((s->dma_dac.dmanr = au1xxx_dbdma_chan_alloc(DBDMA_MEM_CHAN,
+	    DBDMA_I2S_TX_CHAN, dac_dma_interrupt, (void *)s)) == 0) {
+		pr_error("Can't get DAC DMA");
+		goto err_dma1;
+	}
+	au1xxx_dbdma_set_devwidth(s->dma_dac.dmanr, 16);
+	if (au1xxx_dbdma_ring_alloc(s->dma_dac.dmanr,
+					NUM_DBDMA_DESCRIPTORS) == 0) {
+		pr_error("Can't get DAC DMA descriptors");
+		goto err_dma2;
+	}
+
+	if ((s->dma_adc.dmanr = au1xxx_dbdma_chan_alloc(DBDMA_I2S_RX_CHAN,
+	    DBDMA_MEM_CHAN, adc_dma_interrupt, (void *)s)) == 0) {
+		pr_error("Can't get ADC DMA");
+		goto err_dma2;
+	}
+	au1xxx_dbdma_set_devwidth(s->dma_adc.dmanr, 16);
+	if (au1xxx_dbdma_ring_alloc(s->dma_adc.dmanr,
+					NUM_DBDMA_DESCRIPTORS) == 0) {
+		pr_error("Can't get ADC DMA descriptors");
+		goto err_dev1;
+	}
+
+	pr_info("DAC: DMA%d, ADC: DMA%d", DBDMA_I2S_TX_CHAN, DBDMA_I2S_RX_CHAN);
+
+	/* register devices */
+
+	if ((s->dev_audio = register_sound_dsp(&au1550_audio_fops, -1)) < 0)
+		goto err_dev1;
+
+	if ((s->dev_mixer = register_sound_mixer(&au1550_mixer_fops, -1)) < 0)
+		goto err_dev2;
+
+#ifdef AU1550_DEBUG
+	/* initialize the debug proc entry */
+	s->ps = create_proc_read_entry(AU1550_MODULE_NAME, 0, NULL,
+				       proc_au1550_dump, NULL);
+#endif /* AU1550_DEBUG */
+
+	intclk_setup();
+
+	/* The GPIO for the appropriate PSC was configured by the
+	 * board specific start up.
+	 *
+	 * configure PSC for I2S Audio
+	 */
+	ip->psc_ctrl = PSC_CTRL_DISABLE;	/* Disable PSC */
+	au_sync();
+	ip->psc_sel = (PSC_SEL_CLK_INTCLK | PSC_SEL_PS_I2SMODE);
+	au_sync();
+
+	/* Enable PSC
+	*/
+	ip->psc_ctrl = PSC_CTRL_ENABLE;
+	au_sync();
+
+	/* Wait for PSC ready.
+	*/
+	do {
+		val = ip->psc_i2sstat;
+		au_sync();
+	} while ((val & PSC_I2SSTAT_SR) == 0);
+
+	/* Configure I2S controller.
+	 * Deep FIFO, 16-bit sample, DMA, make sure DMA matches fifo size.
+	 * Actual I2S mode (first bit delayed by one clock).
+	 * Master mode (We provide the clock from the PSC).
+	 */
+	val = PSC_I2SCFG_SET_LEN(16);
+#ifdef TRY_441KHz
+	/* This really should be 250, but it appears that all of the
+	 * PLLs, dividers and so on in the chain shift it.  That's the
+	 * problem with sourcing the clock instead of letting the very
+	 * stable codec provide it.  But, the PSC doesn't appear to want
+	 * to work in slave mode, so this is what we get.  It's not
+	 * studio quality timing, but it's good enough for listening
+	 * to mp3s.
+	 */
+	val |= PSC_I2SCFG_SET_WS(252);
+#else
+	val |= PSC_I2SCFG_SET_WS(250);
+#endif
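+	/* Rough arithmetic, assuming the word strobe width divides the
+	 * PSC internal clock directly: the nominal 12 MHz clock with a
+	 * width of 250 gives 12 MHz / 250 = 48 kHz frames, while the
+	 * ~11 MHz / 252 case above lands near, but not exactly on,
+	 * 44.1 kHz.
+	 */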
+	val |= PSC_I2SCFG_RT_FIFO8 | PSC_I2SCFG_TT_FIFO8 |
+	       PSC_I2SCFG_BI | PSC_I2SCFG_XM;
+
+	ip->psc_i2scfg = val;
+	au_sync();
+	val |= PSC_I2SCFG_DE_ENABLE;
+	ip->psc_i2scfg = val;
+	au_sync();
+
+	/* Wait for Device ready.
+	*/
+	do {
+		val = ip->psc_i2sstat;
+		au_sync();
+	} while ((val & PSC_I2SSTAT_DR) == 0);
+
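+	/* Read the configuration back (presumably to flush the posted
+	 * write) before handing control to the codec init.
+	 */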
+	val = ip->psc_i2scfg;
+	au_sync();
+
+	s->codec->init_codec(s->codec);
+
+	return 0;
+
+ err_dev2:
+	unregister_sound_dsp(s->dev_audio);
+ err_dev1:
+	au1xxx_dbdma_chan_free(s->dma_adc.dmanr);
+ err_dma2:
+	au1xxx_dbdma_chan_free(s->dma_dac.dmanr);
+ err_dma1:
+	release_region(CPHYSADDR(I2S_PSC_BASE), 0x30);
+
+	return -ENODEV;
+}
+
+static void __devexit
+au1550_remove(void)
+{
+	struct au1550_state *s = &au1550_state;
+
+	if (!s)
+		return;
+#ifdef AU1550_DEBUG
+	if (s->ps)
+		remove_proc_entry(AU1550_MODULE_NAME, NULL);
+#endif /* AU1550_DEBUG */
+	synchronize_irq();
+	au1xxx_dbdma_chan_free(s->dma_adc.dmanr);
+	au1xxx_dbdma_chan_free(s->dma_dac.dmanr);
+	release_region(CPHYSADDR(I2S_PSC_BASE), 0x30);
+	unregister_sound_dsp(s->dev_audio);
+	unregister_sound_mixer(s->dev_mixer);
+}
+
+static int __init
+init_au1550(void)
+{
+	return au1550_probe();
+}
+
+static void __exit
+cleanup_au1550(void)
+{
+	au1550_remove();
+}
+
+module_init(init_au1550);
+module_exit(cleanup_au1550);
+
+MODULE_AUTHOR("Advanced Micro Devices (AMD), dan@embeddededge.com");
+MODULE_DESCRIPTION("Au1550 I2S Audio Driver");
