diff -urpN --exclude-from=/home/davej/.exclude bk-linus/arch/x86_64/kernel/mtrr.c linux-2.5/arch/x86_64/kernel/mtrr.c
--- bk-linus/arch/x86_64/kernel/mtrr.c	2002-11-21 02:12:59.000000000 +0000
+++ linux-2.5/arch/x86_64/kernel/mtrr.c	2002-11-21 17:57:06.000000000 +0000
@@ -1223,17 +1223,17 @@ static void __init mtrr_setup (void)
 	if (!cpu_has_mtrr || boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 
 		return; 
 
-		 /* Query the width (in bits) of the physical
-		   addressable memory on the Hammer family. */
-		if ((cpuid_eax (0x80000000) >= 0x80000008)) {
-			u32 phys_addr;
-			phys_addr = cpuid_eax (0x80000008) & 0xff;
-			size_or_mask = ~((1L << phys_addr) - 1);
-			/*
-			 * top bits MBZ as its beyond the addressable range.
-			 * bottom bits MBZ as we don't care about lower 12 bits of addr.
-			 */
-			size_and_mask = (~size_or_mask) & 0x000ffffffffff000L;
+	/* Query the width (in bits) of the physically
+	   addressable memory on the Hammer family. */
+	if (cpuid_eax (0x80000000) >= 0x80000008) {
+		u32 phys_addr;
+		phys_addr = cpuid_eax (0x80000008) & 0xff;
+		size_or_mask = ~((1L << phys_addr) - 1);
+		/*
+		 * top bits MBZ as they're beyond the addressable range.
+		 * bottom bits MBZ as we don't care about the lower 12 bits of the address.
+		 */
+		size_and_mask = (~size_or_mask) & 0x000ffffffffff000L;
 	}
 }
 
@@ -1251,27 +1251,27 @@ void mtrr_init_cpu(int cpu)
 		mtrr_setup();
 #else
 	if (cpu == 0) { 
-	mtrr_setup();
-	get_mtrr_state (&smp_mtrr_state);
+		mtrr_setup();
+		get_mtrr_state (&smp_mtrr_state);
 	} else { 
-	u64 mask;
-	int count;
-	struct set_mtrr_context ctxt;
+		u64 mask;
+		int count;
+		struct set_mtrr_context ctxt;
 
 		/* Note that this is not ideal, since the cache is
 		   only flushed/disabled for this CPU while the MTRRs
 		   are changed, but changing this requires more
 		   invasive changes to the way the kernel boots  */
-	set_mtrr_prepare (&ctxt);
-	mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
-	set_mtrr_done (&ctxt);
+		set_mtrr_prepare (&ctxt);
+		mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
+		set_mtrr_done (&ctxt);
 
-	/*  Use the atomic bitops to update the global mask  */
+		/*  Use the atomic bitops to update the global mask  */
 		for (count = 0; count < (sizeof mask) * 8; ++count) {
 			if (mask & 1)
-			set_bit (count, &smp_changes_mask);
-		mask >>= 1;
-	}
+				set_bit (count, &smp_changes_mask);
+			mask >>= 1;
+		}
 	} 
 #endif
 }
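
For reference, a minimal userspace sketch (not part of the patch) of the mask
computation in mtrr_setup() above, assuming the 40-bit physical address width
that CPUID leaf 0x80000008 typically reports on the Hammer family; phys_addr
is hardcoded here for illustration instead of read via cpuid:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* CPUID 0x80000008, EAX bits 7:0 report the physical address
	   width in bits; 40 is typical for Hammer. */
	uint32_t phys_addr = 40;

	/* All bits at or above the addressable range set, as in
	   size_or_mask. */
	uint64_t size_or_mask = ~((1ULL << phys_addr) - 1);

	/* Only the addressable bits, minus the low 12 (MTRRs work at
	   4KB granularity), as in size_and_mask. */
	uint64_t size_and_mask = ~size_or_mask & 0x000ffffffffff000ULL;

	printf("size_or_mask  = %#018llx\n", (unsigned long long) size_or_mask);
	printf("size_and_mask = %#018llx\n", (unsigned long long) size_and_mask);
	return 0;
}

With phys_addr = 40 this prints 0xffffff0000000000 and 0x000000fffffff000.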
