cpu/x86/smm: Add support for long mode

Enable long mode in SMM handler.
x86_32 isn't affected by this change.

As the rsm instruction used to leave SMM doesn't restore MSRs, drop
back to protected mode after running smi_handler and restore the
IA32_EFER MSR (whose LME bit enables long mode) to its previous value.
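
For illustration, the backup/restore around the mode switch is
equivalent to this C sketch using coreboot's MSR helpers from
<cpu/x86/msr.h>; the handler has to do it in assembly since it runs
outside the C environment, and the function names here are illustrative:

    #include <cpu/x86/msr.h>

    static msr_t ia32efer_backup;

    static void backup_ia32_efer(void)
    {
            /* Save IA32_EFER (lo/hi halves) before enabling long mode */
            ia32efer_backup = rdmsr(IA32_EFER);
    }

    static void restore_ia32_efer(void)
    {
            /* Undo the long mode enable, as rsm won't do it for us */
            wrmsr(IA32_EFER, ia32efer_backup);
    }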

NOTE: This commit does NOT introduce a new security model. It uses the
      same page tables as the rest of the firmware.
      This can be a security risk if someone is able to manipulate the
      page tables stored in ROM at runtime. USE FOR TESTING ONLY!

Tested on QEMU Q35.
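
A minimal smoke test, assuming a coreboot image built for the QEMU Q35
mainboard at build/coreboot.rom:

    qemu-system-x86_64 -M q35 -bios build/coreboot.rom -serial stdio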

Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35681
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 036bc83..340840f 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -8,6 +8,7 @@
  */
 
 #include <cpu/x86/lapic_def.h>
+#include <cpu/x86/msr.h>
 
 /*
  * +--------------------------------+ 0xaffff
@@ -42,6 +43,14 @@
 
 #define SMM_HANDLER_OFFSET 0x0000
 
+#if defined(__x86_64__)
+.bss
+ia32efer_backup_eax:
+.space 4
+ia32efer_backup_edx:
+.space 4
+#endif
+
 /* initially SMM is some sort of real mode. Let gcc know
  * how to treat the SMM handler stub
  */
@@ -159,12 +168,44 @@
 	/* Get SMM revision */
 	movl	$0xa8000 + 0x7efc, %ebx	/* core 0 address */
 	subl	%ebp, %ebx		/* subtract core X offset */
+
+#if defined(__x86_64__)
+	/* Back up IA32_EFER. Preserves ebx. */
+	movl	$(IA32_EFER), %ecx
+	rdmsr
+	movl	%eax, ia32efer_backup_eax
+	movl	%edx, ia32efer_backup_edx
+
+	/* Enable long mode. Preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+
+	mov	(%ebx), %rdi
+
+#else
 	movl	(%ebx), %eax
 	pushl	%eax
+#endif
 
-	/* Call 32bit C handler */
+	/* Call C handler */
 	call	smi_handler
 
+#if defined(__x86_64__)
+	/*
+	 * RSM does not restore MSRs, and IA32_EFER was modified to enter
+	 * long mode. IA32_EFER.LME cannot be cleared while long mode is
+	 * active, so drop to protected mode before restoring IA32_EFER.
+	 */
+
+	/* Disable long mode. */
+	#include <cpu/x86/64bit/exit32.inc>
+
+	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
+	movl	$(IA32_EFER), %ecx
+	movl	ia32efer_backup_eax, %eax
+	movl	ia32efer_backup_edx, %edx
+	wrmsr
+#endif
+
 	/* To return, just do rsm. It will "clean up" protected mode */
 	rsm
 
@@ -190,6 +231,9 @@
 	.word	0xffff, 0x0000
 	.byte	0x00, 0x93, 0xcf, 0x00
 
+	/* gdt selector 0x18, flat code segment (64-bit) */
+	.word   0xffff, 0x0000
+	.byte   0x00, 0x9b, 0xaf, 0x00
 smm_gdt_end:
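
For readers decoding the new descriptor: a sketch of the same 64-bit
code segment as a C constant (gdt_code64 is an illustrative name, not
one from the tree):

    #include <stdint.h>

    static const uint64_t gdt_code64 =
            ((uint64_t)0xffff <<  0) |  /* limit[15:0] */
            ((uint64_t)0x0000 << 16) |  /* base[15:0] */
            ((uint64_t)0x00   << 32) |  /* base[23:16] */
            ((uint64_t)0x9b   << 40) |  /* access: P=1, DPL=0, code, execute/read, accessed */
            ((uint64_t)0xaf   << 48) |  /* flags: G=1, L=1 (64-bit), D=0; limit[19:16] = 0xf */
            ((uint64_t)0x00   << 56);   /* base[31:24] */

The L=1/D=0 combination is what marks the segment as 64-bit code; base
and limit are ignored for code segments in long mode.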