untrusted comment: verify with openbsd-64-base.pub
RWQq6XmS4eDAcVmCs9KUJiefVR74e+0Sf5NE2Nt1N8BSC7i+a8fuwJjErW4HENOgNmfQ6E1O4iYZJENYY/IE0ZZXkRIhhBk+zAs=

OpenBSD 6.4 errata 021, Aug 9, 2019:

Intel CPUs have another cross-privilege side-channel attack (SWAPGS).

Apply by doing:
    signify -Vep /etc/signify/openbsd-64-base.pub -x 021_swapgs.patch.sig \
        -m - | (cd /usr/src && patch -p0)

And then rebuild and install a new kernel:
    KK=`sysctl -n kern.osversion | cut -d# -f1`
    cd /usr/src/sys/arch/`machine`/compile/$KK
    make obj
    make config
    make
    make install

Index: sys/arch/amd64/amd64/codepatch.c
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/codepatch.c,v
retrieving revision 1.7
diff -u -p -r1.7 codepatch.c
--- sys/arch/amd64/amd64/codepatch.c	13 Jul 2018 08:30:34 -0000	1.7
+++ sys/arch/amd64/amd64/codepatch.c	7 Aug 2019 20:45:51 -0000
@@ -39,6 +39,9 @@ extern struct codepatch codepatch_end;
 extern char __cptext_start[];
 extern char __cptext_end[];
 
+void codepatch_control_flow(uint16_t _tag, void *_func, int _opcode,
+    const char *_op);
+
 void
 codepatch_fill_nop(void *caddr, uint16_t len)
 {
@@ -149,28 +152,42 @@ codepatch_replace(uint16_t tag, void *co
 	DBGPRINT("patched %d places", i);
 }
 
-/* Patch with calls to func */
 void
 codepatch_call(uint16_t tag, void *func)
 {
+	/* 0xe8 == call near */
+	codepatch_control_flow(tag, func, 0xe8, "call");
+}
+
+void
+codepatch_jmp(uint16_t tag, void *func)
+{
+	/* 0xe9 == jmp near */
+	codepatch_control_flow(tag, func, 0xe9, "jmp");
+}
+
+/* Patch with call or jump to func */
+void
+codepatch_control_flow(uint16_t tag, void *func, int opcode, const char *op)
+{
 	struct codepatch *patch;
 	unsigned char *rwaddr;
 	int32_t offset;
 	int i = 0;
 	vaddr_t rwmap = 0;
 
-	DBGPRINT("patching tag %u with call %p", tag, func);
+	DBGPRINT("patching tag %u with %s %p", tag, op, func);
 
 	for (patch = &codepatch_begin; patch < &codepatch_end; patch++) {
 		if (patch->tag != tag)
 			continue;
 		if (patch->len < 5)
-			panic("%s: can't replace len %u with call at %#lx",
-			    __func__, patch->len, patch->addr);
+			panic("%s: can't replace len %u with %s at %#lx",
+			    __func__, patch->len, op, patch->addr);
 
 		offset = (vaddr_t)func - (patch->addr + 5);
 		rwaddr = codepatch_maprw(&rwmap, patch->addr);
-		rwaddr[0] = 0xe8; /* call near */
+		rwaddr[0] = opcode;
 		memcpy(rwaddr + 1, &offset, sizeof(offset));
 		codepatch_fill_nop(rwaddr + 5, patch->len - 5);
 		i++;
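The codepatch.c change generalizes codepatch_call(): both a near call
(opcode 0xe8) and a near jmp (0xe9) are 5-byte instructions whose rel32
operand is counted from the end of the instruction, hence the "+ 5" in
the offset computation above.  A minimal user-space sketch of just the
encoding step (emit_control_flow is an illustrative name, not a kernel
function):

    #include <stdint.h>
    #include <string.h>

    /*
     * Assemble "call rel32" (0xe8) or "jmp rel32" (0xe9) at insn_addr,
     * targeting func.  The rel32 is relative to the address of the
     * *next* instruction, i.e. insn_addr + 5.
     */
    static void
    emit_control_flow(uint8_t *insn_addr, const void *func, uint8_t opcode)
    {
            int32_t offset = (int32_t)((uintptr_t)func -
                ((uintptr_t)insn_addr + 5));

            insn_addr[0] = opcode;
            memcpy(insn_addr + 1, &offset, sizeof(offset));
    }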
Index: sys/arch/amd64/amd64/cpu.c
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/cpu.c,v
retrieving revision 1.129.2.1
diff -u -p -r1.129.2.1 cpu.c
--- sys/arch/amd64/amd64/cpu.c	28 May 2019 14:23:50 -0000	1.129.2.1
+++ sys/arch/amd64/amd64/cpu.c	7 Aug 2019 20:45:51 -0000
@@ -168,20 +168,66 @@ void
 replacemeltdown(void)
 {
 	static int replacedone = 0;
-	int s;
+	struct cpu_info *ci = &cpu_info_primary;
+	int swapgs_vuln = 0, s;
 
 	if (replacedone)
 		return;
 	replacedone = 1;
 
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+		int family = ci->ci_family;
+		int model = ci->ci_model;
+
+		swapgs_vuln = 1;
+		if (family == 0x6 &&
+		    (model == 0x37 || model == 0x4a || model == 0x4c ||
+		     model == 0x4d || model == 0x5a || model == 0x5d ||
+		     model == 0x6e || model == 0x65 || model == 0x75)) {
+			/* Silvermont, Airmont */
+			swapgs_vuln = 0;
+		} else if (family == 0x6 && (model == 0x85 || model == 0x57)) {
+			/* Knights Landing, Knights Mill */
+			swapgs_vuln = 0;
+		}
+	}
+
 	s = splhigh();
 	if (!cpu_meltdown)
 		codepatch_nop(CPTAG_MELTDOWN_NOP);
-	else if (pmap_use_pcid) {
-		extern long _pcid_set_reuse;
-		DPRINTF("%s: codepatching PCID use", __func__);
-		codepatch_replace(CPTAG_PCID_SET_REUSE, &_pcid_set_reuse,
-		    PCID_SET_REUSE_SIZE);
+	else {
+		extern long alltraps_kern_meltdown;
+
+		/* eliminate conditional branch in alltraps */
+		codepatch_jmp(CPTAG_MELTDOWN_ALLTRAPS, &alltraps_kern_meltdown);
+
+		/* enable reuse of PCID for U-K page tables */
+		if (pmap_use_pcid) {
+			extern long _pcid_set_reuse;
+			DPRINTF("%s: codepatching PCID use", __func__);
+			codepatch_replace(CPTAG_PCID_SET_REUSE,
+			    &_pcid_set_reuse, PCID_SET_REUSE_SIZE);
+		}
+	}
+
+	/*
+	 * CVE-2019-1125: if the CPU has SMAP and it's not vulnerable to
+	 * Meltdown, then it's protected both from speculatively mis-skipping
+	 * the swapgs during interrupts of userspace and from speculatively
+	 * mis-taking a swapgs during interrupts while already in the kernel,
+	 * as the speculative path will fault from SMAP.  Warning: enabling
+	 * WRGSBASE would break this 'protection'.
+	 *
+	 * Otherwise, if the CPU's swapgs can't be speculated over and it
+	 * _is_ vulnerable to Meltdown then the %cr3 change will serialize
+	 * user->kern transitions, but we still need to mitigate the
+	 * already-in-kernel cases.
+	 */
+	if (!cpu_meltdown && (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
+		codepatch_nop(CPTAG_FENCE_NO_SAFE_SMAP);
+	} else if (!swapgs_vuln && cpu_meltdown) {
+		codepatch_nop(CPTAG_FENCE_SWAPGS_MIS_TAKEN);
+	}
 	splx(s);
 }
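The fence selection at the bottom of replacemeltdown() is a three-way
decision.  Restated as a stand-alone sketch (the enum and function are
hypothetical; the kernel acts by NOP'ing the lfences in place rather
than returning a value):

    #include <stdbool.h>

    enum fence_choice {
            KEEP_BOTH_FENCES,       /* default: rely on both lfences */
            NOP_MIS_TAKEN_ONLY,     /* swapgs can't be speculated over;
                                       %cr3 switch serializes user->kern */
            NOP_BOTH_FENCES,        /* SMAP faults the speculative paths */
    };

    static enum fence_choice
    choose_swapgs_fences(bool cpu_meltdown, bool has_smap, bool swapgs_vuln)
    {
            if (!cpu_meltdown && has_smap)
                    return NOP_BOTH_FENCES;
            if (cpu_meltdown && !swapgs_vuln)
                    return NOP_MIS_TAKEN_ONLY;
            return KEEP_BOTH_FENCES;
    }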
Index: sys/arch/amd64/amd64/locore.S
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/locore.S,v
retrieving revision 1.111.2.1
diff -u -p -r1.111.2.1 locore.S
--- sys/arch/amd64/amd64/locore.S	28 May 2019 14:23:50 -0000	1.111.2.1
+++ sys/arch/amd64/amd64/locore.S	7 Aug 2019 20:45:51 -0000
@@ -581,7 +581,7 @@ XUsyscall_meltdown:
 	 * (thank you, Intel), at which point we'll continue at the
 	 * "movq CPUVAR(KERN_RSP),%rax" after Xsyscall below.
 	 * In case the CPU speculates past the mov to cr3, we put a
-	 * retpoline-style pause-jmp-to-pause loop.
+	 * retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
Index: sys/arch/amd64/amd64/vector.S
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/amd64/vector.S,v
retrieving revision 1.77.2.1
diff -u -p -r1.77.2.1 vector.S
--- sys/arch/amd64/amd64/vector.S	28 May 2019 14:23:50 -0000	1.77.2.1
+++ sys/arch/amd64/amd64/vector.S	7 Aug 2019 20:45:51 -0000
@@ -141,6 +141,7 @@ calltrap_specstk:	# special stack path
 	.text
 	.globl	INTRENTRY_LABEL(calltrap_specstk)
 INTRENTRY_LABEL(calltrap_specstk):
+	lfence			# block speculation through jz above
 	cld
 	SMAP_CLAC
 	movq	%rsp,%rdi
@@ -183,6 +184,7 @@ IDTVEC(trap03)
 	.text
 	.global	INTRENTRY_LABEL(trap03)
 INTRENTRY_LABEL(trap03):
+	FENCE_NO_SAFE_SMAP
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -313,7 +315,8 @@ IDTVEC(trap0d)
 .Lhandle_doreti:
 	/* iretq faulted; resume in a stub that acts like we got a #GP */
 	leaq	.Lhandle_doreti_resume(%rip),%rcx
-1:	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
+1:	lfence		/* block speculation through conditionals above */
+	movq	%rcx,24(%rsp)		/* over %r[cd]x and err to %rip */
 	popq	%rcx
 	popq	%rdx
 	addq	$8,%rsp			/* pop the err code */
@@ -388,12 +391,13 @@ KUTEXT_PAGE_START
 	 * the kernel page tables (thank you, Intel) will make us
 	 * continue at the "movq CPUVAR(KERN_RSP),%rax" after alltraps
 	 * below.  In case the CPU speculates past the mov to cr3,
-	 * we put a retpoline-style pause-jmp-to-pause loop.
+	 * we put a retpoline-style pause-lfence-jmp-to-pause loop.
 	 */
 Xalltraps:
 	swapgs
 	movq	%rax,CPUVAR(SCRATCH)
 	movq	CPUVAR(KERN_CR3),%rax
+	.byte	0x66, 0x90	/* space for FENCE_SWAPGS_MIS_TAKEN below */
 	movq	%rax,%cr3
 0:	pause
 	lfence
@@ -403,9 +407,12 @@ KUTEXT_PAGE_END
 
 KTEXT_PAGE_START
 	.align	NBPG, 0xcc
 GENTRY(alltraps)
+	CODEPATCH_START
 	testb	$SEL_RPL,24(%rsp)
 	je	alltraps_kern
 	swapgs
+	CODEPATCH_END(CPTAG_MELTDOWN_ALLTRAPS)
+	FENCE_SWAPGS_MIS_TAKEN
 	movq	%rax,CPUVAR(SCRATCH)
 	.space	(0b - Xalltraps) - (. - alltraps), 0x90
@@ -428,9 +435,15 @@ END(alltraps)
 
 /*
  * Traps from supervisor mode (kernel)
+ * If we're not mitigating Meltdown, then there's a conditional branch
+ * above and we may need a fence to mitigate CVE-2019-1125.  If we're
+ * doing Meltdown mitigation there's just an unconditional branch and
+ * we can skip the fence.
  */
 	_ALIGN_TRAPS
 GENTRY(alltraps_kern)
+	FENCE_NO_SAFE_SMAP
+GENTRY(alltraps_kern_meltdown)
 	INTR_ENTRY_KERN
 	INTR_SAVE_MOST_GPRS_NO_ADJ
 	sti
@@ -467,6 +480,7 @@ spl_lowered:
 	.popsection
 #endif /* DIAGNOSTIC */
 END(alltraps_kern)
+END(alltraps_kern_meltdown)
 KTEXT_PAGE_END
Index: sys/arch/amd64/include/codepatch.h
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/include/codepatch.h,v
retrieving revision 1.8.2.1
diff -u -p -r1.8.2.1 codepatch.h
--- sys/arch/amd64/include/codepatch.h	28 May 2019 14:23:50 -0000	1.8.2.1
+++ sys/arch/amd64/include/codepatch.h	7 Aug 2019 20:45:51 -0000
@@ -30,7 +30,8 @@ __cptext void codepatch_unmaprw(vaddr_t
 __cptext void codepatch_fill_nop(void *caddr, uint16_t len);
 __cptext void codepatch_nop(uint16_t tag);
 __cptext void codepatch_replace(uint16_t tag, void *code, size_t len);
-__cptext void codepatch_call(uint16_t tag, void *func);
+__cptext void codepatch_call(uint16_t _tag, void *_func);
+__cptext void codepatch_jmp(uint16_t _tag, void *_func);
 void codepatch_disable(void);
 #endif /* !_LOCORE */
 
@@ -58,9 +59,12 @@ void codepatch_disable(void);
 #define CPTAG_XRSTOR		4
 #define CPTAG_XSAVE		5
 #define CPTAG_MELTDOWN_NOP	6
-#define CPTAG_PCID_SET_REUSE	7
-#define CPTAG_MDS		8
-#define CPTAG_MDS_VMM		9
+#define CPTAG_MELTDOWN_ALLTRAPS	7
+#define CPTAG_PCID_SET_REUSE	8
+#define CPTAG_MDS		9
+#define CPTAG_MDS_VMM		10
+#define CPTAG_FENCE_SWAPGS_MIS_TAKEN	11
+#define CPTAG_FENCE_NO_SAFE_SMAP	12
 
 /*
  * As stac/clac SMAP instructions are 3 bytes, we want the fastest
@@ -78,6 +82,17 @@ void codepatch_disable(void);
 #define	SMAP_CLAC	CODEPATCH_START			;\
 			SMAP_NOP			;\
 			CODEPATCH_END(CPTAG_CLAC)
+
+/* CVE-2019-1125: block speculation after swapgs */
+#define	FENCE_SWAPGS_MIS_TAKEN \
+	CODEPATCH_START			; \
+	lfence				; \
+	CODEPATCH_END(CPTAG_FENCE_SWAPGS_MIS_TAKEN)
+/* block speculation when a correct SMAP impl would have been enough */
+#define	FENCE_NO_SAFE_SMAP \
+	CODEPATCH_START			; \
+	lfence				; \
+	CODEPATCH_END(CPTAG_FENCE_NO_SAFE_SMAP)
 
 #define PCID_SET_REUSE_SIZE	12
 #define	PCID_SET_REUSE_NOP	\
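Each FENCE_* macro above expands to a 3-byte lfence (0x0f 0xae 0xe8)
bracketed by CODEPATCH_START/CODEPATCH_END, which records the region so
replacemeltdown() can erase it at boot on CPUs that don't need the
fence.  A sketch of the erase step, assuming a simple single-byte NOP
fill (the kernel's codepatch_fill_nop() may use a denser encoding, and
the real routine writes through a writeable remapping of kernel text):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Overwrite a recorded patch region with no-ops, as codepatch_nop()
     * does for the lfence in FENCE_SWAPGS_MIS_TAKEN / FENCE_NO_SAFE_SMAP.
     */
    static void
    fill_nop_sketch(uint8_t *addr, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    addr[i] = 0x90;         /* 1-byte nop */
    }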
Index: sys/arch/amd64/include/frameasm.h
===================================================================
RCS file: /var/cvs/src/sys/arch/amd64/include/frameasm.h,v
retrieving revision 1.20
diff -u -p -r1.20 frameasm.h
--- sys/arch/amd64/include/frameasm.h	23 Jul 2018 17:54:04 -0000	1.20
+++ sys/arch/amd64/include/frameasm.h	7 Aug 2019 20:45:51 -0000
@@ -63,6 +63,7 @@
 	testb	$SEL_RPL,24(%rsp)	; \
 	je	INTRENTRY_LABEL(label)	; \
 	swapgs				; \
+	FENCE_SWAPGS_MIS_TAKEN		; \
 	movq	%rax,CPUVAR(SCRATCH)	; \
 	CODEPATCH_START			; \
 	movq	CPUVAR(KERN_CR3),%rax	; \
@@ -73,6 +74,7 @@
 	_ALIGN_TRAPS			; \
 	.global	INTRENTRY_LABEL(label)	; \
INTRENTRY_LABEL(label):	/* from kernel */ \
+	FENCE_NO_SAFE_SMAP		; \
 	INTR_ENTRY_KERN			; \
 	jmp	99f			; \
 	_ALIGN_TRAPS			; \
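For reference, the conditional the SWAPGS attack speculates through is
the "testb $SEL_RPL,24(%rsp)" in INTRENTRY above: it inspects the
requested-privilege-level bits of the %cs selector saved in the trap
frame to decide whether the trap came from user mode, and thus whether
a swapgs is needed.  A stand-alone restatement (assuming the usual x86
RPL mask of 3):

    #include <stdbool.h>
    #include <stdint.h>

    #define SEL_RPL	3	/* low two selector bits: requested privilege level */

    /*
     * A trap came from user mode iff the saved %cs has a non-zero RPL;
     * kernel %cs always has RPL 0.  Mispredicting this test is what lets
     * the CPU speculatively take, or skip, the swapgs.
     */
    static bool
    trap_from_user(uint16_t saved_cs)
    {
            return (saved_cs & SEL_RPL) != 0;
    }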