Merge tag 'x86_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Improve the check for whether the kernel supports WP mappings so that
   it can accommodate a XenPV guest, given how the latter sets up the
   PAT machinery

 - Now that the retbleed nightmare is public, here's the first round of
   fallout fixes:

     * Fix a build failure on 32-bit due to a missing include

     * Remove an untraining point in the espfix64 return path

     * Other small cleanups

* tag 'x86_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/bugs: Remove apostrophe typo
  um: Add missing apply_returns()
  x86/entry: Remove UNTRAIN_RET from native_irq_return_ldt
  x86/bugs: Mark retbleed_strings static
  x86/pat: Fix x86_has_pat_wp()
  x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
Linus Torvalds 2022-07-17 08:27:30 -07:00
commit 59c80f053d
5 changed files with 19 additions and 5 deletions

@@ -432,6 +432,10 @@ void apply_retpolines(s32 *start, s32 *end)
 {
 }
 
+void apply_returns(s32 *start, s32 *end)
+{
+}
+
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
 }

@@ -727,7 +727,6 @@ native_irq_return_ldt:
         pushq   %rdi                            /* Stash user RDI */
         swapgs                                  /* to kernel GS */
         SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi   /* to kernel CR3 */
-        UNTRAIN_RET
         movq    PER_CPU_VAR(espfix_waddr), %rdi
         movq    %rax, (0*8)(%rdi)               /* user RAX */

@@ -793,7 +793,7 @@ enum retbleed_mitigation_cmd {
         RETBLEED_CMD_IBPB,
 };
 
-const char * const retbleed_strings[] = {
+static const char * const retbleed_strings[] = {
         [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
         [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
         [RETBLEED_MITIGATION_IBPB]      = "Mitigation: IBPB",
@@ -1181,7 +1181,7 @@ spectre_v2_user_select_mitigation(void)
         if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
                 if (mode != SPECTRE_V2_USER_STRICT &&
                     mode != SPECTRE_V2_USER_STRICT_PREFERRED)
-                        pr_info("Selecting STIBP always-on mode to complement retbleed mitigation'\n");
+                        pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
         }

@@ -23,6 +23,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include <asm/nospec-branch.h>
 #include <asm/bootparam.h>
 #include <asm/export.h>
 #include <asm/pgtable_32.h>

@@ -77,10 +77,20 @@ static uint8_t __pte2cachemode_tbl[8] = {
         [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
 
-/* Check that the write-protect PAT entry is set for write-protect */
+/*
+ * Check that the write-protect PAT entry is set for write-protect.
+ * To do this without making assumptions how PAT has been set up (Xen has
+ * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache
+ * mode via the __cachemode2pte_tbl[] into protection bits (those protection
+ * bits will select a cache mode of WP or better), and then translate the
+ * protection bits back into the cache mode using __pte2cm_idx() and the
+ * __pte2cachemode_tbl[] array. This will return the really used cache mode.
+ */
 bool x86_has_pat_wp(void)
 {
-        return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
+        uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];
+
+        return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
 }
 
 enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
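
For readers who want to see the round-trip idea in isolation, here is a minimal, self-contained user-space sketch of the lookup the fixed x86_has_pat_wp() performs. The table contents, bit values and names (has_pat_wp, cachemode2pte_tbl, pte2cachemode_tbl, pte2cm_idx) are invented for illustration and do not reflect the kernel's or Xen's real PAT layouts; only the lookup structure mirrors the patch. The old check indexed the pte-to-cachemode table directly with the cache-mode number, which only holds when the PAT slots line up with the kernel's default assignment; the new check first asks which protection bits were actually chosen for WP at boot, then asks which cache mode those bits really select.

/*
 * Illustrative sketch of the round-trip lookup in the fixed x86_has_pat_wp().
 * All table contents and bit values below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

#define PWT 0x1   /* write-through bit (illustrative value) */
#define PCD 0x2   /* cache-disable bit (illustrative value) */
#define PAT 0x4   /* page attribute table bit (illustrative value) */

/* Collapse the three cache-control bits into a 0..7 table index. */
static unsigned int pte2cm_idx(uint16_t prot)
{
        return prot & (PWT | PCD | PAT);
}

/* Cache mode -> protection bits chosen at boot for this hypothetical layout. */
static const uint16_t cachemode2pte_tbl[CM_NUM] = {
        [CM_WB]       = 0,
        [CM_WC]       = PWT,
        [CM_UC_MINUS] = PCD,
        [CM_UC]       = PCD | PWT,
        [CM_WT]       = PAT,
        [CM_WP]       = PAT,        /* no WP slot: WP requests fall back */
};

/* Protection bits -> cache mode actually programmed (hypothetical layout). */
static const enum cache_mode pte2cachemode_tbl[8] = {
        [0]               = CM_WB,
        [PWT]             = CM_WC,
        [PCD]             = CM_UC_MINUS,
        [PCD | PWT]       = CM_UC,
        [PAT]             = CM_WT,
        [PAT | PWT]       = CM_WT,
        [PAT | PCD]       = CM_UC_MINUS,
        [PAT | PCD | PWT] = CM_UC,
};

/* Round trip: which cache mode does a WP request really end up with? */
static bool has_pat_wp(void)
{
        uint16_t prot = cachemode2pte_tbl[CM_WP];

        return pte2cachemode_tbl[pte2cm_idx(prot)] == CM_WP;
}

int main(void)
{
        /* With the tables above, a WP request is silently served as WT. */
        printf("WP mappings usable: %s\n", has_pat_wp() ? "yes" : "no");
        return 0;
}

The value of the round trip is that it gives the right answer no matter which slots the PAT MSR assigns, which is exactly what the direct table lookup could not guarantee once another entity (such as Xen) has programmed PAT.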