Switch to using SLRT interface and process DRTM policy #7

Merged · 7 commits · Dec 11, 2023
23 changes: 0 additions & 23 deletions xen/arch/x86/dom0_build.c
@@ -17,7 +17,6 @@
#include <asm/guest.h>
#include <asm/hpet.h>
#include <asm/io_apic.h>
#include <asm/intel_txt.h>
#include <asm/p2m.h>
#include <asm/setup.h>
#include <asm/spec_ctrl.h>
@@ -586,28 +585,6 @@ int __init construct_dom0(struct domain *d, const module_t *image,
BUG_ON(d->vcpu[0] == NULL);
BUG_ON(d->vcpu[0]->is_initialised);

if ( sl_status ) {
/*
* Note: __start_xen() changed the meaning of mod_start and mod_end
* fields, they are now MFN and module length in bytes, respectively.
* For kernel, image_headroom was added both to mod_end and mod_start.
*/
printk("Measuring dom0 kernel...\n");
tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR,
__va(image->mod_start * PAGE_SIZE + image_headroom),
image->mod_end - image_headroom, TXT_EVTYPE_KERNEL,
NULL, 0);

if ( initrd != NULL ) {
process_pending_softirqs();

printk("Measuring dom0 initrd...\n");
tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR,
__va(initrd->mod_start * PAGE_SIZE),
initrd->mod_end, TXT_EVTYPE_INITRD, NULL, 0);
}
}

process_pending_softirqs();

if ( is_hvm_domain(d) )
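The hunk above removes the direct dom0 measurements; its comment documents that __start_xen() re-purposes the multiboot fields so that mod_start holds an MFN and mod_end the module length in bytes, with image_headroom added to both for the kernel. A minimal standalone sketch of that address arithmetic, using hypothetical values only to illustrate the computation:

/* Sketch only: illustrates the re-purposed mod_start/mod_end semantics
 * described in the removed comment; all values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
    uint64_t mod_start = 0x1000;                  /* MFN of the kernel module (assumed) */
    uint64_t image_headroom = 0x200000;           /* headroom added by __start_xen() (assumed) */
    uint64_t mod_end = 0x800000 + image_headroom; /* kernel length in bytes + headroom (assumed) */

    uint64_t kernel_paddr = mod_start * PAGE_SIZE + image_headroom;
    uint64_t kernel_size = mod_end - image_headroom;

    /* These correspond to the address (after __va()) and length that were
     * passed to tpm_hash_extend() in the removed hunk. */
    printf("kernel at phys %#llx, %llu bytes\n",
           (unsigned long long)kernel_paddr, (unsigned long long)kernel_size);
    return 0;
}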
93 changes: 64 additions & 29 deletions xen/arch/x86/include/asm/intel_txt.h
@@ -2,6 +2,8 @@
* TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
*/

#include <xen/multiboot.h>

#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000
#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000

@@ -70,8 +72,6 @@
#define SL_ERROR_TPM_UNKNOWN_DIGEST 0xc0008020
#define SL_ERROR_TPM_INVALID_EVENT 0xc0008021

#define TXT_OS_MLE_MAX_VARIABLE_MTRRS 32

#define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254

#define TXT_AP_BOOT_CS 0x0030
@@ -93,6 +93,8 @@ extern uint32_t trampoline_gdt[];
#define _txt(x) __va(x)
#endif

#include <xen/slr_table.h>

/*
* Always use private space as some of registers are either read-only or not
* present in public space.
@@ -120,32 +122,16 @@ static inline void txt_reset(uint32_t error)
while (1);
}

/*
* Secure Launch defined MTRR saving structures
*/
struct txt_mtrr_pair {
uint64_t mtrr_physbase;
uint64_t mtrr_physmask;
} __packed;

struct txt_mtrr_state {
uint64_t default_mem_type;
uint64_t mtrr_vcnt;
struct txt_mtrr_pair mtrr_pair[TXT_OS_MLE_MAX_VARIABLE_MTRRS];
} __packed;

/*
* Secure Launch defined OS/MLE TXT Heap table
*/
struct txt_os_mle_data {
uint32_t version;
uint32_t boot_params_addr;
uint64_t saved_misc_enable_msr;
struct txt_mtrr_state saved_bsp_mtrrs;
uint32_t slrt;
uint32_t txt_info;
uint32_t ap_wake_block;
uint32_t ap_wake_block_size;
uint64_t evtlog_addr;
uint32_t evtlog_size;
uint8_t mle_scratch[64];
} __packed;

@@ -341,22 +327,71 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base,
return 0;
}

/*
* This helper function maps memory using L2 page tables by aligning mapped
* regions to 2MB. This way the page allocator (which at this point isn't yet
* initialized) isn't needed for creating new L1 mappings. The function also
* checks for and skips memory already mapped by the prebuilt tables.
*
* There is no unmap_l2() because the function is meant for code that accesses
* TXT registers and the TXT heap early in boot; soon afterwards Xen rebuilds
* its memory maps, effectively dropping all existing mappings.
*/
extern int map_l2(unsigned long paddr, unsigned long size);
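For illustration, a self-contained sketch of the 2MB rounding this helper performs (standalone code, not part of Xen; the PREBUILT_MAP_LIMIT handling and the actual page-table update are omitted):

/* Sketch only: mirrors the alignment math of map_l2(); L2_SHIFT stands in
 * for Xen's L2_PAGETABLE_SHIFT (2MB superpages). */
#include <stdint.h>
#include <stdio.h>

#define L2_SHIFT 21

int main(void)
{
    uint64_t paddr = 0x7654321, size = 0x3000;          /* hypothetical region */

    uint64_t start = paddr & ~((1ULL << L2_SHIFT) - 1); /* round start down to 2MB */
    uint64_t end = (paddr + size + (1ULL << L2_SHIFT) - 1)
                   & ~((1ULL << L2_SHIFT) - 1);         /* round end up to 2MB */

    printf("mapping %#llx-%#llx covers the requested %#llx+%#llx\n",
           (unsigned long long)start, (unsigned long long)end,
           (unsigned long long)paddr, (unsigned long long)size);
    return 0;
}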

/* evt_log is a physical address and the caller must map it to a virtual
* address, if needed. */
static inline void find_evt_log(void **evt_log, uint32_t *evt_log_size)
{
struct txt_os_mle_data *os_mle;
struct slr_table *slrt;
struct slr_entry_log_info *log_info;

os_mle = txt_os_mle_data_start(_txt(read_txt_reg(TXTCR_HEAP_BASE)));
slrt = _txt(os_mle->slrt);

log_info = (struct slr_entry_log_info *)
slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_LOG_INFO);
if ( log_info != NULL ) {
*evt_log = _p(log_info->addr);
*evt_log_size = log_info->size;
} else {
*evt_log = NULL;
*evt_log_size = 0;
}
}

extern void map_txt_mem_regions(void);
extern void protect_txt_mem_regions(void);
extern void txt_restore_mtrrs(bool e820_verbose);

#define DRTM_LOC 2
#define DRTM_CODE_PCR 17
#define DRTM_DATA_PCR 18

/* TXT-defined use 0x4xx, TrenchBoot in Linux uses 0x5xx, use 0x6xx here. */
#define TXT_EVTYPE_MBI 0x600
#define TXT_EVTYPE_KERNEL 0x601
#define TXT_EVTYPE_INITRD 0x602
/*
* Secure Launch event log entry types. The TXT specification defines the
* base event value for DRTM events as 0x400.
*/
#define TXT_EVTYPE_BASE 0x400
#define TXT_EVTYPE_SLAUNCH (TXT_EVTYPE_BASE + 0x102)
#define TXT_EVTYPE_SLAUNCH_START (TXT_EVTYPE_BASE + 0x103)
#define TXT_EVTYPE_SLAUNCH_END (TXT_EVTYPE_BASE + 0x104)

#define SHA1_DIGEST_SIZE 20
#define SHA256_DIGEST_SIZE 32

void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size,
uint32_t type, uint8_t *log_data, unsigned log_data_size);

/* Measures essential parts of the SLR table before making use of them. */
void tpm_measure_slrt(void);

/* Takes measurements of DRTM policy entries except for the MBI and SLRT,
* which should have been measured by the time this is called. Also performs
* sanity checks of the policy and panics on failure. In particular, the
* function verifies that the DRTM policy is consistent with the Multiboot
* info (MBI); the MBI address is assumed to be virtual. */
void tpm_process_drtm_policy(const multiboot_info_t *mbi);
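A hedged sketch of how a caller might sequence these measurements during early boot; the actual call sites aren't part of this diff, so the ordering below is an assumption derived from the comments above (sl_status, the Secure Launch status flag defined in intel_txt.c, is assumed to be visible here):

/* Sketch only: assumed boot-time ordering, not the actual Xen call sites. */
static void example_slaunch_measurements(const multiboot_info_t *mbi)
{
    if ( !sl_status )           /* not a Secure Launch boot */
        return;

    /* Measure the SLR table itself first, so later lookups use vetted data. */
    tpm_measure_slrt();

    /* Then measure the remaining DRTM policy entries; this panics if the
     * policy is inconsistent with the MBI (mbi must be a virtual address). */
    tpm_process_drtm_policy(mbi);
}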

#endif /* __ASSEMBLY__ */
101 changes: 41 additions & 60 deletions xen/arch/x86/intel_txt.c
@@ -5,23 +5,15 @@
#include <asm/intel_txt.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/slr_table.h>

static uint64_t __initdata txt_heap_base, txt_heap_size;

unsigned long __initdata sl_status;

#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)

/*
* These helper functions are used to (un)map memory using L2 page tables by
* aligning mapped regions to 2MB. This way page allocator (which at this point
* isn't yet initialized) isn't needed for creating new L1 mappings. Functions
* also check and skip memory already mapped by the prebuilt tables.
*
* There are no tests against multiple mappings in the same superpage, in such
* case first call to unmap_l2() destroys all mappings to given memory range.
*/
static int map_l2(unsigned long paddr, unsigned long size)
int __init map_l2(unsigned long paddr, unsigned long size)
{
unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
unsigned long pages = ((paddr + size) - aligned_paddr);
@@ -42,47 +34,33 @@ static int map_l2(unsigned long paddr, unsigned long size)
pages, PAGE_HYPERVISOR);
}

static int unmap_l2(unsigned long paddr, unsigned long size)
void __init map_txt_mem_regions(void)
{
unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
unsigned long pages = ((paddr + size) - aligned_paddr);
pages += (1ULL << L2_PAGETABLE_SHIFT) - 1;
pages &= ~((1ULL << L2_PAGETABLE_SHIFT) - 1);
pages >>= PAGE_SHIFT;
map_l2(TXT_PRIV_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE);

if ( (aligned_paddr + pages * PAGE_SIZE) <= PREBUILT_MAP_LIMIT )
return 0;
/* TXT Heap */
txt_heap_base = read_txt_reg(TXTCR_HEAP_BASE);
txt_heap_size = read_txt_reg(TXTCR_HEAP_SIZE);

if ( aligned_paddr < PREBUILT_MAP_LIMIT ) {
pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT;
aligned_paddr = PREBUILT_MAP_LIMIT;
}
if ( txt_heap_base != 0 ) {
void *evt_log_addr;
uint32_t evt_log_size;

return destroy_xen_mappings(aligned_paddr,
aligned_paddr + pages * PAGE_SIZE);
map_l2(txt_heap_base, txt_heap_size);

find_evt_log(&evt_log_addr, &evt_log_size);
map_l2((unsigned long)evt_log_addr, evt_log_size);
}
}

void __init protect_txt_mem_regions(void)
{
uint64_t sinit_base, sinit_size;

map_l2(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE);

txt_heap_base = txt_heap_size = sinit_base = sinit_size = 0;

/* TXT Heap */
txt_heap_base = read_txt_reg(TXTCR_HEAP_BASE);
txt_heap_size = read_txt_reg(TXTCR_HEAP_SIZE);
/* SINIT */
sinit_base = read_txt_reg(TXTCR_SINIT_BASE);
sinit_size = read_txt_reg(TXTCR_SINIT_SIZE);

/* Remove mapping of TXT register space. */
unmap_l2(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE);

/* TXT Heap */
if ( txt_heap_base != 0 ) {
struct txt_os_mle_data *os_mle;
void *evt_log_addr;
uint32_t evt_log_size;

printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base,
txt_heap_base + txt_heap_size);
@@ -91,20 +69,21 @@ void __init protect_txt_mem_regions(void)
E820_RAM, E820_RESERVED);

/* TXT TPM Event Log */
map_l2(txt_heap_base, txt_heap_size);
os_mle = txt_os_mle_data_start(__va(txt_heap_base));

if ( os_mle->evtlog_addr != 0 ) {
printk("SLAUNCH: reserving event log (%#lx - %#lx)\n", os_mle->evtlog_addr,
os_mle->evtlog_addr + os_mle->evtlog_size);
e820_change_range_type(&e820_raw, os_mle->evtlog_addr,
os_mle->evtlog_addr + os_mle->evtlog_size,
find_evt_log(&evt_log_addr, &evt_log_size);

if ( evt_log_addr != 0 ) {
printk("SLAUNCH: reserving event log (%#lx - %#lx)\n",
(uint64_t)evt_log_addr,
(uint64_t)evt_log_addr + evt_log_size);
e820_change_range_type(&e820_raw, (uint64_t)evt_log_addr,
(uint64_t)evt_log_addr + evt_log_size,
E820_RAM, E820_RESERVED);
}

unmap_l2(txt_heap_base, txt_heap_size);
}

sinit_base = read_txt_reg(TXTCR_SINIT_BASE);
sinit_size = read_txt_reg(TXTCR_SINIT_SIZE);

/* SINIT */
if ( sinit_base != 0 ) {
printk("SLAUNCH: reserving SINIT memory (%#lx - %#lx)\n", sinit_base,
@@ -123,12 +102,12 @@
void __init txt_restore_mtrrs(bool e820_verbose)
{
struct txt_os_mle_data *os_mle;
struct slr_table *slrt;
struct slr_entry_intel_info *intel_info;
int os_mle_size;
uint64_t mtrr_cap, mtrr_def, base, mask;
unsigned int i;

map_l2(txt_heap_base, txt_heap_size);

os_mle_size = txt_os_mle_data_size(__va(txt_heap_base));
os_mle = txt_os_mle_data_start(__va(txt_heap_base));

@@ -152,27 +131,29 @@
}
}

if ( (mtrr_cap & 0xFF) != os_mle->saved_bsp_mtrrs.mtrr_vcnt ) {
slrt = __va(os_mle->slrt);
intel_info = (struct slr_entry_intel_info *)
slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);

if ( (mtrr_cap & 0xFF) != intel_info->saved_bsp_mtrrs.mtrr_vcnt ) {
printk("Bootloader saved %ld MTRR values, but there should be %ld\n",
os_mle->saved_bsp_mtrrs.mtrr_vcnt, mtrr_cap & 0xFF);
intel_info->saved_bsp_mtrrs.mtrr_vcnt, mtrr_cap & 0xFF);
/* Choose the smaller one to be on the safe side. */
mtrr_cap = (mtrr_cap & 0xFF) > os_mle->saved_bsp_mtrrs.mtrr_vcnt ?
os_mle->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap;
mtrr_cap = (mtrr_cap & 0xFF) > intel_info->saved_bsp_mtrrs.mtrr_vcnt ?
intel_info->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap;
}

/* Restore MTRRs saved by bootloader. */
wrmsrl(MSR_MTRRdefType, os_mle->saved_bsp_mtrrs.default_mem_type);
wrmsrl(MSR_MTRRdefType, intel_info->saved_bsp_mtrrs.default_mem_type);

for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
{
base = os_mle->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physbase;
mask = os_mle->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physmask;
base = intel_info->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physbase;
mask = intel_info->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physmask;
wrmsrl(MSR_IA32_MTRR_PHYSBASE(i), base);
wrmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);
}

unmap_l2(txt_heap_base, txt_heap_size);

if ( e820_verbose )
printk("Restored MTRRs:\n"); /* Printed by caller, mtrr_top_of_ram(). */
}