From a636944d32e37a4c82a1534e6bb367409d190639 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Wed, 6 Oct 2021 12:43:17 -0700 Subject: [PATCH] create-diff-object: Create __patchable_function_entries section for aarch64 The mcount_loc section contains the addresses of patchable ftrace sites which is used by the ftrace infrastructure in the kernel to create a list of traceable functions and to know where to patch to enable tracing of them. On aarch64 this section is called __patchable_function_entries and is generated by the compiler. Modify kpatch_create_mcount_sections() to create the __patchable_function_entries section on aarch64 rather than an mcount_loc section. Either name will be recognised by the kernel but keep the name which is expected. In order to verify which functions should have an entry in the __patchable_function_entries section, preserve the section from the kelf_patched file. Note that any symbols not included in the output elf must be NULLed in the relocation section as it would be unsafe to access them after they're freed. Also check for the 2 required NOP instructions on function entry to be pedantic. Signed-off-by: Suraj Jitindar Singh --- kpatch-build/create-diff-object.c | 74 +++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 27f3c8f26..537ce9325 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1938,6 +1938,30 @@ static void kpatch_migrate_included_elements(struct kpatch_elf *kelf, struct kpa sec->secsym = NULL; } +#ifdef __aarch64__ +{ + /* + * On aarch64 the __patchable_function_entries section is used to store + * information about patchable functions which we will use in + * kpatch_create_mcount_sections(). Since this section isn't included it + * will be freed in kpatch_elf_teardown(), so we preserve it in the main + * function. 
However the relas->sym of any relocation will be freed if + * that symbol isn't included and thus it is unsafe to dereference it + * later. Zero the "sym" field for any symbol not included. + */ + struct rela *rela; + struct section *patchable_sec; + + patchable_sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); + if (patchable_sec) { + list_for_each_entry(rela, &patchable_sec->rela->relas, list) { + if (!rela->sym->include) + rela->sym = NULL; + } + } +} +#endif /* migrate included symbols from kelf to out */ list_for_each_entry_safe(sym, safesym, &kelf->symbols, list) { if (!sym->include) @@ -3410,7 +3434,8 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. */ -static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) +static void kpatch_create_mcount_sections(struct kpatch_elf *kelf, + struct section *patchable_sec) { int nr, index; struct section *sec, *relasec; @@ -3426,7 +3451,11 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) nr++; /* create text/rela section pair */ +#ifdef __aarch64__ + sec = create_section_pair(kelf, "__patchable_function_entries", sizeof(void*), nr); +#else /* !__aarch64__ */ sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr); +#endif relasec = sec->rela; /* populate sections */ @@ -3484,6 +3513,37 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) rela->type = R_X86_64_PC32; } +#elif defined(__aarch64__) +{ + unsigned char *insn; + bool found = false; + int i; + + if (!patchable_sec) + ERROR("%s: unexpected missing __patchable_function_entries section", __func__); + + /* We search the patchable_sec to ensure that the symbol is patchable. 
*/ + list_for_each_entry(rela, &patchable_sec->relas, list) { + if (rela->sym && rela->sym->sec && sym->sec == + rela->sym->sec) { + found = true; + break; + } + } + if (!found) + ERROR("%s: unexpected missing __patchable_function_entries entry", sym->name); + + /* Then to be pedantic we also check for the 2 NOPs at function entry. */ + insn = sym->sec->data->d_buf + rela->addend; + for (i = 0; i < 8; i += 4) { + /* We expect a NOP i.e. 0xd503201f (little endian) */ + if (insn[i] != 0x1f || insn[i + 1] != 0x20 || + insn[i + 2] != 0x03 || insn [i + 3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function", sym->name); + } + + insn_offset = rela->addend; +} #else /* __powerpc64__ */ { bool found = false; @@ -3706,7 +3766,7 @@ int main(int argc, char *argv[]) struct arguments arguments; int num_changed, callbacks_exist, new_globals_exist; struct lookup_table *lookup; - struct section *sec, *symtab; + struct section *sec, *symtab, *patchable_sec; char *orig_obj, *patched_obj, *parent_name; char *parent_symtab, *mod_symvers, *patch_name, *output_obj; @@ -3790,6 +3850,14 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); + /* Preserve the patchable_function_entries sec before it's torn down */ + patchable_sec = find_section_by_name(&kelf_patched->sections, + "__patchable_function_entries"); + if (patchable_sec) { + patchable_sec = patchable_sec->rela; + list_del(&patchable_sec->list); + } + /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. Don't free however, since our section and symbol @@ -3808,7 +3876,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_create_mcount_sections(kelf_out); + kpatch_create_mcount_sections(kelf_out, patchable_sec); /* * At this point, the set of output sections and symbols is