Commit 7e2ab91e authored by Cheng Jian, committed by Yang Yingliang

livepatch/x86: support livepatch without ftrace


hulk inclusion
category: feature
bugzilla: 5507
CVE: NA

----------------------------------------

Support livepatch without ftrace for x86_64.

Supported now:
        livepatch relocation when init_patch after load_module;
        instruction patching when the patch is enabled;
        activeness function check;
        enforcing the patch stacking principle;

x86_64 uses variable-length instructions, so no extra handling is
needed for long jumps.
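
As an illustration of the 5-byte patch site (a sketch only, not part of
the diff below; build_jmp_e9() is a hypothetical helper mirroring
klp_jmp_code()):

	/* Sketch only: encode "jmp rel32" (opcode 0xe9) from ip to target. */
	static void build_jmp_e9(unsigned char buf[5],
				 unsigned long ip, unsigned long target)
	{
		/* displacement is relative to the next instruction, ip + 5 */
		int offset = (int)(target - (ip + 5));

		buf[0] = 0xe9;
		memcpy(&buf[1], &offset, sizeof(offset));
	}

Any replacement function within +/-2 GiB of the patched site is
reachable this way, so no long-jump trampoline is needed.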

Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Li Bin <huawei.libin@huawei.com>
Tested-by: Yang ZuoTing <yangzuoting@huawei.com>
Tested-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parent eb232fcd
@@ -166,6 +166,7 @@ config X86
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH_FTRACE if X86_64
select HAVE_LIVEPATCH_WO_FTRACE if X86_64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MIXED_BREAKPOINTS_REGS
@@ -24,17 +24,48 @@
#include <asm/setup.h>
#include <linux/ftrace.h>
#ifdef CONFIG_LIVEPATCH
static inline int klp_check_compiler_support(void)
{
#if defined(CONFIG_LIVEPATCH_FTRACE) && !defined(CC_USING_FENTRY)
return 1;
#endif
return 0;
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
static inline unsigned long klp_arch_stub_ip(unsigned long addr)
{
return addr;
}
#else /* CONFIG_LIVEPATCH_WO_FTRACE */
#define klp_smp_isb()
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
BUG();
}
static inline unsigned long klp_arch_stub_ip(unsigned long addr)
{
BUG();
return 0;
}
struct klp_patch;
struct klp_func;
int arch_klp_patch_func(struct klp_func *func);
void arch_klp_unpatch_func(struct klp_func *func);
int klp_check_calltrace(struct klp_patch *patch, int enable);
#endif
#else
#error Live patching support is disabled; check CONFIG_LIVEPATCH
#endif
#endif /* _ASM_X86_LIVEPATCH_H */
@@ -15,10 +15,14 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <asm/text-patching.h>
#include <asm/stacktrace.h>
#include <asm/set_memory.h>
/* Apply per-object alternatives. Based on x86 module_finalize() */
void arch_klp_init_object_loaded(struct klp_patch *patch,
@@ -63,3 +67,279 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
apply_paravirt(pseg, pseg + para->sh_size);
}
}
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
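/*
 * Activeness check: a function is still "in use" if any address in a
 * task's (reliable) stack trace falls inside it; in that case the
 * enable/disable transition is refused.
 */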
static inline int klp_compare_address(unsigned long stack_addr,
unsigned long func_addr, unsigned long func_size,
const char *func_name)
{
if (stack_addr >= func_addr && stack_addr < func_addr + func_size) {
pr_err("func %s is in use!\n", func_name);
return -EBUSY;
}
return 0;
}
static int klp_check_stack_func(struct klp_func *func,
struct stack_trace *trace, int enable)
{
unsigned long func_addr, func_size, address;
const char *func_name;
int i;
for (i = 0; i < trace->nr_entries; i++) {
address = trace->entries[i];
if (enable) {
if (func->force)
continue;
func_addr = func->old_addr;
func_size = func->old_size;
} else {
func_addr = (unsigned long)func->new_func;
func_size = func->new_size;
}
func_name = func->old_name;
if (klp_compare_address(address, func_addr,
func_size, func_name))
return -EAGAIN;
}
return 0;
}
static void klp_print_stack_trace(struct stack_trace *trace)
{
int i;
pr_err("Call Trace:\n");
for (i = 0; i < trace->nr_entries; i++) {
pr_err("[<%pK>] %pS\n",
(void *)trace->entries[i],
(void *)trace->entries[i]);
}
}
#ifdef MAX_STACK_ENTRIES
#undef MAX_STACK_ENTRIES
#endif
#define MAX_STACK_ENTRIES 100
/*
* Determine whether it's safe to transition the task to the target patch state
* by looking for any to-be-patched or to-be-unpatched functions on its stack.
*/
static int klp_check_stack(struct task_struct *task,
struct klp_patch *patch, int enable)
{
static unsigned long entries[MAX_STACK_ENTRIES];
struct stack_trace trace;
struct klp_object *obj;
struct klp_func *func;
int ret;
trace.skip = 0;
trace.nr_entries = 0;
trace.max_entries = MAX_STACK_ENTRIES;
trace.entries = entries;
ret = save_stack_trace_tsk_reliable(task, &trace);
WARN_ON_ONCE(ret == -ENOSYS);
if (ret) {
pr_info("%s: %s:%d has an unreliable stack\n",
__func__, task->comm, task->pid);
return ret;
}
klp_for_each_object(patch, obj) {
klp_for_each_func(obj, func) {
ret = klp_check_stack_func(func, &trace, enable);
if (ret) {
pr_info("%s: %s:%d is sleeping on function %s\n",
__func__, task->comm, task->pid,
func->old_name);
klp_print_stack_trace(&trace);
return ret;
}
}
}
return 0;
}
int klp_check_calltrace(struct klp_patch *patch, int enable)
{
struct task_struct *g, *t;
int ret = 0;
for_each_process_thread(g, t) {
ret = klp_check_stack(t, patch, enable);
if (ret)
goto out;
}
out:
return ret;
}
#include <linux/slab.h>
#include <asm/nops.h>
#include <asm/sections.h>
#define JMP_E9_INSN_SIZE 5
union klp_code_union {
char code[JMP_E9_INSN_SIZE];
struct {
unsigned char e9;
int offset;
} __packed;
};
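/*
 * One klp_func_node per patched old function. func_stack stacks the
 * klp_funcs of successive patches so the newest one wins, and old_code
 * keeps the original bytes so they can be restored when the last patch
 * is removed.
 */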
struct klp_func_node {
struct list_head node;
struct list_head func_stack;
unsigned long old_addr;
unsigned char old_code[JMP_E9_INSN_SIZE];
};
static LIST_HEAD(klp_func_list);
static struct klp_func_node *klp_find_func_node(unsigned long old_addr)
{
struct klp_func_node *func_node;
list_for_each_entry(func_node, &klp_func_list, node) {
if (func_node->old_addr == old_addr)
return func_node;
}
return NULL;
}
int arch_klp_init_func(struct klp_object *obj, struct klp_func *func)
{
return 0;
}
void arch_klp_free_func(struct klp_object *obj, struct klp_func *limit)
{
}
static int klp_calc_offset(long pc, long addr)
{
return (int)(addr - pc);
}
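/* Build the 5-byte "jmp rel32" (0xe9) that redirects ip to addr. */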
static unsigned char *klp_jmp_code(unsigned long ip, unsigned long addr)
{
static union klp_code_union calc;
calc.e9 = 0xe9;
calc.offset = klp_calc_offset(ip + JMP_E9_INSN_SIZE, addr);
return calc.code;
}
static unsigned char *klp_old_code(unsigned char *code)
{
static union klp_code_union old_code;
/* instruction bytes may contain 0x00, so use memcpy rather than strncpy */
memcpy(old_code.code, code, JMP_E9_INSN_SIZE);
return old_code.code;
}
void arch_klp_code_modify_prepare(void)
{
set_kernel_text_rw();
set_all_modules_text_rw();
}
void arch_klp_code_modify_post_process(void)
{
set_all_modules_text_ro();
set_kernel_text_ro();
}
static inline int within(unsigned long addr, unsigned long start,
unsigned long end)
{
return addr >= start && addr < end;
}
static unsigned long text_ip_addr(unsigned long ip)
{
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
return ip;
}
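/*
 * Patch one function: on the first patch of this function, save the
 * original JMP_E9_INSN_SIZE bytes, then overwrite them with a near jump
 * to new_func. Runs under stop_machine(), hence GFP_ATOMIC.
 */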
int arch_klp_patch_func(struct klp_func *func)
{
struct klp_func_node *func_node;
unsigned long ip, new_addr;
const unsigned char *new;
func_node = klp_find_func_node(func->old_addr);
ip = func->old_addr;
if (!func_node) {
func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC);
if (!func_node)
return -ENOMEM;
INIT_LIST_HEAD(&func_node->func_stack);
func_node->old_addr = func->old_addr;
probe_kernel_read(func_node->old_code,
(void *)ip, JMP_E9_INSN_SIZE);
list_add_rcu(&func_node->node, &klp_func_list);
}
list_add_rcu(&func->stack_node, &func_node->func_stack);
new_addr = (unsigned long)func->new_func;
new = klp_jmp_code(ip, new_addr);
ip = text_ip_addr(ip);
if (probe_kernel_write((void *)ip, new, JMP_E9_INSN_SIZE))
return -EPERM;
sync_core();
return 0;
}
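/*
 * Unpatch one function: restore the saved original bytes if this was the
 * last patch on the function, otherwise re-point the jump at the next
 * most recently applied patch on the stack.
 */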
void arch_klp_unpatch_func(struct klp_func *func)
{
struct klp_func_node *func_node;
struct klp_func *next_func;
unsigned long ip, new_addr;
const unsigned char *new;
func_node = klp_find_func_node(func->old_addr);
ip = func_node->old_addr;
if (list_is_singular(&func_node->func_stack)) {
list_del_rcu(&func->stack_node);
list_del_rcu(&func_node->node);
new = klp_old_code(func_node->old_code);
kfree(func_node);
} else {
list_del_rcu(&func->stack_node);
next_func = list_first_or_null_rcu(&func_node->func_stack,
struct klp_func, stack_node);
new_addr = (unsigned long)next_func->new_func;
new = klp_jmp_code(ip, new_addr);
}
ip = text_ip_addr(ip);
probe_kernel_write((void *)ip, new, JMP_E9_INSN_SIZE);
sync_core();
}
#endif
@@ -438,6 +438,15 @@ int klp_try_disable_patch(void *data)
return ret;
}
void __weak arch_klp_code_modify_prepare(void)
{
}
void __weak arch_klp_code_modify_post_process(void)
{
}
static int __klp_disable_patch(struct klp_patch *patch)
{
int ret;
@@ -458,7 +467,9 @@ static int __klp_disable_patch(struct klp_patch *patch)
}
#endif
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
arch_klp_code_modify_post_process();
return ret;
}
@@ -662,7 +673,9 @@ static int __klp_enable_patch(struct klp_patch *patch)
}
#endif
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
arch_klp_code_modify_post_process();
if (ret)
return ret;