Commit 584c8fb (parent 1713f3c)
Added support for Linux hosts

36 files changed, 1171 insertions(+), 59 deletions(-)

.gitignore

Lines changed: 10 additions & 0 deletions
@@ -18,3 +18,13 @@ Win8.1Release/
 # macOS
 /darwin/hax_driver/com_intel_hax/build/
 .DS_Store
+
+# Linux
+*.o
+*.cmd
+*.ko
+*.mod.c
+.tmp_versions
+.cache.mk
+modules.order
+Module.symvers

core/cpu.c

Lines changed: 1 addition & 1 deletion
@@ -572,7 +572,7 @@ uint32_t load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
     /* when wake up from sleep, we need the barrier, as vm operation
      * are not serialized instructions.
      */
-    smp_mb();
+    hax_smp_mb();

     cpu_data = current_cpu_data();
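
Note: HAXM's own barrier is renamed from smp_mb() to hax_smp_mb(), presumably so the name no longer collides with the Linux kernel's smp_mb() macro on the new host. A minimal sketch of what such a per-platform wrapper could look like (the actual header added by this commit may define it differently):

    /* Sketch only: hax_smp_mb() as a per-host full memory barrier. */
    #if defined(__linux__)
    #include <asm/barrier.h>            /* kernel smp_mb() */
    #define hax_smp_mb()    smp_mb()
    #else
    /* Illustrative fallback for other hosts: a compiler/CPU full barrier. */
    #define hax_smp_mb()    __sync_synchronize()
    #endif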

core/ept.c

Lines changed: 2 additions & 2 deletions
@@ -328,7 +328,7 @@ static void invept_smpfunc(struct invept_bundle *bundle)
 {
     struct per_cpu_data *cpu_data;

-    smp_mb();
+    hax_smp_mb();
     cpu_data = current_cpu_data();
     cpu_data->invept_res = VMX_SUCCEED;

@@ -373,7 +373,7 @@ void invept(hax_vm_t *hax_vm, uint type)

     bundle.type = type;
     bundle.desc = &desc;
-    smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc,
+    hax_smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc,
                           &bundle);

     /*
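
Note: the cross-CPU call helper gets the same treatment, smp_call_function() becoming hax_smp_call_function(), again presumably to avoid clashing with the Linux kernel API of the same name. A hedged sketch of a possible Linux-host backend (the hax_cpumap_t type and the exact signature are assumptions; every call site shown here passes &cpu_online_map):

    #include <linux/smp.h>

    /* Sketch only: run scfunc on every online CPU and wait for completion. */
    int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *),
                              void *param)
    {
        (void)cpus;                     /* assumed to always be &cpu_online_map */
        on_each_cpu(scfunc, param, 1);  /* 1 = wait for all CPUs to finish */
        return 0;
    }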

core/hax.c

Lines changed: 3 additions & 3 deletions
@@ -62,12 +62,12 @@ struct hax_t *hax;
 extern hax_atomic_t vmx_cpu_num, vmx_enabled_num;
 static void hax_enable_vmx(void)
 {
-    smp_call_function(&cpu_online_map, cpu_init_vmx, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_init_vmx, NULL);
 }

 static void hax_disable_vmx(void)
 {
-    smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL);
 }

 static void free_cpu_vmxon_region(void)
@@ -415,7 +415,7 @@ static void hax_pmu_init(void)
     int ref_cpu_id = -1;

     // Execute cpu_pmu_init() on each logical processor of the host CPU
-    smp_call_function(&cpu_online_map, cpu_pmu_init, NULL);
+    hax_smp_call_function(&cpu_online_map, cpu_pmu_init, NULL);

     // Find the common APM version supported by all host logical processors
     // TODO: Theoretically we should do the same for other APM parameters

core/ia32_ops.asm

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ function __nmi, 0
     int 2h
     ret

-function __fls, 1
+function asm_fls, 1
     xor reg_ret_32, reg_ret_32
     bsr reg_ret_32, reg_arg1_32
     ret
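
Note: __fls is renamed asm_fls, in line with the asm_* naming used by the other assembly helpers (and avoiding the Linux kernel's own __fls()). The routine is a single BSR: it returns the bit index of the most significant set bit of its argument. A reference C version, included here only to document the semantics (the function name is illustrative):

    #include <stdint.h>

    /* Reference semantics of asm_fls: index of the highest set bit.
     * For x == 0 the assembly's BSR result is not meaningful; this
     * reference version simply returns 0. */
    static inline uint32_t fls_reference(uint32_t x)
    {
        uint32_t index = 0;

        while (x >>= 1)
            index++;
        return index;           /* e.g. fls_reference(0x80000000) == 31 */
    }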

core/include/cpu.h

Lines changed: 2 additions & 2 deletions
@@ -107,12 +107,12 @@ struct per_cpu_data {

     /*
      * These fields are used to record the result of certain VMX instructions
-     * when they are used in a function wrapped by smp_call_function(). This is
+     * when they are used in a function wrapped by hax_smp_call_function(). This is
      * because it is not safe to call hax_error(), etc. (whose underlying
      * implementation may use a lock) from the wrapped function to log a
      * failure; doing so may cause a deadlock and thus a host reboot, especially
      * on macOS, where mp_rendezvous_no_intrs() (the legacy Darwin API used by
-     * HAXM to implement smp_call_function()) is known to be prone to deadlocks:
+     * HAXM to implement hax_smp_call_function()) is known to be prone to deadlocks:
      * https://lists.apple.com/archives/darwin-kernel/2006/Dec/msg00006.html
      */
     vmx_result_t vmxon_res;
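
Note: the comment above is the rationale for fields like vmxon_res: the function dispatched through hax_smp_call_function() only records its result per CPU, and logging happens afterwards on the calling thread. A hypothetical illustration of that pattern (the function names below are illustrative, not taken from this commit):

    /* The IPI callback records its result; it must not log. */
    static void vmxon_ipi(void *param)
    {
        (void)param;
        /* No hax_error() here: logging may take a lock and deadlock
         * in IPI context. */
        current_cpu_data()->vmxon_res = do_vmxon_on_this_cpu();
    }

    static void enable_vmx_everywhere(void)
    {
        hax_smp_call_function(&cpu_online_map, vmxon_ipi, NULL);
        /* Back on the calling thread, taking locks and logging is safe. */
        if (current_cpu_data()->vmxon_res != VMX_SUCCEED)
            hax_error("VMXON failed on the current CPU\n");
    }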

core/include/emulate_ops.h

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@
 (0 * FASTOP_ALIGN))

 /* Instruction handlers */
-typedef void(__cdecl em_handler_t)(void);
+typedef void(ASMCALL em_handler_t)(void);
 em_handler_t em_not;
 em_handler_t em_neg;
 em_handler_t em_inc;
@@ -72,7 +72,7 @@ em_handler_t em_bextr;
 em_handler_t em_andn;

 /* Dispatch handlers */
-void __cdecl fastop_dispatch(void *handler, uint64_t *dst,
+void ASMCALL fastop_dispatch(void *handler, uint64_t *dst,
                              uint64_t *src1, uint64_t *src2, uint64_t *flags);

 #endif /* HAX_CORE_EMULATE_OPS_H_ */
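
Note: __cdecl is an MSVC-specific keyword, so the handler typedef and the dispatch prototype switch to the ASMCALL macro already used in ia32.h, which can expand differently per toolchain. Its definition is not part of this hunk; a plausible sketch, stated as an assumption only:

    /* Sketch: one possible per-toolchain definition of ASMCALL. */
    #ifdef _MSC_VER
    #define ASMCALL __cdecl
    #else
    /* GCC/Clang on Linux and macOS follow the System V convention here,
     * so no annotation is needed. */
    #define ASMCALL
    #endif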

core/include/ia32.h

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ void ASMCALL asm_fxrstor(mword *addr);
 void ASMCALL asm_cpuid(union cpuid_args_t *state);

 void ASMCALL __nmi(void);
-uint32_t ASMCALL __fls(uint32_t bit32);
+uint32_t ASMCALL asm_fls(uint32_t bit32);

 uint64_t ia32_rdmsr(uint32_t reg);
 void ia32_wrmsr(uint32_t reg, uint64_t val);

core/include/vmx.h

Lines changed: 11 additions & 0 deletions
@@ -681,6 +681,17 @@ void vmx_vmwrite(struct vcpu_t *vcpu, const char *name,
         vmwrite(vcpu, GUEST_##seg##_AR, tmp_ar);                 \
     }

+#elif defined(__linux__)
+#define VMWRITE_SEG(vcpu, seg, val) ({                           \
+        uint32_t tmp_ar = val.ar;                                \
+        if (tmp_ar == 0)                                         \
+            tmp_ar = 0x10000;                                    \
+        vmwrite(vcpu, GUEST_##seg##_SELECTOR, (val).selector);   \
+        vmwrite(vcpu, GUEST_##seg##_BASE, (val).base);           \
+        vmwrite(vcpu, GUEST_##seg##_LIMIT, (val).limit);         \
+        vmwrite(vcpu, GUEST_##seg##_AR, tmp_ar);                 \
+})
+
 #elif defined(__MACH__)
 #define VMWRITE_SEG(vcpu, seg, val) ({                           \
         uint32_t tmp_ar = val.ar;                                \
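
Note: the new __linux__ branch of VMWRITE_SEG mirrors the existing __MACH__ one, since both hosts build with GCC/Clang and can therefore use a statement expression ({ ... }). A minimal usage sketch, with a hypothetical segment type providing the four fields the macro reads (the real HAXM structures and call sites are not part of this hunk):

    /* Hypothetical type, for illustration only. */
    struct hyp_segment {
        uint16_t selector;
        uint64_t base;
        uint32_t limit;
        uint32_t ar;    /* access rights; 0 is rewritten to 0x10000 ("unusable") */
    };

    static void write_guest_cs(struct vcpu_t *vcpu, struct hyp_segment cs)
    {
        VMWRITE_SEG(vcpu, CS, cs);    /* expands to four vmwrite() calls */
    }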

core/intr_exc.c

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ uint32_t vcpu_get_pending_intrs(struct vcpu_t *vcpu)

     for (i = 7; i >= 0; i--) {
         if (intr_pending[i]) {
-            offset = __fls(intr_pending[i]);
+            offset = asm_fls(intr_pending[i]);
             break;
         }
     }
