diff options
author | Thilo Schulz <arny@ats.s.bawue.de> | 2011-06-13 09:56:39 +0000 |
---|---|---|
committer | Tim Angus <tim@ngus.net> | 2013-01-09 23:15:55 +0000 |
commit | 6a71409a0622050f9a682d4e3b02419c444febe5 (patch) | |
tree | 7766ff71304d04c6e42de7dd7d48ed7e7e0fac59 /src/qcommon/vm_x86.c | |
parent | b15804d39f71e9be202818288726777d1ca8ac09 (diff) |
- Add MASM assembler files for MSVC x64 projects to support vm_x86 in x64 mode
- Clean up ftol()/snapvector() mess
- Make use of SSE instructions for ftol()/snapvector() if available
- Move ftol/snapvector pure assembler to inline assembler; this will add x86_64 and improve support for different calling conventions
- Set FPU control word at program startup to get consistent behaviour on all platforms
Diffstat (limited to 'src/qcommon/vm_x86.c')
-rw-r--r-- | src/qcommon/vm_x86.c | 99 |
1 file changed, 35 insertions, 64 deletions
diff --git a/src/qcommon/vm_x86.c b/src/qcommon/vm_x86.c index e609bc1e..72225473 100644 --- a/src/qcommon/vm_x86.c +++ b/src/qcommon/vm_x86.c @@ -68,29 +68,6 @@ static int pc = 0; #define FTOL_PTR -#ifdef _MSC_VER - -#if defined( FTOL_PTR ) -int _ftol( float ); -static void *ftolPtr = _ftol; -#endif - -#else // _MSC_VER - -#if defined( FTOL_PTR ) - -int qftol( void ); -int qftol027F( void ); -int qftol037F( void ); -int qftol0E7F( void ); -int qftol0F7F( void ); - - -static void *ftolPtr = qftol0F7F; -#endif // FTOL_PTR - -#endif - static int instruction, pass; static int lastConst = 0; static int oc0, oc1, pop0, pop1; @@ -112,15 +89,17 @@ typedef enum static ELastCommand LastCommand; -static inline int iss8(int32_t v) +static int iss8(int32_t v) { return (SCHAR_MIN <= v && v <= SCHAR_MAX); } -static inline int isu8(uint32_t v) +#if 0 +static int isu8(uint32_t v) { return (v <= UCHAR_MAX); } +#endif static int NextConstant4(void) { @@ -437,30 +416,37 @@ Uses asm to retrieve arguments from registers to work around different calling c ================= */ +#if defined(_MSC_VER) && defined(idx64) + +extern void qsyscall64(void); +extern uint8_t qvmcall64(int *programStack, int *opStack, intptr_t *instructionPointers, byte *dataBase); + +// Microsoft does not support inline assembler on x64 platforms. Meh. 
+void DoSyscall(int syscallNum, int programStack, int *opStackBase, uint8_t opStackOfs, intptr_t arg) +{ +#else static void DoSyscall(void) { - vm_t *savedVM; - int syscallNum; int programStack; int *opStackBase; - int opStackOfs; + uint8_t opStackOfs; intptr_t arg; +#endif + + vm_t *savedVM; #ifdef _MSC_VER + #ifndef idx64 __asm { mov dword ptr syscallNum, eax mov dword ptr programStack, esi - mov dword ptr opStackOfs, ebx -#ifdef idx64 - mov qword ptr opStackBase, rdi - mov qword ptr arg, rcx -#else + mov byte ptr opStackOfs, bl mov dword ptr opStackBase, edi mov dword ptr arg, ecx -#endif } + #endif #else __asm__ volatile( "" @@ -540,8 +526,13 @@ Call to DoSyscall() int EmitCallDoSyscall(vm_t *vm) { // use edx register to store DoSyscall address +#if defined(_MSC_VER) && defined(idx64) + EmitRexString(0x48, "BA"); // mov edx, qsyscall64 + EmitPtr(qsyscall64); +#else EmitRexString(0x48, "BA"); // mov edx, DoSyscall EmitPtr(DoSyscall); +#endif // Push important registers to stack as we can't really make // any assumptions about calling conventions. 
@@ -1630,9 +1621,8 @@ void VM_Compile(vm_t *vm, vmHeader_t *header) EmitString("DB 1C 9F"); // fistp dword ptr [edi + ebx * 4] #else // FTOL_PTR // call the library conversion function - EmitString("D9 04 9F"); // fld dword ptr [edi + ebx * 4] - EmitRexString(0x48, "BA"); // mov edx, ftolPtr - EmitPtr(ftolPtr); + EmitRexString(0x48, "BA"); // mov edx, Q_VMftol + EmitPtr(Q_VMftol); EmitRexString(0x48, "FF D2"); // call edx EmitCommand(LAST_COMMAND_MOV_STACK_EAX); // mov dword ptr [edi + ebx * 4], eax #endif @@ -1747,12 +1737,12 @@ This function is called directly by the generated code int VM_CallCompiled(vm_t *vm, int *args) { - int stack[OPSTACK_SIZE + 7]; + byte stack[OPSTACK_SIZE * 4 + 15]; void *entryPoint; int programCounter; int programStack, stackOnEntry; byte *image; - int *opStack, *opStackOnEntry; + int *opStack; int opStackOfs; currentVM = vm; @@ -1785,35 +1775,16 @@ int VM_CallCompiled(vm_t *vm, int *args) // off we go into generated code... entryPoint = vm->codeBase + vm->entryOfs; - opStack = opStackOnEntry = PADP(stack, 8); + opStack = PADP(stack, 16); *opStack = 0xDEADBEEF; opStackOfs = 0; #ifdef _MSC_VER + #ifdef idx64 + opStackOfs = qvmcall64(&programStack, opStack, vm->instructionPointers, vm->dataBase); + #else __asm { -#ifdef idx64 - // non-volatile registers according to x64 calling convention - push rsi - push rdi - push rbx - - mov esi, dword ptr programStack - mov rdi, qword ptr opStack - mov ebx, dword ptr opStackOfs - mov r8, qword ptr vm->instructionPointers - mov r9, qword ptr vm->dataBase - - call entryPoint - - mov dword ptr opStackOfs, ebx - mov qword ptr opStack, rdi - mov dword ptr programStack, esi - - pop rbx - pop rdi - pop rsi -#else pushad mov esi, dword ptr programStack @@ -1827,8 +1798,8 @@ int VM_CallCompiled(vm_t *vm, int *args) mov dword ptr programStack, esi popad -#endif } + #endif #elif defined(idx64) __asm__ volatile( "movq %5, %%rax\r\n" @@ -1856,7 +1827,7 @@ int VM_CallCompiled(vm_t *vm, int *args) ); #endif - 
if(opStack != opStackOnEntry || opStackOfs != 1 || *opStack != 0xDEADBEEF) + if(opStackOfs != 1 || *opStack != 0xDEADBEEF) { Com_Error(ERR_DROP, "opStack corrupted in compiled code"); } |