diff --git a/.gitmodules b/.gitmodules
index f498a60de..dbb1b0dd3 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -16,3 +16,6 @@
 [submodule "dynarmic"]
     path = externals/dynarmic
     url = https://github.com/MerryMage/dynarmic.git
+[submodule "xbyak"]
+    path = externals/xbyak
+    url = https://github.com/herumi/xbyak.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2c00690c0..bcee98a5f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -256,6 +256,8 @@ set(INI_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/externals/inih")
 include_directories(${INI_PREFIX})
 add_subdirectory(${INI_PREFIX})
 
+add_subdirectory(externals)
+
 option(DYNARMIC_TESTS OFF)
 add_subdirectory(externals/dynarmic)
 
diff --git a/externals/CMakeLists.txt b/externals/CMakeLists.txt
new file mode 100644
index 000000000..7e4b05ffc
--- /dev/null
+++ b/externals/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Xbyak
+if (ARCHITECTURE_x86_64)
+    add_library(xbyak INTERFACE)
+    target_include_directories(xbyak INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak)
+    if (NOT MSVC)
+        target_compile_options(xbyak INTERFACE -fno-operator-names)
+    endif()
+endif()
diff --git a/externals/dynarmic b/externals/dynarmic
index 54d051977..34e19f135 160000
--- a/externals/dynarmic
+++ b/externals/dynarmic
@@ -1 +1 @@
-Subproject commit 54d051977f7a6af9c7596ba6a4e6eb467bd1e0bc
+Subproject commit 34e19f135c0dd2feac4f77660f51aa4ea28a7386
diff --git a/externals/xbyak b/externals/xbyak
new file mode 160000
index 000000000..fe4765d2f
--- /dev/null
+++ b/externals/xbyak
@@ -0,0 +1 @@
+Subproject commit fe4765d2fed4e990ea5e9661b6bc5fc9bf48ec16
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 74a271f08..e6c2ce335 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -71,9 +71,15 @@ if(ARCHITECTURE_x86_64)
     set(HEADERS ${HEADERS}
             x64/abi.h
             x64/cpu_detect.h
-            x64/emitter.h)
+            x64/emitter.h
+            x64/xbyak_abi.h
+            x64/xbyak_util.h
+            )
 endif()
 
 create_directory_groups(${SRCS} ${HEADERS})
 
 add_library(common STATIC ${SRCS} ${HEADERS})
+if (ARCHITECTURE_x86_64)
+    target_link_libraries(common xbyak)
+endif()
diff --git a/src/common/x64/xbyak_abi.h b/src/common/x64/xbyak_abi.h
new file mode 100644
index 000000000..6090d93e1
--- /dev/null
+++ b/src/common/x64/xbyak_abi.h
@@ -0,0 +1,178 @@
+// Copyright 2016 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <initializer_list>
+#include <xbyak.h>
+#include "common/assert.h"
+#include "common/bit_set.h"
+
+namespace Common {
+namespace X64 {
+
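+// Maps a GPR or XMM register to a flat BitSet32 index: GPRs occupy bits 0-15, XMMs bits 16-31.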
+inline int RegToIndex(const Xbyak::Reg& reg) {
+    using Kind = Xbyak::Reg::Kind;
+    ASSERT_MSG((reg.getKind() & (Kind::REG | Kind::XMM)) != 0,
+               "RegSet only support GPRs and XMM registers.");
+    ASSERT_MSG(reg.getIdx() < 16, "RegSet only supports XXM0-15.");
+    return reg.getIdx() + (reg.getKind() == Kind::REG ? 0 : 16);
+}
+
+inline Xbyak::Reg64 IndexToReg64(int reg_index) {
+    ASSERT(reg_index < 16);
+    return Xbyak::Reg64(reg_index);
+}
+
+inline Xbyak::Xmm IndexToXmm(int reg_index) {
+    ASSERT(reg_index >= 16 && reg_index < 32);
+    return Xbyak::Xmm(reg_index - 16);
+}
+
+inline Xbyak::Reg IndexToReg(int reg_index) {
+    if (reg_index < 16) {
+        return IndexToReg64(reg_index);
+    } else {
+        return IndexToXmm(reg_index);
+    }
+}
+
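+// Builds a BitSet32 of registers from a list, using the flat GPR/XMM indexing above.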
+inline BitSet32 BuildRegSet(std::initializer_list<Xbyak::Reg> regs) {
+    BitSet32 bits;
+    for (const Xbyak::Reg& reg : regs) {
+        bits[RegToIndex(reg)] = true;
+    }
+    return bits;
+}
+
+const BitSet32 ABI_ALL_GPRS(0x0000FFFF);
+const BitSet32 ABI_ALL_XMMS(0xFFFF0000);
+
+#ifdef _WIN32
+
+// Microsoft x64 ABI
+const Xbyak::Reg ABI_RETURN = Xbyak::util::rax;
+const Xbyak::Reg ABI_PARAM1 = Xbyak::util::rcx;
+const Xbyak::Reg ABI_PARAM2 = Xbyak::util::rdx;
+const Xbyak::Reg ABI_PARAM3 = Xbyak::util::r8;
+const Xbyak::Reg ABI_PARAM4 = Xbyak::util::r9;
+
+const BitSet32 ABI_ALL_CALLER_SAVED = BuildRegSet({
+    // GPRs
+    Xbyak::util::rcx, Xbyak::util::rdx, Xbyak::util::r8, Xbyak::util::r9, Xbyak::util::r10,
+    Xbyak::util::r11,
+    // XMMs
+    Xbyak::util::xmm0, Xbyak::util::xmm1, Xbyak::util::xmm2, Xbyak::util::xmm3, Xbyak::util::xmm4,
+    Xbyak::util::xmm5,
+});
+
+const BitSet32 ABI_ALL_CALLEE_SAVED = BuildRegSet({
+    // GPRs
+    Xbyak::util::rbx, Xbyak::util::rsi, Xbyak::util::rdi, Xbyak::util::rbp, Xbyak::util::r12,
+    Xbyak::util::r13, Xbyak::util::r14, Xbyak::util::r15,
+    // XMMs
+    Xbyak::util::xmm6, Xbyak::util::xmm7, Xbyak::util::xmm8, Xbyak::util::xmm9, Xbyak::util::xmm10,
+    Xbyak::util::xmm11, Xbyak::util::xmm12, Xbyak::util::xmm13, Xbyak::util::xmm14,
+    Xbyak::util::xmm15,
+});
+
+constexpr size_t ABI_SHADOW_SPACE = 0x20;
+
+#else
+
+// System V x86-64 ABI
+const Xbyak::Reg ABI_RETURN = Xbyak::util::rax;
+const Xbyak::Reg ABI_PARAM1 = Xbyak::util::rdi;
+const Xbyak::Reg ABI_PARAM2 = Xbyak::util::rsi;
+const Xbyak::Reg ABI_PARAM3 = Xbyak::util::rdx;
+const Xbyak::Reg ABI_PARAM4 = Xbyak::util::rcx;
+
+const BitSet32 ABI_ALL_CALLER_SAVED = BuildRegSet({
+    // GPRs
+    Xbyak::util::rcx, Xbyak::util::rdx, Xbyak::util::rdi, Xbyak::util::rsi, Xbyak::util::r8,
+    Xbyak::util::r9, Xbyak::util::r10, Xbyak::util::r11,
+    // XMMs
+    Xbyak::util::xmm0, Xbyak::util::xmm1, Xbyak::util::xmm2, Xbyak::util::xmm3, Xbyak::util::xmm4,
+    Xbyak::util::xmm5, Xbyak::util::xmm6, Xbyak::util::xmm7, Xbyak::util::xmm8, Xbyak::util::xmm9,
+    Xbyak::util::xmm10, Xbyak::util::xmm11, Xbyak::util::xmm12, Xbyak::util::xmm13,
+    Xbyak::util::xmm14, Xbyak::util::xmm15,
+});
+
+const BitSet32 ABI_ALL_CALLEE_SAVED = BuildRegSet({
+    // GPRs
+    Xbyak::util::rbx, Xbyak::util::rbp, Xbyak::util::r12, Xbyak::util::r13, Xbyak::util::r14,
+    Xbyak::util::r15,
+});
+
+constexpr size_t ABI_SHADOW_SPACE = 0;
+
+#endif
+
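+// Given the registers to save, the current rsp alignment (rsp mod 16) and any extra frame space
+// needed, computes how much to subtract from rsp after the GPR pushes and the rsp-relative
+// offset at which the 16-byte aligned XMM save area will start.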
+inline void ABI_CalculateFrameSize(BitSet32 regs, size_t rsp_alignment, size_t needed_frame_size,
+                                   s32* out_subtraction, s32* out_xmm_offset) {
+    int count = (regs & ABI_ALL_GPRS).Count();
+    rsp_alignment -= count * 8;
+    size_t subtraction = 0;
+    int xmm_count = (regs & ABI_ALL_XMMS).Count();
+    if (xmm_count) {
+        // If we have any XMMs to save, we must align the stack here.
+        subtraction = rsp_alignment & 0xF;
+    }
+    subtraction += 0x10 * xmm_count;
+    size_t xmm_base_subtraction = subtraction;
+    subtraction += needed_frame_size;
+    subtraction += ABI_SHADOW_SPACE;
+    // Final alignment.
+    rsp_alignment -= subtraction;
+    subtraction += rsp_alignment & 0xF;
+
+    *out_subtraction = (s32)subtraction;
+    *out_xmm_offset = (s32)(subtraction - xmm_base_subtraction);
+}
+
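+// Pushes the GPRs in `regs`, then reserves stack space and stores the XMMs in `regs` above rsp.
+// Returns the size of the shadow space that called functions may clobber.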
+inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet32 regs,
+                                              size_t rsp_alignment, size_t needed_frame_size = 0) {
+    s32 subtraction, xmm_offset;
+    ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
+
+    for (int reg_index : (regs & ABI_ALL_GPRS)) {
+        code.push(IndexToReg64(reg_index));
+    }
+
+    if (subtraction != 0) {
+        code.sub(code.rsp, subtraction);
+    }
+
+    for (int reg_index : (regs & ABI_ALL_XMMS)) {
+        code.movaps(code.xword[code.rsp + xmm_offset], IndexToXmm(reg_index));
+        xmm_offset += 0x10;
+    }
+
+    return ABI_SHADOW_SPACE;
+}
+
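+// Restores everything saved by ABI_PushRegistersAndAdjustStack; call it with the same register
+// set and alignment/frame arguments.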
+inline void ABI_PopRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet32 regs,
+                                           size_t rsp_alignment, size_t needed_frame_size = 0) {
+    s32 subtraction, xmm_offset;
+    ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
+
+    for (int reg_index : (regs & ABI_ALL_XMMS)) {
+        code.movaps(IndexToXmm(reg_index), code.xword[code.rsp + xmm_offset]);
+        xmm_offset += 0x10;
+    }
+
+    if (subtraction != 0) {
+        code.add(code.rsp, subtraction);
+    }
+
+    // GPRs need to be popped in reverse order
+    for (int reg_index = 15; reg_index >= 0; reg_index--) {
+        if (regs[reg_index]) {
+            code.pop(IndexToReg64(reg_index));
+        }
+    }
+}
+
+} // namespace X64
+} // namespace Common
diff --git a/src/common/x64/xbyak_util.h b/src/common/x64/xbyak_util.h
new file mode 100644
index 000000000..0f52f704b
--- /dev/null
+++ b/src/common/x64/xbyak_util.h
@@ -0,0 +1,49 @@
+// Copyright 2016 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <type_traits>
+#include <xbyak.h>
+#include "common/x64/xbyak_abi.h"
+
+namespace Common {
+namespace X64 {
+
+// Constants for use with cmpps/cmpss
+enum {
+    CMP_EQ = 0,
+    CMP_LT = 1,
+    CMP_LE = 2,
+    CMP_UNORD = 3,
+    CMP_NEQ = 4,
+    CMP_NLT = 5,
+    CMP_NLE = 6,
+    CMP_ORD = 7,
+};
+
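+// Returns true if `target` is reachable with a rel32 displacement from a 5-byte call/jmp
+// emitted at `ref`.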
+inline bool IsWithin2G(uintptr_t ref, uintptr_t target) {
+    u64 distance = target - (ref + 5);
+    return !(distance >= 0x8000'0000ULL && distance <= ~0x8000'0000ULL);
+}
+
+inline bool IsWithin2G(const Xbyak::CodeGenerator& code, uintptr_t target) {
+    return IsWithin2G(reinterpret_cast<uintptr_t>(code.getCurr()), target);
+}
+
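+// Emits a call to `f`, using an indirect call through ABI_RETURN when the target is out of
+// rel32 range.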
+template <typename T>
+inline void CallFarFunction(Xbyak::CodeGenerator& code, const T f) {
+    static_assert(std::is_pointer<T>(), "Argument must be a (function) pointer.");
+    size_t addr = reinterpret_cast<size_t>(f);
+    if (IsWithin2G(code, addr)) {
+        code.call(f);
+    } else {
+        // ABI_RETURN is a safe temp register to use before a call
+        code.mov(ABI_RETURN, addr);
+        code.call(ABI_RETURN);
+    }
+}
+
+} // namespace X64
+} // namespace Common
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 581a37897..9aa446a8f 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -59,6 +59,9 @@ create_directory_groups(${SRCS} ${HEADERS})
 
 add_library(video_core STATIC ${SRCS} ${HEADERS})
 target_link_libraries(video_core glad)
+if (ARCHITECTURE_x86_64)
+    target_link_libraries(video_core xbyak)
+endif()
 
 if (PNG_FOUND)
     target_link_libraries(video_core ${PNG_LIBRARIES})
diff --git a/src/video_core/shader/shader_jit_x64.cpp b/src/video_core/shader/shader_jit_x64.cpp
index c96110bb2..cfdeb8d6a 100644
--- a/src/video_core/shader/shader_jit_x64.cpp
+++ b/src/video_core/shader/shader_jit_x64.cpp
@@ -6,24 +6,30 @@
 #include <cmath>
 #include <cstdint>
 #include <nihstro/shader_bytecode.h>
+#include <smmintrin.h>
 #include <xmmintrin.h>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/vector_math.h"
-#include "common/x64/abi.h"
 #include "common/x64/cpu_detect.h"
-#include "common/x64/emitter.h"
-#include "shader.h"
-#include "shader_jit_x64.h"
+#include "common/x64/xbyak_abi.h"
+#include "common/x64/xbyak_util.h"
 #include "video_core/pica_state.h"
 #include "video_core/pica_types.h"
+#include "video_core/shader/shader.h"
+#include "video_core/shader/shader_jit_x64.h"
+
+using namespace Common::X64;
+using namespace Xbyak::util;
+using Xbyak::Label;
+using Xbyak::Reg32;
+using Xbyak::Reg64;
+using Xbyak::Xmm;
 
 namespace Pica {
 
 namespace Shader {
 
-using namespace Gen;
-
 typedef void (JitShader::*JitFunction)(Instruction instr);
 
 const JitFunction instr_table[64] = {
@@ -98,44 +104,47 @@ const JitFunction instr_table[64] = {
 // purposes, as documented below:
 
 /// Pointer to the uniform memory
-static const X64Reg SETUP = R9;
+static const Reg64 SETUP = r9;
 /// The two 32-bit VS address offset registers set by the MOVA instruction
-static const X64Reg ADDROFFS_REG_0 = R10;
-static const X64Reg ADDROFFS_REG_1 = R11;
+static const Reg64 ADDROFFS_REG_0 = r10;
+static const Reg64 ADDROFFS_REG_1 = r11;
 /// VS loop count register (Multiplied by 16)
-static const X64Reg LOOPCOUNT_REG = R12;
+static const Reg32 LOOPCOUNT_REG = r12d;
 /// Current VS loop iteration number (we could probably use LOOPCOUNT_REG, but this is quicker)
-static const X64Reg LOOPCOUNT = RSI;
+static const Reg32 LOOPCOUNT = esi;
 /// Number to increment LOOPCOUNT_REG by on each loop iteration (Multiplied by 16)
-static const X64Reg LOOPINC = RDI;
+static const Reg32 LOOPINC = edi;
 /// Result of the previous CMP instruction for the X-component comparison
-static const X64Reg COND0 = R13;
+static const Reg64 COND0 = r13;
 /// Result of the previous CMP instruction for the Y-component comparison
-static const X64Reg COND1 = R14;
+static const Reg64 COND1 = r14;
 /// Pointer to the UnitState instance for the current VS unit
-static const X64Reg STATE = R15;
+static const Reg64 STATE = r15;
 /// SIMD scratch register
-static const X64Reg SCRATCH = XMM0;
+static const Xmm SCRATCH = xmm0;
 /// Loaded with the first swizzled source register, otherwise can be used as a scratch register
-static const X64Reg SRC1 = XMM1;
+static const Xmm SRC1 = xmm1;
 /// Loaded with the second swizzled source register, otherwise can be used as a scratch register
-static const X64Reg SRC2 = XMM2;
+static const Xmm SRC2 = xmm2;
 /// Loaded with the third swizzled source register, otherwise can be used as a scratch register
-static const X64Reg SRC3 = XMM3;
+static const Xmm SRC3 = xmm3;
 /// Additional scratch register
-static const X64Reg SCRATCH2 = XMM4;
+static const Xmm SCRATCH2 = xmm4;
 /// Constant vector of [1.0f, 1.0f, 1.0f, 1.0f], used to efficiently set a vector to one
-static const X64Reg ONE = XMM14;
+static const Xmm ONE = xmm14;
 /// Constant vector of [-0.f, -0.f, -0.f, -0.f], used to efficiently negate a vector with XOR
-static const X64Reg NEGBIT = XMM15;
+static const Xmm NEGBIT = xmm15;
 
 // State registers that must not be modified by external function calls
 // Scratch registers, e.g., SRC1 and SCRATCH, have to be saved on the side if needed
-static const BitSet32 persistent_regs = {
-    SETUP,          STATE,                                       // Pointers to register blocks
-    ADDROFFS_REG_0, ADDROFFS_REG_1, LOOPCOUNT_REG, COND0, COND1, // Cached registers
-    ONE + 16,       NEGBIT + 16,                                 // Constants
-};
+static const BitSet32 persistent_regs = BuildRegSet({
+    // Pointers to register blocks
+    SETUP, STATE,
+    // Cached registers
+    ADDROFFS_REG_0, ADDROFFS_REG_1, LOOPCOUNT_REG, COND0, COND1,
+    // Constants
+    ONE, NEGBIT,
+});
 
 /// Raw constant for the source register selector that indicates no swizzling is performed
 static const u8 NO_SRC_REG_SWIZZLE = 0x1b;
@@ -157,7 +166,8 @@ static void LogCritical(const char* msg) {
 
 void JitShader::Compile_Assert(bool condition, const char* msg) {
     if (!condition) {
-        ABI_CallFunctionP(reinterpret_cast<const void*>(LogCritical), const_cast<char*>(msg));
+        mov(ABI_PARAM1, reinterpret_cast<size_t>(msg));
+        CallFarFunction(*this, LogCritical);
     }
 }
 
@@ -169,8 +179,8 @@ void JitShader::Compile_Assert(bool condition, const char* msg) {
  * @param dest Destination XMM register to store the loaded, swizzled source register
  */
 void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
-                                   X64Reg dest) {
-    X64Reg src_ptr;
+                                   Xmm dest) {
+    Reg64 src_ptr;
     size_t src_offset;
 
     if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
@@ -206,13 +216,13 @@ void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRe
     if (src_num == offset_src && address_register_index != 0) {
         switch (address_register_index) {
         case 1: // address offset 1
-            MOVAPS(dest, MComplex(src_ptr, ADDROFFS_REG_0, SCALE_1, src_offset_disp));
+            movaps(dest, xword[src_ptr + ADDROFFS_REG_0 + src_offset_disp]);
             break;
         case 2: // address offset 2
-            MOVAPS(dest, MComplex(src_ptr, ADDROFFS_REG_1, SCALE_1, src_offset_disp));
+            movaps(dest, xword[src_ptr + ADDROFFS_REG_1 + src_offset_disp]);
             break;
         case 3: // address offset 3
-            MOVAPS(dest, MComplex(src_ptr, LOOPCOUNT_REG, SCALE_1, src_offset_disp));
+            movaps(dest, xword[src_ptr + LOOPCOUNT_REG + src_offset_disp]);
             break;
         default:
             UNREACHABLE();
@@ -220,7 +230,7 @@ void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRe
         }
     } else {
         // Load the source
-        MOVAPS(dest, MDisp(src_ptr, src_offset_disp));
+        movaps(dest, xword[src_ptr + src_offset_disp]);
     }
 
     SwizzlePattern swiz = {g_state.vs.swizzle_data[operand_desc_id]};
@@ -232,17 +242,17 @@ void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRe
         sel = ((sel & 0xc0) >> 6) | ((sel & 3) << 6) | ((sel & 0xc) << 2) | ((sel & 0x30) >> 2);
 
         // Shuffle inputs for swizzle
-        SHUFPS(dest, R(dest), sel);
+        shufps(dest, dest, sel);
     }
 
     // If the source register should be negated, flip the negative bit using XOR
     const bool negate[] = {swiz.negate_src1, swiz.negate_src2, swiz.negate_src3};
     if (negate[src_num - 1]) {
-        XORPS(dest, R(NEGBIT));
+        xorps(dest, NEGBIT);
     }
 }
 
-void JitShader::Compile_DestEnable(Instruction instr, X64Reg src) {
+void JitShader::Compile_DestEnable(Instruction instr, Xmm src) {
     DestRegister dest;
     unsigned operand_desc_id;
     if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
@@ -263,21 +273,21 @@ void JitShader::Compile_DestEnable(Instruction instr, X64Reg src) {
     // If all components are enabled, write the result to the destination register
     if (swiz.dest_mask == NO_DEST_REG_MASK) {
         // Store dest back to memory
-        MOVAPS(MDisp(STATE, dest_offset_disp), src);
+        movaps(xword[STATE + dest_offset_disp], src);
 
     } else {
         // Not all components are enabled, so mask the result when storing to the destination
         // register...
-        MOVAPS(SCRATCH, MDisp(STATE, dest_offset_disp));
+        movaps(SCRATCH, xword[STATE + dest_offset_disp]);
 
         if (Common::GetCPUCaps().sse4_1) {
             u8 mask = ((swiz.dest_mask & 1) << 3) | ((swiz.dest_mask & 8) >> 3) |
                       ((swiz.dest_mask & 2) << 1) | ((swiz.dest_mask & 4) >> 1);
-            BLENDPS(SCRATCH, R(src), mask);
+            blendps(SCRATCH, src, mask);
         } else {
-            MOVAPS(SCRATCH2, R(src));
-            UNPCKHPS(SCRATCH2, R(SCRATCH)); // Unpack X/Y components of source and destination
-            UNPCKLPS(SCRATCH, R(src));      // Unpack Z/W components of source and destination
+            movaps(SCRATCH2, src);
+            unpckhps(SCRATCH2, SCRATCH); // Unpack X/Y components of source and destination
+            unpcklps(SCRATCH, src);      // Unpack Z/W components of source and destination
 
             // Compute selector to selectively copy source components to destination for SHUFPS
             // instruction
@@ -285,62 +295,62 @@ void JitShader::Compile_DestEnable(Instruction instr, X64Reg src) {
                      ((swiz.DestComponentEnabled(1) ? 3 : 2) << 2) |
                      ((swiz.DestComponentEnabled(2) ? 0 : 1) << 4) |
                      ((swiz.DestComponentEnabled(3) ? 2 : 3) << 6);
-            SHUFPS(SCRATCH, R(SCRATCH2), sel);
+            shufps(SCRATCH, SCRATCH2, sel);
         }
 
         // Store dest back to memory
-        MOVAPS(MDisp(STATE, dest_offset_disp), SCRATCH);
+        movaps(xword[STATE + dest_offset_disp], SCRATCH);
     }
 }
 
-void JitShader::Compile_SanitizedMul(Gen::X64Reg src1, Gen::X64Reg src2, Gen::X64Reg scratch) {
-    MOVAPS(scratch, R(src1));
-    CMPPS(scratch, R(src2), CMP_ORD);
+void JitShader::Compile_SanitizedMul(Xmm src1, Xmm src2, Xmm scratch) {
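+    // Enforce the PICA semantics that 0 * inf = 0: zero out any lane where the product is NaN
+    // even though both inputs were ordered, while lanes with a NaN input keep their NaN result.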
+    movaps(scratch, src1);
+    cmpordps(scratch, src2);
 
-    MULPS(src1, R(src2));
+    mulps(src1, src2);
 
-    MOVAPS(src2, R(src1));
-    CMPPS(src2, R(src2), CMP_UNORD);
+    movaps(src2, src1);
+    cmpunordps(src2, src2);
 
-    XORPS(scratch, R(src2));
-    ANDPS(src1, R(scratch));
+    xorps(scratch, src2);
+    andps(src1, scratch);
 }
 
 void JitShader::Compile_EvaluateCondition(Instruction instr) {
     // Note: NXOR is used below to check for equality
     switch (instr.flow_control.op) {
     case Instruction::FlowControlType::Or:
-        MOV(32, R(RAX), R(COND0));
-        MOV(32, R(RBX), R(COND1));
-        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
-        XOR(32, R(RBX), Imm32(instr.flow_control.refy.Value() ^ 1));
-        OR(32, R(RAX), R(RBX));
+        mov(eax, COND0);
+        mov(ebx, COND1);
+        xor(eax, (instr.flow_control.refx.Value() ^ 1));
+        xor(ebx, (instr.flow_control.refy.Value() ^ 1));
+        or (eax, ebx);
         break;
 
     case Instruction::FlowControlType::And:
-        MOV(32, R(RAX), R(COND0));
-        MOV(32, R(RBX), R(COND1));
-        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
-        XOR(32, R(RBX), Imm32(instr.flow_control.refy.Value() ^ 1));
-        AND(32, R(RAX), R(RBX));
+        mov(eax, COND0);
+        mov(ebx, COND1);
+        xor(eax, (instr.flow_control.refx.Value() ^ 1));
+        xor(ebx, (instr.flow_control.refy.Value() ^ 1));
+        and(eax, ebx);
         break;
 
     case Instruction::FlowControlType::JustX:
-        MOV(32, R(RAX), R(COND0));
-        XOR(32, R(RAX), Imm32(instr.flow_control.refx.Value() ^ 1));
+        mov(eax, COND0);
+        xor(eax, (instr.flow_control.refx.Value() ^ 1));
         break;
 
     case Instruction::FlowControlType::JustY:
-        MOV(32, R(RAX), R(COND1));
-        XOR(32, R(RAX), Imm32(instr.flow_control.refy.Value() ^ 1));
+        mov(eax, COND1);
+        xor(eax, (instr.flow_control.refy.Value() ^ 1));
         break;
     }
 }
 
 void JitShader::Compile_UniformCondition(Instruction instr) {
-    int offset =
+    size_t offset =
         ShaderSetup::UniformOffset(RegisterType::BoolUniform, instr.flow_control.bool_uniform_id);
-    CMP(sizeof(bool) * 8, MDisp(SETUP, offset), Imm8(0));
+    cmp(byte[SETUP + offset], 0);
 }
 
 BitSet32 JitShader::PersistentCallerSavedRegs() {
@@ -350,7 +360,7 @@ BitSet32 JitShader::PersistentCallerSavedRegs() {
 void JitShader::Compile_ADD(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
     Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
-    ADDPS(SRC1, R(SRC2));
+    addps(SRC1, SRC2);
     Compile_DestEnable(instr, SRC1);
 }
 
@@ -360,15 +370,15 @@ void JitShader::Compile_DP3(Instruction instr) {
 
     Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
 
-    MOVAPS(SRC2, R(SRC1));
-    SHUFPS(SRC2, R(SRC2), _MM_SHUFFLE(1, 1, 1, 1));
+    movaps(SRC2, SRC1);
+    shufps(SRC2, SRC2, _MM_SHUFFLE(1, 1, 1, 1));
 
-    MOVAPS(SRC3, R(SRC1));
-    SHUFPS(SRC3, R(SRC3), _MM_SHUFFLE(2, 2, 2, 2));
+    movaps(SRC3, SRC1);
+    shufps(SRC3, SRC3, _MM_SHUFFLE(2, 2, 2, 2));
 
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0));
-    ADDPS(SRC1, R(SRC2));
-    ADDPS(SRC1, R(SRC3));
+    shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0));
+    addps(SRC1, SRC2);
+    addps(SRC1, SRC3);
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -379,13 +389,13 @@ void JitShader::Compile_DP4(Instruction instr) {
 
     Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
 
-    MOVAPS(SRC2, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
-    ADDPS(SRC1, R(SRC2));
+    movaps(SRC2, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
+    addps(SRC1, SRC2);
 
-    MOVAPS(SRC2, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
-    ADDPS(SRC1, R(SRC2));
+    movaps(SRC2, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
+    addps(SRC1, SRC2);
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -401,50 +411,50 @@ void JitShader::Compile_DPH(Instruction instr) {
 
     if (Common::GetCPUCaps().sse4_1) {
         // Set 4th component to 1.0
-        BLENDPS(SRC1, R(ONE), 0x8); // 0b1000
+        blendps(SRC1, ONE, 0b1000);
     } else {
         // Set 4th component to 1.0
-        MOVAPS(SCRATCH, R(SRC1));
-        UNPCKHPS(SCRATCH, R(ONE));  // XYZW, 1111 -> Z1__
-        UNPCKLPD(SRC1, R(SCRATCH)); // XYZW, Z1__ -> XYZ1
+        movaps(SCRATCH, SRC1);
+        unpckhps(SCRATCH, ONE);  // XYZW, 1111 -> Z1__
+        unpcklpd(SRC1, SCRATCH); // XYZW, Z1__ -> XYZ1
     }
 
     Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
 
-    MOVAPS(SRC2, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
-    ADDPS(SRC1, R(SRC2));
+    movaps(SRC2, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
+    addps(SRC1, SRC2);
 
-    MOVAPS(SRC2, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
-    ADDPS(SRC1, R(SRC2));
+    movaps(SRC2, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
+    addps(SRC1, SRC2);
 
     Compile_DestEnable(instr, SRC1);
 }
 
 void JitShader::Compile_EX2(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
-    MOVSS(XMM0, R(SRC1));
+    movss(xmm0, SRC1); // first float argument is passed in xmm0 on both ABIs
 
-    ABI_PushRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
-    ABI_CallFunction(reinterpret_cast<const void*>(exp2f));
-    ABI_PopRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
+    ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
+    CallFarFunction(*this, exp2f);
+    ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
 
-    SHUFPS(XMM0, R(XMM0), _MM_SHUFFLE(0, 0, 0, 0));
-    MOVAPS(SRC1, R(XMM0));
+    shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // float return value comes back in xmm0
+    movaps(SRC1, xmm0);
     Compile_DestEnable(instr, SRC1);
 }
 
 void JitShader::Compile_LG2(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
-    MOVSS(XMM0, R(SRC1));
+    movss(xmm0, SRC1); // first float argument is passed in xmm0 on both ABIs
 
-    ABI_PushRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
-    ABI_CallFunction(reinterpret_cast<const void*>(log2f));
-    ABI_PopRegistersAndAdjustStack(PersistentCallerSavedRegs(), 0);
+    ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
+    CallFarFunction(*this, log2f);
+    ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
 
-    SHUFPS(XMM0, R(XMM0), _MM_SHUFFLE(0, 0, 0, 0));
-    MOVAPS(SRC1, R(XMM0));
+    shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // float return value comes back in xmm0
+    movaps(SRC1, xmm0);
     Compile_DestEnable(instr, SRC1);
 }
 
@@ -464,8 +474,8 @@ void JitShader::Compile_SGE(Instruction instr) {
         Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
     }
 
-    CMPPS(SRC2, R(SRC1), CMP_LE);
-    ANDPS(SRC2, R(ONE));
+    cmpleps(SRC2, SRC1);
+    andps(SRC2, ONE);
 
     Compile_DestEnable(instr, SRC2);
 }
@@ -479,8 +489,8 @@ void JitShader::Compile_SLT(Instruction instr) {
         Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
     }
 
-    CMPPS(SRC1, R(SRC2), CMP_LT);
-    ANDPS(SRC1, R(ONE));
+    cmpltps(SRC1, SRC2);
+    andps(SRC1, ONE);
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -489,10 +499,10 @@ void JitShader::Compile_FLR(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
 
     if (Common::GetCPUCaps().sse4_1) {
-        ROUNDFLOORPS(SRC1, R(SRC1));
+        roundps(SRC1, SRC1, _MM_FROUND_FLOOR);
     } else {
-        CVTTPS2DQ(SRC1, R(SRC1));
-        CVTDQ2PS(SRC1, R(SRC1));
+        cvttps2dq(SRC1, SRC1);
+        cvtdq2ps(SRC1, SRC1);
     }
 
     Compile_DestEnable(instr, SRC1);
@@ -502,7 +512,7 @@ void JitShader::Compile_MAX(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
     Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
     // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
-    MAXPS(SRC1, R(SRC2));
+    maxps(SRC1, SRC2);
     Compile_DestEnable(instr, SRC1);
 }
 
@@ -510,7 +520,7 @@ void JitShader::Compile_MIN(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
     Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
     // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
-    MINPS(SRC1, R(SRC2));
+    minps(SRC1, SRC2);
     Compile_DestEnable(instr, SRC1);
 }
 
@@ -524,37 +534,37 @@ void JitShader::Compile_MOVA(Instruction instr) {
     Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
 
     // Convert floats to integers using truncation (only care about X and Y components)
-    CVTTPS2DQ(SRC1, R(SRC1));
+    cvttps2dq(SRC1, SRC1);
 
     // Get result
-    MOVQ_xmm(R(RAX), SRC1);
+    movq(rax, SRC1);
 
     // Handle destination enable
     if (swiz.DestComponentEnabled(0) && swiz.DestComponentEnabled(1)) {
         // Move and sign-extend low 32 bits
-        MOVSX(64, 32, ADDROFFS_REG_0, R(RAX));
+        movsxd(ADDROFFS_REG_0, eax);
 
         // Move and sign-extend high 32 bits
-        SHR(64, R(RAX), Imm8(32));
-        MOVSX(64, 32, ADDROFFS_REG_1, R(RAX));
+        shr(rax, 32);
+        movsxd(ADDROFFS_REG_1, eax);
 
         // Multiply by 16 to be used as an offset later
-        SHL(64, R(ADDROFFS_REG_0), Imm8(4));
-        SHL(64, R(ADDROFFS_REG_1), Imm8(4));
+        shl(ADDROFFS_REG_0, 4);
+        shl(ADDROFFS_REG_1, 4);
     } else {
         if (swiz.DestComponentEnabled(0)) {
             // Move and sign-extend low 32 bits
-            MOVSX(64, 32, ADDROFFS_REG_0, R(RAX));
+            movsxd(ADDROFFS_REG_0, eax);
 
             // Multiply by 16 to be used as an offset later
-            SHL(64, R(ADDROFFS_REG_0), Imm8(4));
+            shl(ADDROFFS_REG_0, 4);
         } else if (swiz.DestComponentEnabled(1)) {
             // Move and sign-extend high 32 bits
-            SHR(64, R(RAX), Imm8(32));
-            MOVSX(64, 32, ADDROFFS_REG_1, R(RAX));
+            shr(rax, 32);
+            movsxd(ADDROFFS_REG_1, eax);
 
             // Multiply by 16 to be used as an offset later
-            SHL(64, R(ADDROFFS_REG_1), Imm8(4));
+            shl(ADDROFFS_REG_1, 4);
         }
     }
 }
@@ -569,8 +579,8 @@ void JitShader::Compile_RCP(Instruction instr) {
 
     // TODO(bunnei): RCPSS is a pretty rough approximation, this might cause problems if Pica
     // performs this operation more accurately. This should be checked on hardware.
-    RCPSS(SRC1, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0)); // XYWZ -> XXXX
+    rcpss(SRC1, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYWZ -> XXXX
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -580,8 +590,8 @@ void JitShader::Compile_RSQ(Instruction instr) {
 
     // TODO(bunnei): RSQRTSS is a pretty rough approximation, this might cause problems if Pica
     // performs this operation more accurately. This should be checked on hardware.
-    RSQRTSS(SRC1, R(SRC1));
-    SHUFPS(SRC1, R(SRC1), _MM_SHUFFLE(0, 0, 0, 0)); // XYWZ -> XXXX
+    rsqrtss(SRC1, SRC1);
+    shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYWZ -> XXXX
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -589,34 +599,35 @@ void JitShader::Compile_RSQ(Instruction instr) {
 void JitShader::Compile_NOP(Instruction instr) {}
 
 void JitShader::Compile_END(Instruction instr) {
-    ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
-    RET();
+    ABI_PopRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8);
+    ret();
 }
 
 void JitShader::Compile_CALL(Instruction instr) {
     // Push offset of the return
-    PUSH(64, Imm32(instr.flow_control.dest_offset + instr.flow_control.num_instructions));
+    push(qword, (instr.flow_control.dest_offset + instr.flow_control.num_instructions));
 
     // Call the subroutine
-    FixupBranch b = CALL();
-    fixup_branches.push_back({b, instr.flow_control.dest_offset});
+    call(instruction_labels[instr.flow_control.dest_offset]);
 
     // Skip over the return offset that's on the stack
-    ADD(64, R(RSP), Imm32(8));
+    add(rsp, 8);
 }
 
 void JitShader::Compile_CALLC(Instruction instr) {
     Compile_EvaluateCondition(instr);
-    FixupBranch b = J_CC(CC_Z, true);
+    Label b;
+    jz(b);
     Compile_CALL(instr);
-    SetJumpTarget(b);
+    L(b);
 }
 
 void JitShader::Compile_CALLU(Instruction instr) {
     Compile_UniformCondition(instr);
-    FixupBranch b = J_CC(CC_Z, true);
+    Label b;
+    jz(b);
     Compile_CALL(instr);
-    SetJumpTarget(b);
+    L(b);
 }
 
 void JitShader::Compile_CMP(Instruction instr) {
@@ -633,33 +644,33 @@ void JitShader::Compile_CMP(Instruction instr) {
     static const u8 cmp[] = {CMP_EQ, CMP_NEQ, CMP_LT, CMP_LE, CMP_LT, CMP_LE};
 
     bool invert_op_x = (op_x == Op::GreaterThan || op_x == Op::GreaterEqual);
-    Gen::X64Reg lhs_x = invert_op_x ? SRC2 : SRC1;
-    Gen::X64Reg rhs_x = invert_op_x ? SRC1 : SRC2;
+    Xmm lhs_x = invert_op_x ? SRC2 : SRC1;
+    Xmm rhs_x = invert_op_x ? SRC1 : SRC2;
 
     if (op_x == op_y) {
         // Compare X-component and Y-component together
-        CMPPS(lhs_x, R(rhs_x), cmp[op_x]);
-        MOVQ_xmm(R(COND0), lhs_x);
+        cmpps(lhs_x, rhs_x, cmp[op_x]);
+        movq(COND0, lhs_x);
 
-        MOV(64, R(COND1), R(COND0));
+        mov(COND1, COND0);
     } else {
         bool invert_op_y = (op_y == Op::GreaterThan || op_y == Op::GreaterEqual);
-        Gen::X64Reg lhs_y = invert_op_y ? SRC2 : SRC1;
-        Gen::X64Reg rhs_y = invert_op_y ? SRC1 : SRC2;
+        Xmm lhs_y = invert_op_y ? SRC2 : SRC1;
+        Xmm rhs_y = invert_op_y ? SRC1 : SRC2;
 
         // Compare X-component
-        MOVAPS(SCRATCH, R(lhs_x));
-        CMPSS(SCRATCH, R(rhs_x), cmp[op_x]);
+        movaps(SCRATCH, lhs_x);
+        cmpss(SCRATCH, rhs_x, cmp[op_x]);
 
         // Compare Y-component
-        CMPPS(lhs_y, R(rhs_y), cmp[op_y]);
+        cmpps(lhs_y, rhs_y, cmp[op_y]);
 
-        MOVQ_xmm(R(COND0), SCRATCH);
-        MOVQ_xmm(R(COND1), lhs_y);
+        movq(COND0, SCRATCH);
+        movq(COND1, lhs_y);
     }
 
-    SHR(32, R(COND0), Imm8(31));
-    SHR(64, R(COND1), Imm8(63));
+    shr(COND0.cvt32(), 31); // ignores upper 32 bits in source
+    shr(COND1, 63);
 }
 
 void JitShader::Compile_MAD(Instruction instr) {
@@ -674,7 +685,7 @@ void JitShader::Compile_MAD(Instruction instr) {
     }
 
     Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
-    ADDPS(SRC1, R(SRC3));
+    addps(SRC1, SRC3);
 
     Compile_DestEnable(instr, SRC1);
 }
@@ -682,6 +693,7 @@ void JitShader::Compile_MAD(Instruction instr) {
 void JitShader::Compile_IF(Instruction instr) {
     Compile_Assert(instr.flow_control.dest_offset >= program_counter,
                    "Backwards if-statements not supported");
+    Label l_else, l_endif;
 
     // Evaluate the "IF" condition
     if (instr.opcode.Value() == OpCode::Id::IFU) {
@@ -689,26 +701,25 @@ void JitShader::Compile_IF(Instruction instr) {
     } else if (instr.opcode.Value() == OpCode::Id::IFC) {
         Compile_EvaluateCondition(instr);
     }
-    FixupBranch b = J_CC(CC_Z, true);
+    jz(l_else, T_NEAR);
 
     // Compile the code that corresponds to the condition evaluating as true
     Compile_Block(instr.flow_control.dest_offset);
 
     // If there isn't an "ELSE" condition, we are done here
     if (instr.flow_control.num_instructions == 0) {
-        SetJumpTarget(b);
+        L(l_else);
         return;
     }
 
-    FixupBranch b2 = J(true);
-
-    SetJumpTarget(b);
+    jmp(l_endif, T_NEAR);
 
+    L(l_else);
     // This code corresponds to the "ELSE" condition
     // Compile the code that corresponds to the condition evaluating as false
     Compile_Block(instr.flow_control.dest_offset + instr.flow_control.num_instructions);
 
-    SetJumpTarget(b2);
+    L(l_endif);
 }
 
 void JitShader::Compile_LOOP(Instruction instr) {
@@ -721,25 +732,26 @@ void JitShader::Compile_LOOP(Instruction instr) {
     // This decodes the fields from the integer uniform at index instr.flow_control.int_uniform_id.
     // The Y (LOOPCOUNT_REG) and Z (LOOPINC) component are kept multiplied by 16 (Left shifted by
     // 4 bits) to be used as an offset into the 16-byte vector registers later
-    int offset =
+    size_t offset =
         ShaderSetup::UniformOffset(RegisterType::IntUniform, instr.flow_control.int_uniform_id);
-    MOV(32, R(LOOPCOUNT), MDisp(SETUP, offset));
-    MOV(32, R(LOOPCOUNT_REG), R(LOOPCOUNT));
-    SHR(32, R(LOOPCOUNT_REG), Imm8(4));
-    AND(32, R(LOOPCOUNT_REG), Imm32(0xFF0)); // Y-component is the start
-    MOV(32, R(LOOPINC), R(LOOPCOUNT));
-    SHR(32, R(LOOPINC), Imm8(12));
-    AND(32, R(LOOPINC), Imm32(0xFF0));     // Z-component is the incrementer
-    MOVZX(32, 8, LOOPCOUNT, R(LOOPCOUNT)); // X-component is iteration count
-    ADD(32, R(LOOPCOUNT), Imm8(1));        // Iteration count is X-component + 1
+    mov(LOOPCOUNT, dword[SETUP + offset]);
+    mov(LOOPCOUNT_REG, LOOPCOUNT);
+    shr(LOOPCOUNT_REG, 4);
+    and(LOOPCOUNT_REG, 0xFF0); // Y-component is the start
+    mov(LOOPINC, LOOPCOUNT);
+    shr(LOOPINC, 12);
+    and(LOOPINC, 0xFF0);                // Z-component is the incrementer
+    movzx(LOOPCOUNT, LOOPCOUNT.cvt8()); // X-component is iteration count
+    add(LOOPCOUNT, 1);                  // Iteration count is X-component + 1
 
-    auto loop_start = GetCodePtr();
+    Label l_loop_start;
+    L(l_loop_start);
 
     Compile_Block(instr.flow_control.dest_offset + 1);
 
-    ADD(32, R(LOOPCOUNT_REG), R(LOOPINC)); // Increment LOOPCOUNT_REG by Z-component
-    SUB(32, R(LOOPCOUNT), Imm8(1));        // Increment loop count by 1
-    J_CC(CC_NZ, loop_start);               // Loop if not equal
+    add(LOOPCOUNT_REG, LOOPINC); // Increment LOOPCOUNT_REG by Z-component
+    sub(LOOPCOUNT, 1);           // Decrement loop count by 1
+    jnz(l_loop_start);           // Loop while the count is not zero
 
     looping = false;
 }
@@ -755,8 +767,12 @@ void JitShader::Compile_JMP(Instruction instr) {
     bool inverted_condition =
         (instr.opcode.Value() == OpCode::Id::JMPU) && (instr.flow_control.num_instructions & 1);
 
-    FixupBranch b = J_CC(inverted_condition ? CC_Z : CC_NZ, true);
-    fixup_branches.push_back({b, instr.flow_control.dest_offset});
+    Label& b = instruction_labels[instr.flow_control.dest_offset];
+    if (inverted_condition) {
+        jz(b, T_NEAR);
+    } else {
+        jnz(b, T_NEAR);
+    }
 }
 
 void JitShader::Compile_Block(unsigned end) {
@@ -767,13 +783,14 @@ void JitShader::Compile_Block(unsigned end) {
 
 void JitShader::Compile_Return() {
     // Peek return offset on the stack and check if we're at that offset
-    MOV(64, R(RAX), MDisp(RSP, 8));
-    CMP(32, R(RAX), Imm32(program_counter));
+    mov(rax, qword[rsp + 8]);
+    cmp(eax, program_counter);
 
     // If so, jump back to before CALL
-    FixupBranch b = J_CC(CC_NZ, true);
-    RET();
-    SetJumpTarget(b);
+    Label b;
+    jnz(b);
+    ret();
+    L(b);
 }
 
 void JitShader::Compile_NextInstr() {
@@ -781,9 +798,7 @@ void JitShader::Compile_NextInstr() {
         Compile_Return();
     }
 
-    ASSERT_MSG(code_ptr[program_counter] == nullptr,
-               "Tried to compile already compiled shader location!");
-    code_ptr[program_counter] = GetCodePtr();
+    L(instruction_labels[program_counter]);
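+    // Bind the label for this instruction so that branch and call sites can target it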
 
     Instruction instr = GetVertexShaderInstruction(program_counter++);
 
@@ -824,64 +839,53 @@ void JitShader::FindReturnOffsets() {
 
 void JitShader::Compile() {
     // Reset flow control state
-    program = (CompiledShader*)GetCodePtr();
+    program = (CompiledShader*)getCurr();
     program_counter = 0;
     looping = false;
-    code_ptr.fill(nullptr);
-    fixup_branches.clear();
+    instruction_labels.fill(Xbyak::Label());
 
     // Find all `CALL` instructions and identify return locations
     FindReturnOffsets();
 
     // The stack pointer is 8 modulo 16 at the entry of a procedure
-    ABI_PushRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8);
+    ABI_PushRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8);
 
-    MOV(PTRBITS, R(SETUP), R(ABI_PARAM1));
-    MOV(PTRBITS, R(STATE), R(ABI_PARAM2));
+    mov(SETUP, ABI_PARAM1);
+    mov(STATE, ABI_PARAM2);
 
     // Zero address/loop  registers
-    XOR(64, R(ADDROFFS_REG_0), R(ADDROFFS_REG_0));
-    XOR(64, R(ADDROFFS_REG_1), R(ADDROFFS_REG_1));
-    XOR(64, R(LOOPCOUNT_REG), R(LOOPCOUNT_REG));
+    xor(ADDROFFS_REG_0.cvt32(), ADDROFFS_REG_0.cvt32());
+    xor(ADDROFFS_REG_1.cvt32(), ADDROFFS_REG_1.cvt32());
+    xor(LOOPCOUNT_REG, LOOPCOUNT_REG);
 
     // Used to set a register to one
     static const __m128 one = {1.f, 1.f, 1.f, 1.f};
-    MOV(PTRBITS, R(RAX), ImmPtr(&one));
-    MOVAPS(ONE, MatR(RAX));
+    mov(rax, reinterpret_cast<size_t>(&one));
+    movaps(ONE, xword[rax]);
 
     // Used to negate registers
     static const __m128 neg = {-0.f, -0.f, -0.f, -0.f};
-    MOV(PTRBITS, R(RAX), ImmPtr(&neg));
-    MOVAPS(NEGBIT, MatR(RAX));
+    mov(rax, reinterpret_cast<size_t>(&neg));
+    movaps(NEGBIT, xword[rax]);
 
     // Jump to start of the shader program
-    JMPptr(R(ABI_PARAM3));
+    jmp(ABI_PARAM3);
 
     // Compile entire program
     Compile_Block(static_cast<unsigned>(g_state.vs.program_code.size()));
 
-    // Set the target for any incomplete branches now that the entire shader program has been
-    // emitted
-    for (const auto& branch : fixup_branches) {
-        SetJumpTarget(branch.first, code_ptr[branch.second]);
-    }
-
     // Free memory that's no longer needed
     return_offsets.clear();
     return_offsets.shrink_to_fit();
-    fixup_branches.clear();
-    fixup_branches.shrink_to_fit();
 
-    uintptr_t size =
-        reinterpret_cast<uintptr_t>(GetCodePtr()) - reinterpret_cast<uintptr_t>(program);
+    ready();
+
+    uintptr_t size = reinterpret_cast<uintptr_t>(getCurr()) - reinterpret_cast<uintptr_t>(program);
     ASSERT_MSG(size <= MAX_SHADER_SIZE, "Compiled a shader that exceeds the allocated size!");
-
     LOG_DEBUG(HW_GPU, "Compiled shader size=%lu", size);
 }
 
-JitShader::JitShader() {
-    AllocCodeSpace(MAX_SHADER_SIZE);
-}
+JitShader::JitShader() : Xbyak::CodeGenerator(MAX_SHADER_SIZE) {}
 
 } // namespace Shader
 
diff --git a/src/video_core/shader/shader_jit_x64.h b/src/video_core/shader/shader_jit_x64.h
index 98de5ecef..e0ecde3f2 100644
--- a/src/video_core/shader/shader_jit_x64.h
+++ b/src/video_core/shader/shader_jit_x64.h
@@ -9,6 +9,7 @@
 #include <utility>
 #include <vector>
 #include <nihstro/shader_bytecode.h>
+#include <xbyak.h>
 #include "common/bit_set.h"
 #include "common/common_types.h"
 #include "common/x64/emitter.h"
@@ -29,12 +30,12 @@ constexpr size_t MAX_SHADER_SIZE = 1024 * 64;
  * This class implements the shader JIT compiler. It recompiles a Pica shader program into x86_64
  * code that can be executed on the host machine directly.
  */
-class JitShader : public Gen::XCodeBlock {
+class JitShader : public Xbyak::CodeGenerator {
 public:
     JitShader();
 
     void Run(const ShaderSetup& setup, UnitState<false>& state, unsigned offset) const {
-        program(&setup, &state, code_ptr[offset]);
+        program(&setup, &state, instruction_labels[offset].getAddress());
     }
 
     void Compile();
@@ -71,14 +72,14 @@ private:
     void Compile_NextInstr();
 
     void Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
-                            Gen::X64Reg dest);
-    void Compile_DestEnable(Instruction instr, Gen::X64Reg dest);
+                            Xbyak::Xmm dest);
+    void Compile_DestEnable(Instruction instr, Xbyak::Xmm dest);
 
     /**
      * Compiles a `MUL src1, src2` operation, properly handling the PICA semantics when multiplying
      * zero by inf. Clobbers `src2` and `scratch`.
      */
-    void Compile_SanitizedMul(Gen::X64Reg src1, Gen::X64Reg src2, Gen::X64Reg scratch);
+    void Compile_SanitizedMul(Xbyak::Xmm src1, Xbyak::Xmm src2, Xbyak::Xmm scratch);
 
     void Compile_EvaluateCondition(Instruction instr);
     void Compile_UniformCondition(Instruction instr);
@@ -103,7 +104,7 @@ private:
     void FindReturnOffsets();
 
     /// Mapping of Pica VS instructions to pointers in the emitted code
-    std::array<const u8*, 1024> code_ptr;
+    std::array<Xbyak::Label, 1024> instruction_labels;
 
     /// Offsets in code where a return needs to be inserted
     std::vector<unsigned> return_offsets;
@@ -111,9 +112,6 @@ private:
     unsigned program_counter = 0; ///< Offset of the next instruction to decode
     bool looping = false;         ///< True if compiling a loop, used to check for nested loops
 
-    /// Branches that need to be fixed up once the entire shader program is compiled
-    std::vector<std::pair<Gen::FixupBranch, unsigned>> fixup_branches;
-
     using CompiledShader = void(const void* setup, void* state, const u8* start_addr);
     CompiledShader* program = nullptr;
 };